filename | text
---|---|
the-stack_0_16224
|
from JumpScale import j
import argparse
import sys
class ArgumentParser(argparse.ArgumentParser):
def exit(self, status=0, message=None):
if message:
self._print_message(message, sys.stderr)
if j.application.state == "RUNNING":
j.application.stop(status)
else:
sys.exit(status)
def processLogin(parser):
parser.add_argument("-l", '--login', help='login for grid, if not specified defaults to root')
parser.add_argument("-p", '--passwd', help='passwd for grid')
parser.add_argument(
"-a", '--addr', help='ip addr of master, if not specified will be the one as specified in local config')
opts = parser.parse_args()
if opts.login is None:
opts.login = "root"
# if opts.passwd==None and opts.login=="root":
# if j.application.config.exists("grid.master.superadminpasswd"):
# opts.passwd=j.application.config.get("grid.master.superadminpasswd")
# else:
# opts.passwd=j.tools.console.askString("please provide superadmin passwd for the grid.")
# if opts.addr==None:
# opts.addr=j.application.config.get("grid.master.ip")
return opts
def getProcess(parser=None):
parser = parser or ArgumentParser()
parser.add_argument('-d', '--domain', help='Process domain name')
parser.add_argument('-n', '--name', help='Process name')
return parser.parse_args()
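# A minimal usage sketch, assuming this module is imported by a grid CLI script
# running inside a JumpScale environment; the printed attribute names simply
# mirror the options registered above.
if __name__ == "__main__":
    opts = getProcess()
    print(opts.domain, opts.name)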
|
the-stack_0_16226
|
"""The :mod:`interpreter` module defines the ``PushInterpreter`` used to run Push programs."""
import traceback
from typing import Union
import time
from enum import Enum
from pyshgp.push.instruction import Instruction
from pyshgp.push.program import Program
from pyshgp.push.state import PushState
from pyshgp.push.instruction_set import InstructionSet
from pyshgp.push.atoms import Atom, Closer, Literal, InstructionMeta, CodeBlock, Input
from pyshgp.push.config import PushConfig
from pyshgp.tap import tap
from pyshgp.validation import PushError
class PushInterpreterStatus(Enum):
"""Enum class of all potential statuses of a PushInterpreter."""
normal = 1
step_limit_exceeded = 2
runtime_limit_exceeded = 3
growth_cap_exceeded = 4
class PushInterpreter:
"""An interpreter capable of running Push programs.
Parameters
----------
instruction_set : Union[InstructionSet, str], optional
The ``InstructionSet`` to use for executing programs. Default is "core"
which instantiates an ``InstructionSet`` using all the core instructions.
Attributes
----------
instruction_set : InstructionSet
The ``InstructionSet`` to use for executing programs.
state : PushState
The current ``PushState``. Contains one stack for each ``PushType``
mentioned by the instructions in the instruction set.
status : PushInterpreterStatus
An enum value denoting whether the interpreter has encountered a situation
where non-standard termination was required.
"""
def __init__(self,
instruction_set: Union[InstructionSet, str] = "core",
reset_on_run: bool = True):
self.reset_on_run = reset_on_run
# If no instruction set given, create one and register all instructions.
if instruction_set == "core":
self.instruction_set = InstructionSet(register_core=True)
else:
self.instruction_set = instruction_set
self.type_library = self.instruction_set.type_library
# Initialize the PushState and status
self.state: PushState = None
self.status: PushInterpreterStatus = None
self._validate()
def _validate(self):
library_type_names = set(self.type_library.keys())
required_stacks = self.instruction_set.required_stacks() - {"stdout", "exec", "untyped"}
if not required_stacks <= library_type_names:
raise ValueError(
"PushInterpreter instruction_set and type_library are incompatible. {iset} vs {tlib}. Diff: {d}".format(
iset=required_stacks,
tlib=library_type_names,
d=required_stacks - library_type_names,
))
def _evaluate_instruction(self, instruction: Instruction, config: PushConfig):
self.state = instruction.evaluate(self.state, config)
def untyped_to_typed(self):
"""Infer ``PushType`` of items on state's untyped queue and push to corresponding stacks."""
while len(self.state.untyped) > 0:
el = self.state.untyped.popleft()
push_type = self.type_library.push_type_of(el, error_on_not_found=True)
self.state[push_type.name].push(el)
@tap
def evaluate_atom(self, atom: Atom, config: PushConfig):
"""Evaluate an ``Atom``.
Parameters
----------
atom : Atom
The Atom (``Literal``, ``InstructionMeta``, ``Input``, or ``CodeBlock``) to
evaluate against the current ``PushState``.
config : PushConfig
The configuration of the Push program being run.
"""
try:
if isinstance(atom, InstructionMeta):
self._evaluate_instruction(self.instruction_set[atom.name], config)
elif isinstance(atom, Input):
input_value = self.state.inputs[atom.input_index]
self.state.untyped.append(input_value)
elif isinstance(atom, CodeBlock):
for a in atom[::-1]:
self.state["exec"].push(a)
elif isinstance(atom, Literal):
self.state[atom.push_type.name].push(atom.value)
elif isinstance(atom, Closer):
raise PushError("Closers should not be in push programs. Only genomes.")
else:
raise PushError("Cannot evaluate {t}, require a subclass of Atom".format(t=type(atom)))
self.untyped_to_typed()
except Exception as e:
err_type = type(e)
err_msg = str(e)
traceback.print_exc()
raise PushError(
"{t} raised while evaluating {atom}. Original message: \"{m}\"".format(
t=err_type.__name__,
atom=atom,
m=err_msg
))
@tap
def run(self,
program: Program,
inputs: list,
print_trace: bool = False) -> list:
"""Run a Push ``Program`` given some inputs and desired output ``PushTypes``.
The general flow of this method is:
1. Create a new push state
2. Load the program and inputs.
3. If the exec stack is empty, return the outputs.
4. Else, pop the exec stack and process the atom.
5. Return to step 3.
Parameters
----------
program : Program
Program to run.
inputs : list
A sequence of values to use as inputs to the push program.
print_trace : bool
If True, each step of program execution will be summarized in stdout.
Returns
-------
Sequence
A sequence of values pulled from the final push state. May contain
pyshgp.utils.Token.no_stack_item if output stacks are empty.
"""
push_config = program.signature.push_config
if self.reset_on_run or self.state is None:
self.state = PushState(self.type_library, push_config)
self.status = PushInterpreterStatus.normal
# Setup
self.state.load_code(program.code)
self.state.load_inputs(inputs)
stop_time = time.time() + push_config.runtime_limit
steps = 0
if print_trace:
print("Initial State:")
self.state.pretty_print()
# Iterate atom evaluation until entire program is evaluated.
while len(self.state["exec"]) > 0:
# Stopping conditions
if steps > push_config.step_limit:
self.status = PushInterpreterStatus.step_limit_exceeded
break
if time.time() > stop_time:
self.status = PushInterpreterStatus.runtime_limit_exceeded
break
# Next atom in the program to evaluate.
next_atom = self.state["exec"].pop()
if print_trace:
start = time.time()
print("\nCurrent Atom: " + str(next_atom))
# Evaluate atom.
old_size = self.state.size()
self.evaluate_atom(next_atom, push_config)
if self.state.size() > old_size + push_config.growth_cap:
self.status = PushInterpreterStatus.growth_cap_exceeded
break
if print_trace:
duration = time.time() - start
print("Current State (step {step}):".format(step=steps))
self.state.pretty_print()
print("Step duration:", duration)
steps += 1
if print_trace:
print("Finished program evaluation.")
return self.state.observe_stacks(program.signature.output_stacks)
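# A hedged usage sketch: `program` is assumed to come from elsewhere (e.g. a
# finished pyshgp search) and `inputs` must match its signature; only the
# constructor defaults shown above are relied on here.
def _run_program_sketch(program: Program, inputs: list) -> list:
    interpreter = PushInterpreter()  # "core" instruction set by default
    outputs = interpreter.run(program, inputs)
    if interpreter.status is not PushInterpreterStatus.normal:
        print("Non-standard termination:", interpreter.status)
    return outputs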
|
the-stack_0_16228
|
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from os import path
from config import INPUT_PATH, OUTPUT_PATH
def draw_response_times_plot(input_file, output_file):
sns.set_style("ticks", {"xtick.major.size": 0})
response_times = pd.read_csv(path.join(INPUT_PATH, input_file), sep=';')
# colors
td_beacon_color = "#08519C"
td_no_beacon_color = "#6BAED6"
td_untrained_color = "#006D2C"
bottom_up_color = "#74C476"
flatui = [td_beacon_color, td_no_beacon_color, td_untrained_color, bottom_up_color]
# Draw a boxplot
boxplot = sns.boxplot(x="Condition", y="ResponseTime", data=response_times, palette=sns.color_palette(flatui))
# set axes dimensions & labels
boxplot.set(ylim=(0, 35000))
boxplot.set(ylabel='Response Time in msec')
# remove lines around graph
sns.despine(bottom=True, trim=True)
# save output as file, in a high resolution
fig = boxplot.get_figure()
fig.savefig(path.join(OUTPUT_PATH, output_file), dpi=300, transparent=False)
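# Hedged usage sketch: the file names below are illustrative; the CSV is
# expected to live under INPUT_PATH and to provide semicolon-separated
# "Condition" and "ResponseTime" columns as read above.
if __name__ == "__main__":
    draw_response_times_plot("response_times.csv", "response_times.png")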
|
the-stack_0_16230
|
import subprocess, json, re
command = "Get-Service -Name Audiosrv -ComputerName asl-ad04"
p = subprocess.Popen(
[
"powershell.exe",
"({}) | ConvertTo-Json -Compress".format(command)
],
stdout=subprocess.PIPE
)
result = (p.communicate()[0]).decode('cp1252')
if re.search("^{", result):
print("Valid")
print(result)
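# Hedged follow-up sketch: since ConvertTo-Json was requested, the output can be
# parsed with the json module imported above; the key names used here mirror
# Get-Service properties and are assumptions.
if result.strip().startswith("{"):
    service = json.loads(result)
    print(service.get("Name"), service.get("Status"))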
|
the-stack_0_16231
|
#!/usr/bin/env python -tt
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import os
#PHENOS
"""
"""
def check_directories():
"""
Ensure all expected directories (and set-up files) are present and correct.
Create any paths that are missing.
"""
expected_directories=["DAT files",
"Genotypes",
"Layouts",
"Logs",
"Plots",
"rQTL input",
"Stinger files"]
for ed in expected_directories:
if not os.path.exists(ed):
#logging.info("Directory '{}' not found.".format(ed))
os.mkdir(ed)
#logging.info("Directory '{}' created.".format(ed))
#check_directories()
with open('README.rst') as f:
readme = f.read()
with open('LICENSE') as f:
license = f.read()
setup(name='phenos',
version='3.3.0',
description='tools for handling solid media phenotyping data',
long_description=readme,
author='David B. H. Barton',
author_email='[email protected]',
url='http://github.com/gact/phenos',
license=license,
install_requires=['numpy>=1.9.2',
'scipy>=0.16.0c1',
'matplotlib>=1.4.3',
'tables>=3.2.0',
'xlrd>=0.9.3',
'brewer2mpl>=1.4.1',
'pywin32'],  # distribution that provides the win32com modules
packages=['phenos'])
|
the-stack_0_16232
|
from setuptools import setup
import os
import glob
package_name = 'rastreator_simulation'
setup(
name=package_name,
version='0.0.0',
packages=[package_name],
data_files=[
('share/ament_index/resource_index/packages',
['resource/' + package_name]),
('share/' + package_name, ['package.xml']),
(os.path.join('share', package_name, 'param'),glob.glob('param/*.yaml')),
(os.path.join('share', package_name, 'launch'),glob.glob('launch/*.launch.py')),
(os.path.join('lib', package_name, 'utils'),glob.glob('utils/*.py'))
],
install_requires=['setuptools'],
zip_safe=True,
maintainer='iggyrrieta',
maintainer_email='[email protected]',
description='rastreator_simulation: Simulation package',
license='Apache 2.0',
entry_points={
'console_scripts': [
'ekf = rastreator_simulation.ekf_simulation:main',
'constant_cmd = rastreator_simulation.constant_cmd:main',
],
},
)
|
the-stack_0_16233
|
#!/usr/bin/env python3
"""The Graph package contains the Graph class
that carries the results from scrapping/exploring nlp models etc...
The class inherit from :obj:`rdflib.Graph`.
"""
import logging
from requests.utils import quote
import rdflib
import xlsxwriter
from rdflib.plugins.sparql.parser import parseQuery
from touch import touch
class Graph(rdflib.Graph):
"""same as a :obj:`rdflib.Graph` object (see https://rdflib.readthedocs.io/en/stable/intro_to_creating_rdf.html), but with a few additional methods
.. code:: python
>>> from lexicons_builder.graphs.graphs import Graph
RDFLib Version: 5.0.0
>>> g = Graph()
>>> # the graph has a __str__ method that serialize itself to ttl
>>> print(g)
@prefix ns1: <http://www.w3.org/2004/02/skos/core#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
<urn:default:baseUri:#holonym> a rdfs:Class ;
ns1:definition "A term that denotes a whole, a part of which is denoted by a second term. The word \"face\" is a holonym of the word \"eye\"." .
<urn:default:baseUri:#hypernym> a rdfs:Class ;
ns1:definition "a word with a broad meaning constituting a category into which words with more specific meanings fall; a superordinate. For example, colour is a hypernym of red." .
...
"""
local_namespace = "urn:default:baseUri:#"
root_words = []
root_word_uriref = rdflib.URIRef(f"{local_namespace}root_word")
base_local = rdflib.Namespace(local_namespace)
root_word_uri = f"{local_namespace}root_word_uri"
def __init__(
self, store="default", identifier=None, namespace_manager=None, base=None
):
super().__init__(
store=store,
identifier=identifier,
namespace_manager=namespace_manager,
base=base,
)
# add the root word,
self.add(
(
self.root_word_uriref,
rdflib.namespace.RDF.type,
rdflib.namespace.RDFS.Class,
)
)
self.add(
(
self.root_word_uriref,
rdflib.namespace.SKOS.definition,
rdflib.Literal(
"A root word is the term from which all of the words are fetched"
),
)
)
# hyponym
self.add(
(
self.base_local.hyponym,
rdflib.namespace.RDF.type,
rdflib.namespace.RDFS.Class,
)
)
self.add(
(
self.base_local.hyponym,
rdflib.namespace.SKOS.definition,
rdflib.Literal(
"Hyponymy is the converse of hypernymy. For example, red is a hyponym of color."
),
)
)
# hypernym
self.add(
(
self.base_local.hypernym,
rdflib.namespace.RDF.type,
rdflib.namespace.RDFS.Class,
)
)
self.add(
(
self.base_local.hypernym,
rdflib.namespace.SKOS.definition,
rdflib.Literal(
"a word with a broad meaning constituting a category into which words with more specific meanings fall; a superordinate. For example, colour is a hypernym of red."
),
)
)
# holonym
self.add(
(
self.base_local.holonym,
rdflib.namespace.RDF.type,
rdflib.namespace.RDFS.Class,
)
)
self.add(
(
self.base_local.holonym,
rdflib.namespace.SKOS.definition,
rdflib.Literal(
"""A term that denotes a whole, a part of which is denoted by a second term. The word "face" is a holonym of the word "eye"."""
),
)
)
def __contains__(self, word):
"""quick check to see if there's a word with a prefLabel predicate
that is the same as the word
>>> "book" in g
True"""
return self.word_in_graph(word)
def __str__(self):
"""quick way of serializing the graph to ttl"""
return self.to_str()
def __len__(self):
"return the number of words in the graph"
return len(self.to_list())
# did not implement __iter__ as some methods needs
# the default rdflib.Graph.__iter__()
# such as for s, p, o in self:
# def __iter__(self):
# "return the words in the graph"
# q_words = "SELECT ?word WHERE { ?_ <http://www.w3.org/2004/02/skos/core#prefLabel> ?word} ORDER BY ASC (?word)"
# for (w,) in self.query(q_words):
# yield str(w)
def word_in_graph(self, word: str) -> bool:
"""return :obj:`True` if the word is in the graph
.. code:: python
>>> g = Graph()
>>> g.add_root_word('dog')
>>> g.add_word('hound', 1, 'synonym', 'dog', comesFrom='http://example/com')
>>> g.word_in_graph('cat')
False
>>> g.word_in_graph('dog')
True
>>> # could be invoked with the in keyword
>>> 'dog' in g
True
"""
# checks if the word is already in the graph
assert isinstance(word, str), f"word is not str it is {type(word)}"
query_check = (
'ASK {?_ <http://www.w3.org/2004/02/skos/core#prefLabel> "' + word + '"}'
)
try:
parseQuery(query_check)
except Exception as e:
logging.error(f"Error while checking if the word '{word}' is in the graph")
logging.error(
f"the query '''{query_check}''' could be badly formatted OR you're using threads"
)
# the parseQuery function from rdflib could raise errors
# if used with threads
# see https://github.com/RDFLib/rdflib/issues/765
raise e
# print(f"checking if word '{word}' in graph")
if [_ for _ in self.query(query_check)][0]:
# print("it is already")
return True
else:
# print("it is not")
return False
def _check_word_type(self, word):
"raise a TypeError if type(word)!=str"
if not isinstance(word, str):
raise TypeError(
f"the word you're adding to the graph is not a string instance. It has a '{type(word)}' type"
)
def add_word(
self, word, depth, relation, target_word, synset_uri=None, comesFrom=None
):
"""Add triples to the graph that contain the relation between the word and its target.
Args:
word (str): The word to add to the graph
depth (int): The depth of the recursion
relation (str): The relation of the word to the target word.
Could be "hyponym", "hypernym", "holonym" or "synonym"
target_word (str): The word the new word is related to
.. code:: python
>>> g = Graph()
>>> g.add_root_word('car')
>>> print(g)
@prefix ns1: <http://www.w3.org/2004/02/skos/core#> .
<urn:default:baseUri:#root_word_uri> a <urn:default:baseUri:#root_word> ;
ns1:prefLabel "car" .
>>> g.add_word('bus', 1, 'synonym', 'car', comesFrom='http://example.com')
>>> print(g)
@prefix ns1: <http://www.w3.org/2004/02/skos/core#> .
@prefix ns2: <urn:default:baseUri:#> .
@prefix ns3: <http://taxref.mnhn.fr/lod/property/> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
ns2:bus ns3:isSynonymOf ns2:root_word_uri ;
ns1:prefLabel "bus" ;
ns2:comesFrom <http://example.com> ;
ns2:depth 1 .
ns2:root_word_uri a ns2:root_word ;
ns1:prefLabel "car" .
"""
self._check_word_type(word)
# to avoid invalid URIs,
# as some wordnet words contain unwanted characters
ss_word = quote(word)
ss_target_word = quote(target_word)
assert ss_word != ss_target_word
base_wn = rdflib.Namespace("http://www.w3.org/2006/03/wn/wn20/schema/")
if relation == "hyponym":
rela = base_wn.hyponymOf
elif relation == "hypernym":
rela = base_wn.hypernymOf
elif relation == "holonym":
rela = base_wn.holonymOf
elif relation == "synonym":
# word is synonym
rela = rdflib.URIRef("http://taxref.mnhn.fr/lod/property/isSynonymOf")
else:
raise ValueError(
f"The relation '{relation}' is not implemented in the graph"
)
if depth == 1:
# the relation is linked to the root word
target = rdflib.URIRef(self.root_word_uri)
else:
target = rdflib.URIRef(self.local_namespace + ss_target_word)
# adding the relation word is synonym/hyponym/... of target word
self.add(
(
rdflib.URIRef(self.local_namespace + ss_word),
rela,
target,
)
)
# adding the depth information
self.add(
(
rdflib.URIRef(self.local_namespace + ss_word),
self.base_local.depth,
rdflib.Literal(depth),
)
)
# adding the preflabel info
self.add(
(
rdflib.URIRef(self.local_namespace + ss_word),
rdflib.URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"),
rdflib.Literal(word),
)
)
# adding the synset info
if synset_uri:
self.add(
(
rdflib.URIRef(self.local_namespace + ss_word),
self.base_local.synsetLink,
rdflib.URIRef(synset_uri),
)
)
# adding the website the data is coming from
if comesFrom:
self.add(
(
rdflib.URIRef(self.local_namespace + ss_word),
self.base_local.comesFrom,
rdflib.URIRef(comesFrom),
)
)
assert (
"<file:///home/k/Documents/lexicons_builder/"
not in self.serialize(format="ttl").decode()
)
def add_root_word(self, word: str):
"""Before searching for related terms, the root word
from which all synonyms come should be added to the graph. This method creates RDF triples for the root word
Args:
word (str): The root word to add to the graph
.. code:: python
>>> g = Graph()
>>> g.add_root_word("computer")
>>> print(g)
@prefix ns1: <http://www.w3.org/2004/02/skos/core#> .
<urn:default:baseUri:#root_word_uri> a <urn:default:baseUri:#root_word> ;
ns1:prefLabel "computer" .
"""
self._check_word_type(word)
self.add(
(
rdflib.URIRef(self.root_word_uri),
rdflib.RDF.type,
rdflib.URIRef(self.local_namespace + "root_word"),
)
)
self.add(
(
rdflib.URIRef(self.root_word_uri),
rdflib.URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"),
rdflib.Literal(word),
)
)
self._set_root_word_attribute()
def is_empty(self) -> bool:
"""return :obj:`True` if the graph does not contain synonyms, hyponyms, etc
If the graph contains only root word(s) or no words, it is still considered empty and :obj:`True` is returned
Note the graph contains some definitions by default
.. code:: python
>>> g = Graph()
>>> g.is_empty()
True
>>> g.add_root_word("new")
>>> g.is_empty()
True
>>> g.add_word("young", 1, "synonym", "new")
>>> g.is_empty()
False
"""
for _, p, _ in self:
if str(p) in (
"http://taxref.mnhn.fr/lod/property/isSynonymOf",
"http://www.w3.org/2006/03/wn/wn20/schema/hyponymOf",
"http://www.w3.org/2006/03/wn/wn20/schema/hypernymOf",
"http://www.w3.org/2006/03/wn/wn20/schema/holonymOf",
):
return False
else:
return True
# for s, o, p in self:
# break
# else:
# return True
def contains_synonyms(self) -> bool:
"""return :obj:`True` if the graph contains at least one synonym
.. code:: python
>>> g = Graph()
>>> g.add_root_word("new")
>>> g.contains_synonyms()
False
>>> g.add_word("young", 1, "synonym", "new")
>>> g.contains_synonyms()
True
"""
q_check = "ASK {?_ <http://taxref.mnhn.fr/lod/property/isSynonymOf> ?_2}"
return [r for r in self.query(q_check)][0]
def _set_root_word_attribute(self):
"""set the root_words attribute
by inspecting the graph itself"""
self.root_words = []
q_root = (
"SELECT ?uri ?pref WHERE {?uri a <"
+ self.local_namespace
+ """root_word> ;
<http://www.w3.org/2004/02/skos/core#prefLabel> ?pref }"""
)
res = [r for r in self.query(q_root)]
assert res, "The query to get the root word returned no results."
contains_root_word = False
for i, (uri, pref) in enumerate(res):
# self.root_word_uri = str(uri)
# self.root_word = str(pref)
self.root_words.append(str(pref))
contains_root_word = True
if not contains_root_word:
raise ValueError(f"The graph does not contain any root word")
# if i:
# logging.warning(
# f"The query to retrive the root word returned several results"
# )
# logging.warning(f"The root words are: {self.root_words}")
def delete_several_depth(self, method="MIN"):
"""Delete duplicate depth triples so that each word keeps a single depth value
Args:
method (str): How to choose the kept depth; currently only the minimum ("MIN") is used
.. code:: python
>>> g = Graph()
>>> g.add_root_word('car')
>>> g.add_word('bus', 1, 'synonym', 'car', comesFrom='http://example/com')
>>> g.add_word('bus', 2, 'synonym', 'car', comesFrom='http://example/com')
>>> print(g)
@prefix ns1: <urn:default:baseUri:#> .
@prefix ns2: <http://taxref.mnhn.fr/lod/property/> .
@prefix ns3: <http://www.w3.org/2004/02/skos/core#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
ns1:bus ns2:isSynonymOf ns1:car,
ns1:root_word_uri ;
ns3:prefLabel "bus" ;
ns1:comesFrom <http://example/com> ;
ns1:depth 1,
2 .
ns1:root_word_uri a ns1:root_word ;
ns3:prefLabel "car" .
>>> g.delete_several_depth()
>>> print(g)
@prefix ns1: <urn:default:baseUri:#> .
@prefix ns2: <http://taxref.mnhn.fr/lod/property/> .
@prefix ns3: <http://www.w3.org/2004/02/skos/core#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
ns1:bus ns2:isSynonymOf ns1:car,
ns1:root_word_uri ;
ns3:prefLabel "bus" ;
ns1:comesFrom <http://example/com> ;
ns1:depth 1 .
ns1:root_word_uri a ns1:root_word ;
ns3:prefLabel "car" .
"""
# TODO should be implemented using one sparql query
q_words = """SELECT ?uri ( COUNT(?depth) AS ?c )
WHERE {?uri <urn:default:baseUri:#depth> ?depth}
GROUP BY ?uri
ORDER BY ASC (?uri)"""
for uri, depth in self.query(q_words):
if int(depth) < 2:
# skipping the uri that do not have several
# depth properties
continue
q_d = (
"SELECT (MIN(?o) AS ?int) WHERE { <"
+ str(uri)
+ """> <urn:default:baseUri:#depth> ?o } """
)
cur_dep = [int(dep) for dep, in self.query(q_d)][0]
q_all_depth = (
"SELECT ?unwanted_depth WHERE { <"
+ str(uri)
+ "> <urn:default:baseUri:#depth> ?unwanted_depth }"
)
for (unwanted_tripple,) in self.query(q_all_depth):
if int(unwanted_tripple) == cur_dep:
continue
self.remove(
(uri, self.base_local.depth, rdflib.Literal(int(unwanted_tripple)))
)
def _get_maximum_origin(self) -> int:
"""return the maximum number of <comesFrom>
predicates attached to a single word of the graph
:return: number
:rtype: int
>>> print(g)
ns1:article ns3:isSynonymOf ns1:paper,
ns1:piece ;
ns2:prefLabel "article" ;
ns4:hypernymOf ns1:paper ;
ns4:hyponymOf ns1:section ;
ns1:comesFrom <file:///lexicons_builder/synonyms.com>,
<file:///lexicons_builder/synonyms.reverso.net>,
<http://wordnet-rdf.princeton.edu/> ;
ns1:depth 2 ;
ns1:synsetLink <http://wordnet-rdf.princeton.edu/pwn30/06269956-n>,
<http://wordnet-rdf.princeton.edu/pwn30/06392001-n> .
>>> g._get_maximum_origin()
3
"""
query_max = """
SELECT (COUNT(?origin) as ?oCount)
WHERE
{
?uri ?origin ?_ .
FILTER (strEnds(str(?origin), 'comesFrom'))
}
GROUP BY ?uri
"""
max_ = 0
for (count,) in self.query(query_max):
if int(count) > max_:
max_ = int(count)
return max_
def pop_non_relevant_words(self):
"""Delete from the graph the words that might not be relevant.
To do this, the method searches for the highest number
of `<urn:default:baseUri:#comesFrom>` predicates per word
and removes from the graph all words whose number of `<urn:default:baseUri:#comesFrom>`
predicates is more than one below that maximum.
.. code:: python
>>> # the graph was constructed using the words "book" and "newspaper"
>>> # searching on different resources (wordnet and synonyms dictionaries)
>>> len(g)
3904
>>> g.to_list()
['(catholic) douay bible', '(mohammedan) koran', '78', 'AFISR', 'AI', 'ARDA', 'Apocrypha', 'Aramaic', 'Aramaic_script' ...
>>> # most of the word are not relevant
>>> g.pop_non_relevant_words()
>>> len(g)
106
>>> g.to_list()
['account', 'allow', 'arrange', 'article', 'assign', 'authorisation', 'batch', 'book', 'booklet', 'brochure', 'cahier', 'capture', 'card', 'classify', 'collection', ...
>>> # much more relevant words
"""
max_ = self._get_maximum_origin()
query_number_of_origins = """
SELECT ?uri ?word (COUNT(?origin) as ?oCount)
WHERE {
?uri <http://www.w3.org/2004/02/skos/core#prefLabel> ?word ;
<urn:default:baseUri:#comesFrom> ?origin
}
GROUP BY ?uri
"""
for uri, word, count in self.query(query_number_of_origins):
if int(count) < max_ - 1:
self.remove((uri, None, None))
def to_list(self) -> list:
"""return a list of all the prefLabels in the graph
>>> g = Graph()
>>> g.add_root_word('car')
>>> g.add_word('bus', 1, 'synonym', 'car', comesFrom='http://example/com')
>>> g.add_word('truck', 1, 'synonym', 'car', comesFrom='http://example/com')
>>> g.add_word('vehicle', 1, 'synonym', 'car', comesFrom='http://example/com')
>>> g.to_list()
['bus', 'car', 'truck', 'vehicle']
"""
q_words = "SELECT ?word WHERE { ?_ <http://www.w3.org/2004/02/skos/core#prefLabel> ?word} ORDER BY ASC (?word)"
return [str(w) for w, in self.query(q_words)]
# note that even that's less elegant, python's sorted function
# works faster than sparql engine's ORDER BY
# q_words = "SELECT ?word WHERE { ?_ <http://www.w3.org/2004/02/skos/core#prefLabel> ?word}"
# return sorted([str(w) for w, in self.query(q_words)])
def to_str(self) -> str:
"""return a string containing the serialized graph in the turtle format
Note that during the serialization, some items might get a file:///
string in their properties; this means the main graph has been merged
from different graph files
>>> g = Graph()
>>> g.add_root_word('dog')
>>> str(g)
'@prefix ns1: <http://www.w3.org/2004/02/skos/core#> .\\n\\n<urn:default:baseUri:#root_word_uri> a <urn:default:baseUri:#root_word> ;\\n ns1:prefLabel "dog" .\\n\\n'
"""
str_ = self.serialize(format="ttl").decode()
return str_
def to_text_file(self, out_file=None):
"""write the graph to the path provided.
Args:
out_file (str, optional): The outfile path. If None, returns the string
Example of file:
.. code:: python
book # the root word
Bible # a 1st rank synonym, linked to 'book'
Holy_Writ # a 2nd rank synonym, linked to 'Bible'
Scripture # a 2nd rank synonym, linked to 'Bible'
Word # a 2nd rank synonym, linked to 'Bible'
Epistle # a 1st rank synonym, linked to 'book'
letter # a 2nd rank synonym, linked to 'Epistle'
missive # a 2nd rank synonym, linked to 'Epistle'
"""
touch(out_file)  # note: touch() also accepts a None path without raising
def rec_search(uri, str_=None, dep=None, uri_used=[]):
q_words = (
"""SELECT ?uri ?pref ?dep WHERE {
?uri <http://www.w3.org/2004/02/skos/core#prefLabel> ?pref ;
<urn:default:baseUri:#depth> ?dep .
?uri ?relation <"""
+ uri
+ "> } ORDER BY ASC (?pref) "
)
if not str_:
str_ = ""
res = [r for r in self.query(q_words)]
for new_uri, word, dep in res:
new_uri = str(new_uri)
word = str(word)
dep = int(dep)
assert type(dep) == int
assert type(word) == type(new_uri) == str
if new_uri in uri_used:
continue
uri_used.append(new_uri)
str_ += "\t" * dep + word + "\n"
str_ = rec_search(new_uri, str_, dep, uri_used=uri_used)
return str_
if not hasattr(self, "root_words") or not getattr(self, "root_words"):
self._set_root_word_attribute()
text = rec_search(self.root_word_uri, "\n".join(self.root_words) + "\n")
if out_file:
with open(out_file, "w") as f:
print(text, file=f)
else:
return text
logging.info(f"out file is: '{out_file}'")
def to_xlsx_file(self, out_file: str):
"""Save the graph to an excel file
Args:
out_file (str): The outfile path
"""
self._set_root_word_attribute()
workbook = xlsxwriter.Workbook(out_file)
worksheet = workbook.add_worksheet()
worksheet.write(0, 0, "root word(s)")
worksheet.write(0, 1, ", ".join(self.root_words))
q_words_depth = """SELECT ?word ?depth
WHERE { ?_ <http://www.w3.org/2004/02/skos/core#prefLabel> ?word ;
<urn:default:baseUri:#depth> ?depth ;
}
ORDER BY ASC (?word)"""
for i, (word, depth,) in enumerate(
self.query(q_words_depth), start=2
): # origin
worksheet.write(i, 0, word)
worksheet.write(i, 1, depth)
# worksheet.write(i, 2, origin)
workbook.close()
logging.info(f"out file is: '{out_file}'")
if __name__ == "__main__":
pass
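# Hedged end-to-end sketch using only the methods defined above; the URL passed
# as `comesFrom` is illustrative.
def _demo_small_graph() -> str:
    g = Graph()
    g.add_root_word("car")
    g.add_word("bus", 1, "synonym", "car", comesFrom="http://example.com")
    g.add_word("coach", 2, "synonym", "bus", comesFrom="http://example.com")
    print(g.to_list())  # expected: ['bus', 'car', 'coach']
    return g.to_str()   # turtle serialization of the whole graph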
|
the-stack_0_16234
|
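# Dense-rank leaderboard lookup: `scores` is the deduplicated leaderboard in
# descending order; each query score is ranked either at the extremes directly
# or by binary search for its insertion point.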
m = int(input())
scores = list(map(int,input().split()))
scores = sorted(set(scores),reverse = True)
m=len(scores)
n = int(input())
alice = list(map(int,input().split()))
for score in alice:
if score >= scores[0] :
print (1)
elif score == scores[-1] :
print (m)
elif score < scores[-1] :
print (m+1)
else :
b=0
e=m-1
while(b<=e) :
mid=(b+e)//2
if(scores[mid] == score):
print (mid+1)
break
elif(scores[mid] > score):
if(scores[mid+1] < score):
print (mid+2)
break
b = mid + 1
else:
if(scores[mid-1] > score):
print (mid+1)
break
e = mid - 1
|
the-stack_0_16237
|
import multiprocessing
import os
import subprocess
import traceback
from itertools import product
import numpy as np
import seaborn
import torch
from matplotlib import pyplot as plt
seaborn.set()
SMALL_SIZE = 18
MEDIUM_SIZE = 22
BIGGER_SIZE = 26
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
def get_gpu_memory():
result = subprocess.check_output(
[
'nvidia-smi', '--query-gpu=memory.free',
'--format=csv,nounits,noheader'
])
gpu_memory = [int(x) for x in result.decode().strip().split()]
gpu_memory_map = dict(zip(range(len(gpu_memory)), gpu_memory))
return gpu_memory_map
LOADER_WORKERS = 4
# PIN_MEMORY = True
PIN_MEMORY = False
device = None
def get_device():
global device
if device is None:
print(f'{multiprocessing.cpu_count()} CPUs')
print(f'{torch.cuda.device_count()} GPUs')
if torch.cuda.is_available():
device = 'cuda:0'
# torch.set_default_tensor_type(torch.cuda.FloatTensor)
for k, v in get_gpu_memory().items():
print(f'Device {k} memory: {v} MiB')
torch.backends.cudnn.benchmark = True
else:
# torch.set_default_tensor_type(torch.FloatTensor)
device = 'cpu'
print(f'Using: {device}')
return device
def loader(data, batch_size):
return torch.utils.data.DataLoader(dataset=data, batch_size=batch_size,
shuffle=True,
pin_memory=PIN_MEMORY,
num_workers=LOADER_WORKERS)
def load_or_run(dir_name, run_name, method, *args, **kwargs):
os.makedirs(dir_name, exist_ok=True)
filepath = os.path.join(dir_name, f'{run_name}@state')
print(f'State file: {filepath}')
loaded = False
if os.path.isfile(filepath):
try:
with open(filepath, 'rb') as f:
context = torch.load(f, map_location=get_device())
loaded = True
except Exception:
print(f'Exception when loading {filepath}')
traceback.print_exc()
if not loaded:
context = {}
context['model_state'] = None
context['run_name'] = run_name
context['dir_name'] = dir_name
# TODO maybe move arguments into context?
context, ex = method(context, *args, **kwargs)
if ex is not None:
raise ex
if 'exception' in context:
print(context['traceback'])
return context
def load_or_run_n(n, dir_name, run_name, method, *args, **kwargs):
results = []
for i in range(n):
name = f'{run_name}_{i}'
results.append(load_or_run(dir_name, name, method, *args, **kwargs))
return results
def matrix_to_figure(matrix, xlabel="", ylabel=""):
matrix = matrix.cpu().numpy()
fig, ax = plt.subplots(figsize=(16, 16), facecolor='w', edgecolor='k')
ax.imshow(matrix, cmap='Spectral_r', vmin=-1, vmax=1)
# set x axis
ax.set_xticks(np.arange(matrix.shape[1]))
ax.set_xticklabels([str(i) for i in np.arange(matrix.shape[1])], fontsize=18)
ax.xaxis.tick_top()
ax.xaxis.set_label_position('top')
ax.set_xlabel(xlabel)
# set y axis
ax.set_yticks(np.arange(matrix.shape[0]))
ax.set_yticklabels([str(i) for i in np.arange(matrix.shape[0])], fontsize=18)
ax.yaxis.set_label_position('left')
ax.yaxis.tick_left()
ax.set_ylabel(ylabel)
# plot text
for i, j in product(range(matrix.shape[0]), range(matrix.shape[1])):
ax.text(j, i, f'{matrix[i, j]:4.2f}' if matrix[i, j] != 0 else '.', horizontalalignment='center', fontsize=14,
verticalalignment='center', color='black')
ax.autoscale()
fig.set_tight_layout(True)
return fig
def cs_vec_to_figure(cs_vec, xlabel=""):
cs_vec = cs_vec.cpu().numpy()
fig, ax = plt.subplots(figsize=(22, 2), facecolor='w', edgecolor='k')
ax.imshow(cs_vec.reshape(1, -1), cmap='Spectral_r', vmin=-1, vmax=1)
ax.set_xticks(np.arange(cs_vec.shape[0]))
ax.set_xticklabels([str(i) for i in np.arange(cs_vec.shape[0])], fontsize=18)
ax.xaxis.tick_top()
ax.xaxis.set_label_position('top')
ax.set_xlabel(xlabel)
ax.set_yticks([])
for idx in range(len(cs_vec)):
ax.text(idx, 0, f'{cs_vec[idx]:4.2f}' if cs_vec[idx] != 0 else '.', horizontalalignment='center', fontsize=14,
verticalalignment='center', color='black')
ax.autoscale()
fig.set_tight_layout(True)
return fig
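# Hedged usage sketch: render a small identity matrix with the helper above; the
# output file name is illustrative.
if __name__ == "__main__":
    demo_fig = matrix_to_figure(torch.eye(4), xlabel="column", ylabel="row")
    demo_fig.savefig("matrix_demo.png", dpi=150)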
|
the-stack_0_16240
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 7 14:19:36 2021
@author: bressler
"""
from PICOcode.REFPROP.SeitzModel import SeitzModel
import numpy as np
import matplotlib.pyplot as plt
from baxterelectronrecoilmodel import BTM
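# The code below reads per-run argon spike summaries, pairs each source run
# (Cs-137 or Cm-244) with the background run at the same concentration and
# pressure, computes background-subtracted rates with propagated uncertainties,
# evaluates the Seitz threshold via BTM, and writes the results to a text file.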
with open('/coupp/data/home/coupp/users/bressler/output/argonspikeoutput.txt','r') as argonfile:
d = argonfile.readlines()
data = {}
for line in d:
elements = line.split()
#print([float(e) for e in elements])
if int(elements[1]) == 0:
data["Background 20C %.1f ppm %.1f psia"%(float(elements[0]), float(elements[3]))] = [float(elements[i]) for i in range(len(elements))]
elif int(elements[1]) == 137:
data["Cs-137 20 position %d %.1f ppm %.1f psia"%(int(elements[2]), float(elements[0]), float(elements[3]))] = [float(elements[i]) for i in range(len(elements))]
elif int(elements[1]) == 244:
data["Cm-244 20 %.1f ppm %.1f psia"%(float(elements[0]), float(elements[3]))] = [float(elements[i]) for i in range(len(elements))]
subtractedData = {}
subtractedargonfile = open('/coupp/data/home/coupp/users/bressler/output/subtractedargonoutput.txt','w')
for k in data.keys():
datum = data[k]
#print(datum)
if datum[1] != 0.0:
associatedbg = data["Background 20C %.1f ppm %.1f psia"%(float(datum[0]), float(datum[3]))]
bgsubrate = datum[7] - associatedbg[7]
bgsubrateerror = np.sqrt(datum[8]**2 + associatedbg[8]**2)
#print(bgsubrate)
[Qseitz, Eion, f_ion, P_ion, f_seitz, P_contaminated]= BTM(datum[4]-1.3, datum[5], 'r218')
print("P=%.2f"%datum[4])
print("T=%.2f"%datum[5])
print("Qseitz=%.3f"%Qseitz)
subtractedData[k] = [datum[0], datum[1], datum[2], datum[3], datum[4], datum[5], Qseitz, f_seitz, bgsubrate, bgsubrateerror]
subtractedargonfile.write(
'%.1f %d %d %.1f %.1f %.1f %.2f %.2f %.2f %.2f\n'%(datum[0],
datum[1], datum[2], datum[3], datum[4]-1.3, datum[5], Qseitz, f_seitz, bgsubrate, bgsubrateerror))
subtractedargonfile.close()
|
the-stack_0_16241
|
'''
Created on Apr 4, 2022
@author: mballance
'''
import dataclasses
from rctgen.impl.ctor import Ctor
from rctgen.impl.type_info import TypeInfo
from rctgen.impl.type_kind_e import TypeKindE
from rctgen.impl.exec_group import ExecGroup
from rctgen.impl.rand_t import RandT
from rctgen.impl.scalar_t import ScalarT
from libvsc import core as vsc
from rctgen.impl.pool_t import PoolT
from rctgen.impl.struct_kind_e import StructKindE
from rctgen.impl.lock_share_t import LockShareT
class DecoratorImplBase(object):
def __init__(self, kind):
self._kind = kind
self._supports_constraints = True
def populate_execs(self, ti : TypeInfo, supported_s):
return None
def __call__(self, T):
ctor = Ctor.inst()
Tp = dataclasses.dataclass(T, init=False)
ds_t = self._mkLibDataType(T, T.__qualname__, ctor.ctxt())
ti = self._mkTypeInfo(self._kind)
setattr(T, "_typeinfo", ti)
ti.lib_obj = ds_t
self._populateFields(ti, Tp)
#************************************************************
#* Populate constraints from this type and base types
#************************************************************
constraints = Ctor.inst().pop_constraint_decl()
constraint_s = set()
for c in constraints:
constraint_s.add(c._name)
ti._constraint_l.append(c)
for b in T.__bases__:
if hasattr(b, "_typeinfo"):
self._populateConstraints(
ti,
b,
constraint_s)
#************************************************************
#* Populate exec blocks from this type and base types
#************************************************************
execs = Ctor.inst().pop_exec_types()
for e in execs:
print("Exec: %s" % str(e.kind))
if not self._validateExec(e.kind):
raise Exception("Unsupported exec kind %s" % str(e.kind))
if e.kind not in ti._exec_m.keys():
ti._exec_m[e.kind] = ExecGroup(e.kind)
ti._exec_m[e.kind].add_exec(e)
for b in T.__bases__:
if hasattr(b, "_typeinfo"):
self._populateExecs(
ti,
b)
return Tp
def _validateExec(self, kind):
return True
def _validateField(self, name, type, is_rand):
return True
def _mkTypeInfo(self, kind : TypeKindE):
return TypeInfo(kind)
def _mkLibDataType(self, T, name, ctxt):
raise NotImplementedError("_mkLibDataType not implemented for %s" % str(type(self)))
def _populateFields(self, ti : TypeInfo, T):
for f in dataclasses.fields(T):
attr = vsc.ModelFieldFlag.NoFlags
is_rand = False
iv=0
t = f.type
if issubclass(t, RandT):
t = t.T
attr |= vsc.ModelFieldFlag.DeclRand
is_rand = True
ctor = Ctor.inst()
print("f: %s" % str(f))
# The signature of a creation function is:
# - name
# - is_rand
# - idx
if issubclass(t, ScalarT):
self._processFieldScalar(ti, f, attr, t)
elif issubclass(t, PoolT):
self._processFieldPool(ti, f, attr, t)
elif issubclass(t, LockShareT):
print("LockShare!")
self._processFieldLockShare(ti, f, attr, t)
elif hasattr(t, "_typeinfo") and isinstance(t._typeinfo, TypeInfo):
# This is a field of user-defined type
print("Has TypeInfo")
field_t = ctor.ctxt().mkTypeField(
f.name,
t._typeinfo.lib_obj,
attr,
None)
ti.lib_obj.addField(field_t)
ti._field_ctor_l.append((f.name, lambda name, t=t: t._createInst(t, name)))
print("Field: %s" % str(f))
pass
def _processFieldLockShare(self, ti, f, attr, t):
ctor = Ctor.inst()
if hasattr(t.T, "_typeinfo"):
print("Kind: %s" % str(t.T._typeinfo._kind))
claim_t = t.T._typeinfo.lib_obj
else:
raise Exception("Type %s is not a PyRctGen type" % t.T.__qualname__)
if f.default is not dataclasses.MISSING:
print("default: %s" % str(f.default))
raise Exception("Lock/Share fields cannot be assigned a value")
field_t = ctor.ctxt().mkTypeFieldClaim(
f.name,
claim_t,
t.IsLock)
ti.lib_obj.addField(field_t)
ti._field_ctor_l.append((f.name, t.createField))
pass
def _processFieldPool(self, ti, f, attr, t):
ctor = Ctor.inst()
decl_size = -1
pool_t = None
if hasattr(t.T, "_typeinfo"):
print("Kind: %s" % str(t.T._typeinfo._kind))
pool_t = t.T._typeinfo.lib_obj
else:
raise Exception("Type %s is not a PyRctGen type" % t.T.__qualname__)
if f.default is not dataclasses.MISSING:
if t.T._typeinfo._kind != StructKindE.Resource:
raise Exception("Only resource pools may be given a size. Pool %s is of kind %s" % (
f.name, t.T._typeinfo._kind))
decl_size = int(f.default)
field_t = ctor.ctxt().mkTypeFieldPool(
f.name,
pool_t,
attr,
decl_size)
ti.lib_obj.addField(field_t)
ti._field_ctor_l.append((f.name, t.createField))
def _processFieldScalar(self, ti, f, attr, t):
ctor = Ctor.inst()
lt = ctor.ctxt().findDataTypeInt(t.S, t.W)
if lt is None:
lt = ctor.ctxt().mkDataTypeInt(t.S, t.W)
ctor.ctxt().addDataTypeInt(lt)
iv_m = None
if f.default is not dataclasses.MISSING:
iv_m = ctor.ctxt().mkModelVal()
iv_m.setBits(t.W)
if t.S:
iv_m.set_val_i(int(f.default))
else:
iv_m.set_val_u(int(f.default))
field_t = ctor.ctxt().mkTypeField(
f.name,
lt,
attr,
iv_m)
ti.lib_obj.addField(field_t)
ti._field_ctor_l.append((f.name, t.createField))
def _populateExecs(self, ti, T):
T_ti = T._typeinfo
for kind in T_ti._exec_m.keys():
# If the target type hasn't registered an exec of this kind,
# but a base type has, then link that up
if kind not in ti._exec_m.keys():
ti._exec_m[kind] = T_ti._exec_m[kind]
elif ti._exec_m[kind].super is None:
# Link the first available super-type exec to the
# 'super' link
ti._exec_m[kind].super = T_ti._exec_m[kind]
# Now, continue working back through the inheritance hierarchy
for b in T.__bases__:
if hasattr(b, "_typeinfo"):
self._populateExecs(
ti,
b)
def _populateConstraints(self, ti, T, name_s):
T_ti = T._typeinfo
for c in T_ti._constraint_l:
if c._name not in name_s:
name_s.add(c._name)
ti._constraint_l.append(c)
for b in T.__bases__:
if hasattr(b, "_typeinfo"):
self._populateConstraints(
ti,
b,
name_s)
|
the-stack_0_16246
|
import unittest
from cybercaptain.processing.filter import processing_filter
class ProcessingFilterEQTest(unittest.TestCase):
"""
Test the filters for EQ
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
arguments = {'src': '.',
'filterby': 'EQ',
'rule': 'EQ 500', # the value must be exactly 500.
'target': '.'}
self.processing = processing_filter(**arguments)
def test_eq_positive(self):
"""
Test if the filter passes EQ correctly.
"""
# border line test
self.assertTrue(self.processing.filter({"EQ":500}), 'should not be filtered')
def test_eq_negative(self):
"""
Test if the filter fails EQ correctly.
"""
# border line test
self.assertFalse(self.processing.filter({"EQ":501}), 'should be filtered')
self.assertFalse(self.processing.filter({"EQ":499}), 'should be filtered')
# deep test
self.assertFalse(self.processing.filter({"EQ":600}), 'should be filtered')
self.assertFalse(self.processing.filter({"EQ":400}), 'should be filtered')
|
the-stack_0_16249
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.3
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/notebooks/score_matching_swiss_roll.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="eTpkSxV-Nwq3"
# Fit score-based generative model to 2d swiss roll data.
#
# Code is taken from
# https://jax.readthedocs.io/en/latest/notebooks/score_matching.html
#
# Notebook author: Denis Mazur, edited by Just Heuristic
#
# + id="mm7ZX-zYNwNe"
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_swiss_roll
import jax
import jax.numpy as jnp
from jax.experimental import optimizers
from jax.experimental import stax
from functools import partial
from IPython.display import clear_output
# + [markdown] id="jRbkmuINOa68"
# # Dataset
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="hKCWtx6bN6WH" outputId="0b76611f-0bb0-4ab8-8199-bc73bbe90dda"
def sample_batch(size, noise=1.0):
x, _= make_swiss_roll(size, noise=noise)
x = x[:, [0, 2]] / 10.0
return np.array(x)
plt.figure(figsize=[16, 16])
plt.scatter(*sample_batch(10**4).T, alpha=0.1)
plt.axis('off')
plt.tight_layout()
plt.savefig('swiss_roll.png')
# + [markdown] id="wKYFKsbmOmu_"
# # Fit score function
# + id="mjWY78zpOcke"
# Set up network to predict scores
net_init, net_apply = stax.serial(
stax.Dense(128), stax.Softplus,
stax.Dense(128), stax.Softplus,
stax.Dense(2),
)
# Create optimizer. Note that both network and optimizer returns pure (stateless) functions
opt_init, opt_update, get_params = optimizers.adam(1e-3)
# + id="KRvf5xVDOsBB"
@jax.jit
def compute_loss(net_params, inputs):
# a function that computes jacobian by forward mode differentiation
jacobian = jax.jacfwd(net_apply, argnums=-1)
# we use jax.vmap to vectorize jacobian function along batch dimension
batch_jacobian = jax.vmap(partial(jacobian, net_params))(inputs) # [batch, dim, dim]
trace_jacobian = jnp.trace(batch_jacobian, axis1=1, axis2=2)
output_norm_sq = jnp.square(net_apply(net_params, inputs)).sum(axis=1)
return jnp.mean(trace_jacobian + 1/2 * output_norm_sq)
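# The quantity above is the implicit (Hyvärinen) score-matching objective,
#   E_x[ tr(∂s/∂x) + 0.5 * ||s(x)||^2 ],
# estimated over the batch; minimizing it fits s(x) ≈ ∇_x log p(x) without ever
# evaluating the normalizing constant.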
@jax.jit
def train_step(step_i, opt_state, batch, key):
net_params = get_params(opt_state)
loss = compute_loss(net_params, batch)
grads = jax.grad(compute_loss, argnums=0)(net_params, batch)
return loss, opt_update(step_i, grads, opt_state)
# + id="C61QTY6iTJLb"
def train_loop(key, train_step, nsteps):
key, subkey = jax.random.split(key)
out_shape, net_params = net_init(subkey, input_shape=(-1, 2))
opt_state = opt_init(net_params)
loss_history = []
for i in range(nsteps):
x = sample_batch(size=128)
key, subkey = jax.random.split(key)
loss, opt_state = train_step(i, opt_state, x, subkey)
loss_history.append(loss.item())
if i % 200 == 0:
clear_output(True)
plt.figure(figsize=[16, 8])
plt.subplot(1, 2, 1)
plt.title("mean loss = %.3f" % jnp.mean(jnp.array(loss_history[-32:])))
plt.scatter(jnp.arange(len(loss_history)), loss_history)
plt.grid()
plt.subplot(1, 2, 2)
net_params = get_params(opt_state)
xx = jnp.stack(jnp.meshgrid(jnp.linspace(-1.5, 2.0, 50), jnp.linspace(-1.5, 2.0, 50)), axis=-1).reshape(-1, 2)
scores = net_apply(net_params, xx)
scores_norm = jnp.linalg.norm(scores, axis=-1, ord=2, keepdims=True)
scores_log1p = scores / (scores_norm + 1e-9) * jnp.log1p(scores_norm)
plt.quiver(*xx.T, *scores_log1p.T, width=0.002, color='green')
plt.xlim(-1.5, 2.0)
plt.ylim(-1.5, 2.0)
plt.show()
return opt_state
# + colab={"base_uri": "https://localhost:8080/", "height": 498} id="R_X-lTClTOuh" outputId="04de5845-32fa-4595-f66f-57f87cb6ef19"
opt_state = train_loop(jax.random.PRNGKey(seed=42), train_step, 10000)
# + id="7kiWmJdUVgP6"
opt_state_basic = opt_state
# + [markdown] id="MDUOCMhiO3RA"
# # Plot gradient field
# + id="KcIsShngW2GM"
opt_state = opt_state_basic
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="DPHCKA-IO2tA" outputId="c674e722-ba78-445c-f5d9-02da1a8b39c2"
plt.figure(figsize=[16, 16])
net_params = get_params(opt_state)
xx = jnp.stack(jnp.meshgrid(jnp.linspace(-1.5, 1.5, 50), jnp.linspace(-1.5, 1.5, 50)), axis=-1).reshape(-1, 2)
scores = net_apply(net_params, xx)
scores_norm = jnp.linalg.norm(scores, axis=-1, ord=2, keepdims=True)
scores_log1p = scores / (scores_norm + 1e-9) * jnp.log1p(scores_norm)
plt.quiver(*xx.T, *scores_log1p.T, width=0.002, color='green')
plt.scatter(*sample_batch(10_000).T, alpha=0.1)
plt.axis('off')
plt.tight_layout()
plt.savefig('score_matching_swiss_roll.png')
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="lApxtHmTWzoN" outputId="3fb02e72-bbd8-4470-cd7f-1d83b3d3f2b1"
plt.figure(figsize=[16, 16])
net_params = get_params(opt_state)
xx = jnp.stack(jnp.meshgrid(jnp.linspace(-1.5, 1.5, 50), jnp.linspace(-1.5, 1.5, 50)), axis=-1).reshape(-1, 2)
scores = net_apply(net_params, xx)
scores_norm = jnp.linalg.norm(scores, axis=-1, ord=2, keepdims=True)
scores_log1p = scores / (scores_norm + 1e-9) * jnp.log1p(scores_norm)
plt.quiver(*xx.T, *scores_log1p.T, width=0.002, color='green')
#plt.scatter(*sample_batch(10_000).T, alpha=0.1)
plt.axis('off')
plt.tight_layout()
plt.savefig('score_matching_swiss_roll_no_data.png')
# + [markdown] id="DSgVc8mxPNiS"
# # Fit using sliced score matching
# + id="KO1FOR6_PPNn"
@jax.jit
def compute_ssm_loss(net_params, inputs, key):
apply = jax.jit(partial(net_apply, net_params))
batch_dot = partial(jnp.einsum, 'bu,bu->b')
# generate random vectors from N(0, I)
v = jax.random.normal(key, shape=inputs.shape)
# predict score and compute jacobian of score times v
score, jac_v = jax.jvp(apply, [inputs], [v])
return jnp.mean(batch_dot(v, jac_v) + 1/2 * batch_dot(v, score) ** 2)
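# Sliced score matching replaces the full Jacobian trace with random projections,
#   E_{x,v}[ v^T (∂s/∂x) v + 0.5 * (v^T s(x))^2 ],  v ~ N(0, I),
# so only a Jacobian-vector product (jax.jvp) is needed per sample.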
@jax.jit
def train_step(step_i, opt_state, batch, key):
# the new compute_loss is random key dependent, thus we need a new train_step function
net_params = get_params(opt_state)
loss = compute_ssm_loss(net_params, batch, key)
grads = jax.grad(compute_ssm_loss, argnums=0)(net_params, batch, key)
return loss, opt_update(step_i, grads, opt_state)
# + colab={"base_uri": "https://localhost:8080/", "height": 498} id="SnN8RKubS3cx" outputId="b03c0751-aa73-4349-ad61-9c26daa37919"
opt_state = train_loop(jax.random.PRNGKey(seed=42), train_step, 10000)
# + id="kENOLLQRVmXQ"
opt_state_sliced = opt_state
# + [markdown] id="PPwZiwI3PfVB"
# # Plot gradient field
# + colab={"base_uri": "https://localhost:8080/", "height": 917} id="cS6WhEMlPWt5" outputId="24e3de0b-1afd-4b30-a14e-385d47582032"
plt.figure(figsize=[16, 16])
net_params = get_params(opt_state)
xx = jnp.stack(jnp.meshgrid(jnp.linspace(-1.5, 1.5, 50), jnp.linspace(-1.5, 1.5, 50)), axis=-1).reshape(-1, 2)
scores = net_apply(net_params, xx)
scores_norm = jnp.linalg.norm(scores, axis=-1, ord=2, keepdims=True)
scores_log1p = scores / (scores_norm + 1e-9) * jnp.log1p(scores_norm)
plt.quiver(*xx.T, *scores_log1p.T, width=0.002, color='green')
plt.scatter(*sample_batch(10_000).T, alpha=0.1)
plt.savefig('score_matching_sliced_swiss_roll.pdf', dpi=300)
# + colab={"base_uri": "https://localhost:8080/", "height": 933} id="DRZ3D3CTWg2W" outputId="66669c5e-7ace-4a8d-8dfd-798f2825b61e"
plt.figure(figsize=[16, 16])
net_params = get_params(opt_state)
xx = jnp.stack(jnp.meshgrid(jnp.linspace(-1.5, 1.5, 50), jnp.linspace(-1.5, 1.5, 50)), axis=-1).reshape(-1, 2)
scores = net_apply(net_params, xx)
scores_norm = jnp.linalg.norm(scores, axis=-1, ord=2, keepdims=True)
scores_log1p = scores / (scores_norm + 1e-9) * jnp.log1p(scores_norm)
plt.quiver(*xx.T, *scores_log1p.T, width=0.002, color='green')
#plt.scatter(*sample_batch(10_000).T, alpha=0.1)
# + [markdown] id="ePW3Z5SNP91R"
# # Langevin sampling
# + id="WEvDt6HGPhLS"
def sample_langevin(x_initial, *, net_params, key, eps=1e-2, eps_decay=0.9, num_steps=15, temperature=1.0):
""" sample x ~ p(x) by applying approximate Langevin dynamics, return a sequence of x_t """
x_t, x_sequence = x_initial, [x_initial]
for t in range(num_steps):
key, subkey = jax.random.split(key)
z_t = jax.random.normal(subkey, shape=x_t.shape)
x_t = x_t + eps / 2 * net_apply(net_params, x_t) + jnp.sqrt(eps) * temperature * z_t
x_sequence.append(x_t)
eps *= eps_decay
return jnp.stack(x_sequence)
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="eHj9dTScQDog" outputId="92a5fe41-1a1f-46f5-8077-b86ef03ba5f4"
plt.figure(figsize=[16, 16])
key = jax.random.PRNGKey(42)
net_params = get_params(opt_state)
for x_initial in jnp.array([[-1.5, -1.5], [0, 0], [1.5, 0]]):
key, subkey = jax.random.split(key)
# sample x sequence
xx = sample_langevin(x_initial, key=subkey, net_params=net_params, num_steps=25)
plt.scatter(xx.T[0], xx.T[1], color="blue")
# draw arrows for each mcmc step
deltas = (xx[1:] - xx[:-1])
deltas = deltas - deltas / jnp.linalg.norm(deltas, keepdims=True, axis=-1) * 0.04
for i, arrow in enumerate(deltas):
plt.arrow(xx[i][0], xx[i][1], arrow[0], arrow[1], width=1e-4, head_width=2e-2, color="orange")
# plot data points and gradients
plt.plot()
xx = jnp.stack(jnp.meshgrid(jnp.linspace(-1.5, 1.5, 50), jnp.linspace(-1.5, 1.5, 50)), axis=-1).reshape(-1, 2)
scores = net_apply(net_params, xx)
scores_norm = jnp.linalg.norm(scores, axis=-1, ord=2, keepdims=True)
scores_log1p = scores / (scores_norm + 1e-9) * jnp.log1p(scores_norm)
plt.quiver(*xx.T, *scores_log1p.T, width=0.002, color='green')
plt.axis('off')
plt.scatter(*sample_batch(10_000).T, alpha=0.025)
plt.tight_layout()
plt.savefig('langevin_swiss_roll.png')
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 33} id="6jhP-yF5QD0B" outputId="c821a148-722a-4e97-9206-627b8391ac67"
|
the-stack_0_16253
|
import torch
from .misc import _convert_to_tensor, _dot_product
def _interp_fit(y0, y1, y_mid, f0, f1, dt):
"""Fit coefficients for 4th order polynomial interpolation.
Args:
y0: function value at the start of the interval.
y1: function value at the end of the interval.
y_mid: function value at the mid-point of the interval.
f0: derivative value at the start of the interval.
f1: derivative value at the end of the interval.
dt: width of the interval.
Returns:
List of coefficients `[a, b, c, d, e]` for interpolating with the polynomial
`p = a * x ** 4 + b * x ** 3 + c * x ** 2 + d * x + e` for values of `x`
between 0 (start of interval) and 1 (end of interval).
"""
a = tuple(
_dot_product([-2 * dt, 2 * dt, -8, -8, 16], [f0_, f1_, y0_, y1_, y_mid_])
for f0_, f1_, y0_, y1_, y_mid_ in zip(f0, f1, y0, y1, y_mid)
)
b = tuple(
_dot_product([5 * dt, -3 * dt, 18, 14, -32], [f0_, f1_, y0_, y1_, y_mid_])
for f0_, f1_, y0_, y1_, y_mid_ in zip(f0, f1, y0, y1, y_mid)
)
c = tuple(
_dot_product([-4 * dt, dt, -11, -5, 16], [f0_, f1_, y0_, y1_, y_mid_])
for f0_, f1_, y0_, y1_, y_mid_ in zip(f0, f1, y0, y1, y_mid)
)
d = tuple(dt * f0_ for f0_ in f0)
e = y0
return [a, b, c, d, e]
def _interp_evaluate(coefficients, t0, t1, t):
"""Evaluate polynomial interpolation at the given time point.
Args:
coefficients: list of Tensor coefficients as created by `interp_fit`.
t0: scalar float64 Tensor giving the start of the interval.
t1: scalar float64 Tensor giving the end of the interval.
t: scalar float64 Tensor giving the desired interpolation point.
Returns:
Polynomial interpolation of the coefficients at time `t`.
"""
dtype = coefficients[0][0].dtype
device = coefficients[0][0].device
t0 = _convert_to_tensor(t0, dtype=dtype, device=device)
t1 = _convert_to_tensor(t1, dtype=dtype, device=device)
t = _convert_to_tensor(t, dtype=dtype, device=device)
assert (t0 <= t) & (t <= t1), 'invalid interpolation, fails `t0 <= t <= t1`: {}, {}, {}'.format(t0, t, t1)
x = ((t - t0) / (t1 - t0)).type(dtype).to(device)
xs = [torch.tensor(1).type(dtype).to(device), x]
for _ in range(2, len(coefficients)):
xs.append(xs[-1] * x)
return tuple(_dot_product(coefficients_, reversed(xs)) for coefficients_ in zip(*coefficients))
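# Hedged sanity sketch: fit the quartic interpolant to y(t) = t**2 on [0, 1]
# (values wrapped in 1-tuples, as the helpers above expect) and evaluate it at
# the midpoint, which should recover 0.25.
def _demo_interp():
    y0, y1, y_mid = (torch.tensor(0.0),), (torch.tensor(1.0),), (torch.tensor(0.25),)
    f0, f1 = (torch.tensor(0.0),), (torch.tensor(2.0),)
    coefficients = _interp_fit(y0, y1, y_mid, f0, f1, dt=1.0)
    return _interp_evaluate(coefficients, 0.0, 1.0, 0.5)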
|
the-stack_0_16254
|
import codecs
from typing import Any
from flask import Blueprint, jsonify
from werkzeug.exceptions import abort
from shrunk.client import ShrunkClient
from shrunk.client.exceptions import NoSuchObjectException
from shrunk.util.decorators import require_login
__all__ = ['bp']
bp = Blueprint('request', __name__, url_prefix='/api/v1/request')
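# The endpoints below list a user's pending link-access requests and accept or
# deny a request identified by its hex-encoded token; require_login supplies the
# caller's netid and a ShrunkClient to each view.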
@bp.route('/pending', methods=['GET'])
@require_login
def get_pending_requests(netid: str, client: ShrunkClient) -> Any:
requests = client.links.get_pending_access_requests(netid)
def jsonify_request(req: Any) -> Any:
return {
'link_id': str(req['_id']),
'title': req['title'],
'request_token': str(codecs.encode(req['request']['token'], encoding='hex'), 'utf8'),
'requesting_netid': req['request']['requesting_netid'],
'request_time': req['request']['created_at'].isoformat(),
}
return jsonify({'requests': [jsonify_request(req) for req in requests]})
@bp.route('/resolve/<hex_token:token>/accept')
@require_login
def accept_request(netid: str, client: ShrunkClient, token: bytes) -> Any:
try:
if not client.roles.has('admin', netid) and not client.links.check_access_request_permission(token, netid):
abort(403)
except NoSuchObjectException:
abort(404)
client.links.accept_access_request(token)
return '', 204
@bp.route('/resolve/<hex_token:token>/deny')
@require_login
def deny_request(netid: str, client: ShrunkClient, token: bytes) -> Any:
try:
if not client.roles.has('admin', netid) and not client.links.check_access_request_permission(token, netid):
abort(403)
except NoSuchObjectException:
abort(404)
client.links.deny_access_request(token)
return '', 204
|
the-stack_0_16255
|
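# Reads the simulation traces for shots 104S/105S/106S, interpolates each
# simulated and observed curve onto a common 200-point grid per shot, overlays
# them as a quick sanity check, and writes the interpolated arrays out as CSVs.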
import numpy as np
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
import os
path_104 = os.path.abspath('../../../Downloads/realTime-master-AlAl-data_trial5/AlAl/data_trial5/104S/')
files_104_unsorted = os.listdir(path_104)
order = [int(str.split(ff, "_")[1]) for ff in files_104_unsorted]
files_104 = [x for _, x in sorted(zip(order, files_104_unsorted))]
nfiles_104 = len(files_104)
files2_104 = [path_104 + '/' + files_104[i] for i in range(len(files_104))]
dat_104 = [None] * nfiles_104
for i in range(nfiles_104):
with open(files2_104[i]) as file:
temp = file.readlines()
dat_104[i] = np.vstack([np.float_(str.split(temp[i])) for i in range(2,len(temp))])
path_105 = os.path.abspath('../../../Downloads/realTime-master-AlAl-data_trial5/AlAl/data_trial5/105S/')
files_105_unsorted = os.listdir(path_105)
order = [int(str.split(ff, "_")[1]) for ff in files_105_unsorted]
files_105 = [x for _, x in sorted(zip(order, files_105_unsorted))]
nfiles_105 = len(files_105)
files2_105 = [path_105 + '/' + files_105[i] for i in range(len(files_105))]
dat_105 = [None] * nfiles_105
for i in range(nfiles_105):
with open(files2_105[i]) as file:
temp = file.readlines()
    dat_105[i] = np.vstack([np.float_(str.split(temp[j])) for j in range(2, len(temp))])
path_106 = os.path.abspath('../../../Downloads/realTime-master-AlAl-data_trial5/AlAl/data_trial5/106S/')
files_106_unsorted = os.listdir(path_106)
order = [int(str.split(ff, "_")[1]) for ff in files_106_unsorted]
files_106 = [x for _, x in sorted(zip(order, files_106_unsorted))]
nfiles_106 = len(files_106)
files2_106 = [path_106 + '/' + files_106[i] for i in range(len(files_106))]
dat_106 = [None] * nfiles_106
for i in range(nfiles_106):
with open(files2_106[i]) as file:
temp = file.readlines()
    dat_106[i] = np.vstack([np.float_(str.split(temp[j])) for j in range(2, len(temp))])
xrange_104 = [0.75, 1.3]
xrange_105 = [1.2, 2.2]
xrange_106 = [0.65, 1.3]
M = 200
n = 1000
xx104 = np.linspace(xrange_104[0], xrange_104[1], M)
xx105 = np.linspace(xrange_105[0], xrange_105[1], M)
xx106 = np.linspace(xrange_106[0], xrange_106[1], M)
xx_all = [xx104, xx105, xx106]
sims_all = np.empty([3, n, M]) # 3 datasets, 1000 samples, 200 points on curve
for i in range(n):
ifunc = interp1d(dat_104[i][:,1], dat_104[i][:,3], kind = 'cubic')
sims_all[0, i, :] = ifunc(xx104)
ifunc = interp1d(dat_105[i][:,1], dat_105[i][:,3], kind = 'cubic')
sims_all[1, i, :] = ifunc(xx105)
ifunc = interp1d(dat_106[i][:,1], dat_106[i][:,3], kind = 'cubic')
sims_all[2, i, :] = ifunc(xx106)
with open('./../data/Al-5083/flyer_data/Data_S104S.txt') as file:
temp = file.readlines()
obs1 = np.vstack([np.float_(str.split(temp[i])) for i in range(2,len(temp))])
with open('./../data/Al-5083/flyer_data/Data_S105S.txt') as file:
temp = file.readlines()
obs2 = np.vstack([np.float_(str.split(temp[i])) for i in range(2,len(temp))])
with open('./../data/Al-5083/flyer_data/Data_S106S.txt') as file:
temp = file.readlines()
obs3 = np.vstack([np.float_(str.split(temp[i])) for i in range(2,len(temp))])
obs_all = np.empty([3, M])
ifunc = interp1d(obs1[:,1], obs1[:,0]*1e-4, kind = 'cubic')
obs_all[0] = ifunc(xx104)
ifunc = interp1d(obs2[:,1]-.2, obs2[:,0]*1e-4, kind = 'cubic')
obs_all[1] = ifunc(xx105)
ifunc = interp1d(obs3[:,0]-2.55, obs3[:,1]*1e-4, kind = 'cubic')
obs_all[2] = ifunc(xx106)
plt.plot(sims_all[0].T, color='lightgrey')
plt.plot(obs_all[0])
plt.show()
plt.plot(sims_all[1].T, color='lightgrey')
plt.plot(obs_all[1])
plt.show()
plt.plot(sims_all[2].T, color='lightgrey')
plt.plot(obs_all[2])
plt.show()
# let the obs have large time shift discrepancy
np.savetxt("./../data/Al-5083/flyer_data/sims104.csv", sims_all[0], delimiter=",")
np.savetxt("./../data/Al-5083/flyer_data/sims105.csv", sims_all[1], delimiter=",")
np.savetxt("./../data/Al-5083/flyer_data/sims106.csv", sims_all[2], delimiter=",")
np.savetxt("./../data/Al-5083/flyer_data/obs104.csv", obs_all[0], delimiter=",")
np.savetxt("./../data/Al-5083/flyer_data/obs105.csv", obs_all[1], delimiter=",")
np.savetxt("./../data/Al-5083/flyer_data/obs106.csv", obs_all[2], delimiter=",")
np.savetxt("./../data/Al-5083/flyer_data/xsims104.csv", xx_all[0], delimiter=",")
np.savetxt("./../data/Al-5083/flyer_data/xsims105.csv", xx_all[1], delimiter=",")
np.savetxt("./../data/Al-5083/flyer_data/xsims106.csv", xx_all[2], delimiter=",")
#sim_inputs = np.genfromtxt('./../data/Al-5083/flyer_data/sim_input.csv', delimiter=',', skip_header=1)
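# A possible refactor of the three near-identical loading blocks above into one
# helper (function name is hypothetical; it assumes the same directory layout and
# file format, i.e. two header lines followed by whitespace-separated columns):
def load_velocity_curves(path):
    files_unsorted = os.listdir(path)
    order = [int(str.split(ff, "_")[1]) for ff in files_unsorted]
    files = [x for _, x in sorted(zip(order, files_unsorted))]
    curves = []
    for fname in files:
        with open(os.path.join(path, fname)) as fh:
            lines = fh.readlines()
        curves.append(np.vstack([np.float_(str.split(line)) for line in lines[2:]]))
    return curves
# e.g. dat_104 could equivalently be built as load_velocity_curves(path_104)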
|
the-stack_0_16257
|
#########################
# Imports
#########################
from bs4 import BeautifulSoup
from fuzzywuzzy import fuzz
import bs4, requests, json
from secrets import *
#########################
# Headers
#########################
headers = {
'Authorization': 'Bearer ' + ACCESS_TOKEN,
}
#########################
# Helper Functions
#########################
def get_tracks(url):
# Parse Webpage
html = requests.get(url).text
soup = bs4.BeautifulSoup(html, 'html.parser')
data = json.loads(soup.find(id="shoebox-ember-data-store").get_text())
name = data['data']['attributes']['name']
playlist = data['included']
# Get Track Names and Artists from Playlist
tracks = []
for track in playlist:
try:
tracks.append({
'name': track['attributes']['name'],
'artist': track['attributes']['artistName']
})
        except Exception:  # some entries in the playlist payload are not tracks
continue
return name, tracks
def get_spotify_playlist(target_name):
# Get All Playlists
response = requests.get('https://api.spotify.com/v1/me/playlists', headers=headers)
playlists = json.loads(response.text)['items']
target_id = None
# Search for Playlist in Existing Playlists
for playlist in playlists:
if str(playlist['name']) == target_name:
target_id = str(playlist['id'])
# Create Playlist if it DNE
    if target_id is None:
response = requests.post('https://api.spotify.com/v1/users/%s/playlists' % USER_ID, headers=headers, data='{"name":"%s","public":false}' % target_name)
target_id = str(json.loads(response.text)['id'])
return target_id
def get_spotify_playlist_tracks(target_id):
    # Get All Tracks in Playlist
response = requests.get("https://api.spotify.com/v1/users/%s/playlists/%s/tracks" % (USER_ID, target_id), headers=headers)
playlist = json.loads(response.text)['items']
# Get Track Names, Artists, and URIs from Playlist
tracks = []
for track in playlist:
tracks.append({
'name': track['track']['name'],
'artist': track['track']['artists'][0]['name'],
'uri': track['track']['uri']
})
return tracks
def get_spotify_track_uri(target_name, target_artist):
# Parse Apple Music Song Name
if "(feat." in target_name:
index = target_name.find("(feat.")
target_artist += target_name[index + len("(feat."):-1]
target_name = target_name[:index]
# Get Search Results
params = (
('q', target_name),
('type', 'track'),
)
response = requests.get('https://api.spotify.com/v1/search', headers=headers, params=params)
results = json.loads(response.text)['tracks']['items']
# Return Best Fuzzy Match
scores = []
factor = 1
for track in results:
result = ""
for artist in track['artists']:
result += artist['name'] + " "
scores.append(fuzz.ratio(result.strip(), target_artist) * factor)
factor -= 0.02
return results[scores.index(max(scores))]['uri']
def delete_spotify_playlist_tracks(tracks, target_id):
# Generate Data String
uris = ""
for track in tracks:
uris += '{"uri":"' + str(track['uri']) + '"},'
data = '{"tracks":[' + uris[:-1] + "]}"
response = requests.delete('https://api.spotify.com/v1/users/%s/playlists/%s/tracks' % (USER_ID, target_id), headers=headers, data=data)
def add_spotify_playlist_tracks(tracks, target_id):
# Support 100 Track Limit
    if len(tracks) > 100:
        add_spotify_playlist_tracks(tracks[:100], target_id)
        add_spotify_playlist_tracks(tracks[100:], target_id)
        return  # both halves are handled by the recursive calls above
# Search for Tracks on Spotify
uris = ""
for track in tracks:
try:
uris += get_spotify_track_uri(track['name'], track['artist']) + ","
        except Exception:
print("Couldn't add " + track['name'] + " by " + track['artist'])
params = (
('uris', uris[:-1]),
)
response = requests.post('https://api.spotify.com/v1/users/%s/playlists/%s/tracks' % (USER_ID, target_id), headers=headers, params=params)
#########################
# Main Function
#########################
def ams(url):
name, cur_tracks = get_tracks(url)
target_id = get_spotify_playlist(name)
old_tracks = get_spotify_playlist_tracks(target_id)
add_tracks = [ track for track in cur_tracks if track not in old_tracks ]
del_tracks = [ track for track in old_tracks if track not in cur_tracks ]
print("Syncing " + name + "...")
delete_spotify_playlist_tracks(del_tracks, target_id)
add_spotify_playlist_tracks(add_tracks, target_id)
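#########################
# Example Invocation
#########################
# The playlist URL below is a placeholder; supply a real Apple Music playlist URL.
if __name__ == '__main__':
    ams('https://music.apple.com/us/playlist/example/pl.u-0000000000000')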
|
the-stack_0_16259
|
import numpy as np
from REESMath.quaternion import to_matrix
from math import atan2, asin, pi
class EulerXYZ:
def __init__(self, alpha, beta, gamma):
self.alpha = alpha # Rotation angle around x-axis in radians
self.beta = beta # Rotation angle around y-axis in radians
self.gamma = gamma # Rotation angle around z-axis in radians
def make_euler_xyz_from_matrix(R):
r00 = R[0, 0]
r01 = R[0, 1]
r02 = R[0, 2]
r10 = R[1, 0]
r20 = R[2, 0]
r21 = R[2, 1]
r22 = R[2, 2]
if r20 >= 1.0:
rz = atan2(-r01, -r02)
ry = - pi / 2.0
rx = 0.0
elif r20 <= -1.0:
rz = atan2(-r01, r02)
ry = pi / 2.0
rx = 0.0
else:
rz = atan2(r10, r00)
ry = asin(-r20)
rx = atan2(r21, r22)
return EulerXYZ(rx, ry, rz)
def make_euler_xyz_from_quaternion(Q):
return make_euler_xyz_from_matrix(to_matrix(Q))
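# Small numerical self-check (runs only when executed directly). It assumes the
# extraction above inverts the composition R = Rz(gamma) @ Ry(beta) @ Rx(alpha),
# which is what the atan2/asin pattern on r10, r20, r21 corresponds to.
if __name__ == '__main__':
    from math import cos, sin
    alpha, beta, gamma = 0.1, -0.4, 1.2
    Rx = np.array([[1.0, 0.0, 0.0], [0.0, cos(alpha), -sin(alpha)], [0.0, sin(alpha), cos(alpha)]])
    Ry = np.array([[cos(beta), 0.0, sin(beta)], [0.0, 1.0, 0.0], [-sin(beta), 0.0, cos(beta)]])
    Rz = np.array([[cos(gamma), -sin(gamma), 0.0], [sin(gamma), cos(gamma), 0.0], [0.0, 0.0, 1.0]])
    e = make_euler_xyz_from_matrix(Rz @ Ry @ Rx)
    print(e.alpha, e.beta, e.gamma)  # expected: 0.1 -0.4 1.2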
|
the-stack_0_16261
|
from pathlib import Path
import pytest
from hookman.hookman_generator import HookManGenerator
def test_hook_man_generator(datadir, file_regression):
# Pass a folder
with pytest.raises(FileNotFoundError, match=f"File not found: *"):
HookManGenerator(hook_spec_file_path=datadir)
# Pass a invalid hook_spec_file (without specs)
Path(datadir / 'invalid_spec.py').touch()
with pytest.raises(RuntimeError, match="Invalid file, specs not defined."):
HookManGenerator(hook_spec_file_path=Path(datadir / 'invalid_spec.py'))
hg = HookManGenerator(hook_spec_file_path=Path(datadir / 'hook_specs.py'))
hg.generate_project_files(dst_path=datadir)
file_regression.check((datadir / 'cpp' / 'HookCaller.hpp').read_text(), basename='HookCaller', extension='.hpp')
file_regression.check((datadir / 'binding' / 'HookCallerPython.cpp').read_text(), basename='HookCallerPython', extension='.cpp')
def test_hook_man_generator_no_pyd(datadir, file_regression):
hg = HookManGenerator(hook_spec_file_path=Path(datadir / 'hook_specs_no_pyd.py'))
hg.generate_project_files(dst_path=datadir)
obtained_hook_caller_file = datadir / 'cpp' / 'HookCaller.hpp'
file_regression.check(obtained_hook_caller_file.read_text(), basename='HookCallerNoPyd', extension='.hpp')
assert not (datadir / 'binding').is_dir()
def test_generate_plugin_template(datadir, file_regression):
plugin_dir = datadir / 'test_generate_plugin_template'
hg = HookManGenerator(hook_spec_file_path=Path(datadir / 'hook_specs.py'))
hg.generate_plugin_template(
caption='Acme',
plugin_id='acme',
author_name='FOO',
author_email='[email protected]',
dst_path=plugin_dir
)
obtained_hook_specs_file = datadir / 'test_generate_plugin_template/acme/src/hook_specs.h'
file_regression.check(obtained_hook_specs_file.read_text(), basename='generate_hook_specs', extension='.h')
obtained_plugin_yaml = datadir / 'test_generate_plugin_template/acme/assets/plugin.yaml'
file_regression.check(obtained_plugin_yaml.read_text(), basename='generate_plugin', extension='.yaml')
obtained_plugin_file = datadir / 'test_generate_plugin_template/acme/src/acme.cpp'
file_regression.check(obtained_plugin_file.read_text(), basename='generate_plugin', extension='.cpp')
obtained_readme = datadir / 'test_generate_plugin_template/acme/assets/README.md'
file_regression.check(obtained_readme.read_text(), basename='generate_README', extension='.md')
obtained_cmake_list = datadir / 'test_generate_plugin_template/acme/CMakeLists.txt'
file_regression.check(obtained_cmake_list.read_text(), basename='generate_CMakeLists', extension='.txt')
obtained_cmake_list_src = datadir / 'test_generate_plugin_template/acme/src/CMakeLists.txt'
file_regression.check(obtained_cmake_list_src.read_text(), basename='generate_src_CMakeLists', extension='.txt')
obtained_compile_script = datadir / 'test_generate_plugin_template/acme/compile.py'
file_regression.check(obtained_compile_script.read_text(), basename='generate_compile', extension='.py')
def test_generate_plugin_template_source_content_with_extra_includes(datadir, file_regression):
plugin_dir = datadir / 'test_generate_plugin_template_with_extra_include'
hg = HookManGenerator(hook_spec_file_path=Path(datadir / 'hook_specs.py'))
hg.generate_plugin_template(
caption='Acme',
plugin_id='acme',
author_name='FOO',
author_email='[email protected]',
dst_path=plugin_dir,
extra_includes=['<my_sdk/sdk.h>'],
)
obtained_plugin_file = datadir / 'test_generate_plugin_template_with_extra_include/acme/src/acme.cpp'
file_regression.check(obtained_plugin_file.read_text(), basename='plugin_file_with_extra_includes', extension='.cpp')
def test_generate_plugin_template_source_content_with_default_impls(datadir, file_regression):
plugin_dir = datadir / 'test_generate_plugin_template_source_content_with_default_impls'
hg = HookManGenerator(hook_spec_file_path=Path(datadir / 'hook_specs.py'))
extra_body_lines = [
'HOOK_FRICTION_FACTOR(v1, v2)',
'{',
' return 0;',
'}',
]
hg.generate_plugin_template(
caption='Acme',
plugin_id='acme',
author_name='FOO',
author_email='[email protected]',
dst_path=plugin_dir,
extra_body_lines=extra_body_lines,
exclude_hooks=['HOOK_FRICTION_FACTOR']
)
obtained_plugin_file = datadir / 'test_generate_plugin_template_source_content_with_default_impls/acme/src/acme.cpp'
file_regression.check(obtained_plugin_file.read_text(), basename='plugin_file_with_default_impl', extension='.cpp')
def test_generate_plugin_template_source_wrong_arguments(datadir):
hg = HookManGenerator(hook_spec_file_path=Path(datadir / 'hook_specs.py'))
with pytest.raises(ValueError, match='extra_includes parameter must be a list, got int'):
hg._validate_parameter('extra_includes', 1)
with pytest.raises(ValueError, match='All elements of extra_includes must be a string'):
hg._validate_parameter('extra_includes', ['xx', 1])
def test_generate_hook_specs_header(datadir, file_regression):
plugin_dir = datadir / 'my-plugin'
hg = HookManGenerator(hook_spec_file_path=Path(datadir / 'hook_specs.py'))
hg.generate_hook_specs_header(plugin_id='acme', dst_path=plugin_dir)
obtained_hook_specs_file = plugin_dir / 'acme/src/hook_specs.h'
file_regression.check(obtained_hook_specs_file.read_text(), basename='generate_hook_specs_header1', extension='.h')
hg = HookManGenerator(hook_spec_file_path=Path(datadir / 'hook_specs_2.py'))
hg.generate_hook_specs_header(plugin_id='acme', dst_path=plugin_dir)
file_regression.check(obtained_hook_specs_file.read_text(), basename='generate_hook_specs_header2', extension='.h')
def test_generate_plugin_package_invalid_shared_lib_name(acme_hook_specs_file, tmpdir):
hg = HookManGenerator(hook_spec_file_path=acme_hook_specs_file)
from hookman.exceptions import HookmanError
with pytest.raises(HookmanError):
hg.generate_plugin_template(
caption='acme',
plugin_id='acm#e',
author_email='acme1',
author_name='acme2',
dst_path=Path(tmpdir)
)
with pytest.raises(HookmanError):
hg.generate_plugin_template(
caption='acme',
plugin_id='acm e',
author_email='acme1',
author_name='acme2',
dst_path=Path(tmpdir)
)
with pytest.raises(HookmanError):
hg.generate_plugin_template(
caption='1acme',
plugin_id='acm e',
author_email='acme1',
author_name='acme2',
dst_path=Path(tmpdir)
)
def test_generate_plugin_package(acme_hook_specs_file, tmpdir, mock_plugin_id_from_dll):
hg = HookManGenerator(hook_spec_file_path=acme_hook_specs_file)
plugin_id = 'acme'
hg.generate_plugin_template(
caption='acme',
plugin_id='acme',
author_email='acme1',
author_name='acme2',
dst_path=Path(tmpdir)
)
plugin_dir = Path(tmpdir) / 'acme'
artifacts_dir = plugin_dir / 'artifacts'
artifacts_dir.mkdir()
import sys
shared_lib_name = f"{plugin_id}.dll" if sys.platform == 'win32' else f"lib{plugin_id}.so"
shared_lib_path = artifacts_dir / shared_lib_name
shared_lib_path.write_text('')
hg.generate_plugin_package(
package_name='acme',
plugin_dir=plugin_dir,
)
from hookman.plugin_config import PluginInfo
version = PluginInfo(Path(tmpdir / 'acme/assets/plugin.yaml'), None).version
win_plugin_name = f"{plugin_id}-{version}-win64.hmplugin"
linux_plugin_name = f"{plugin_id}-{version}-linux64.hmplugin"
hm_plugin_name = win_plugin_name if sys.platform == 'win32' else linux_plugin_name
compressed_plugin = plugin_dir / hm_plugin_name
assert compressed_plugin.exists()
from zipfile import ZipFile
plugin_file_zip = ZipFile(compressed_plugin)
list_of_files = [file.filename for file in plugin_file_zip.filelist]
assert 'assets/plugin.yaml' in list_of_files
assert 'assets/README.md' in list_of_files
assert f'artifacts/{shared_lib_name}' in list_of_files
def test_generate_plugin_package_with_missing_folders(acme_hook_specs_file, tmpdir, mocker):
import sys
from textwrap import dedent
from hookman.exceptions import AssetsDirNotFoundError, ArtifactsDirNotFoundError, SharedLibraryNotFoundError
hg = HookManGenerator(hook_spec_file_path=acme_hook_specs_file)
plugin_dir = Path(tmpdir) / 'acme'
plugin_dir.mkdir()
# -- Without Assets Folder
with pytest.raises(AssetsDirNotFoundError):
hg.generate_plugin_package(package_name='acme', plugin_dir=plugin_dir)
asset_dir = plugin_dir / 'assets'
asset_dir.mkdir()
# -- Without Artifacts Folder
with pytest.raises(ArtifactsDirNotFoundError, match=r'Artifacts directory not found: .*[\\/]acme[\\/]artifacts'):
hg.generate_plugin_package(package_name='acme', plugin_dir=plugin_dir)
artifacts_dir = plugin_dir / 'artifacts'
artifacts_dir.mkdir()
# -- Without a shared library binary
shared_lib_extension = '*.dll' if sys.platform == 'win32' else '*.so'
string_to_match = fr'Unable to locate a shared library ({shared_lib_extension}) in'
import re
with pytest.raises(FileNotFoundError, match=re.escape(string_to_match)):
hg.generate_plugin_package(package_name='acme', plugin_dir=plugin_dir)
lib_name = 'test.dll' if sys.platform == 'win32' else 'libtest.so'
shared_library_file = artifacts_dir / lib_name
shared_library_file.write_text('')
# -- Without Config file
with pytest.raises(FileNotFoundError, match=f'Unable to locate the file plugin.yaml in'):
hg.generate_plugin_package(package_name='acme', plugin_dir=plugin_dir)
config_file = asset_dir / 'plugin.yaml'
config_file.write_text(dedent(f"""\
caption: 'ACME'
version: '1.0.0'
author: 'acme_author'
email: 'acme_email'
id: 'acme'
"""))
# -- Without Readme file
with pytest.raises(FileNotFoundError, match=f'Unable to locate the file README.md in'):
hg.generate_plugin_package(package_name='acme', plugin_dir=plugin_dir)
readme_file = asset_dir / 'README.md'
readme_file.write_text('')
    # -- With an invalid shared_library name on config_file
acme_lib_name = 'acme.dll' if sys.platform == 'win32' else 'libacme.so'
hm_plugin_name = 'acme-1.0.0-win64.hmplugin' if sys.platform == 'win32' else 'acme-1.0.0-linux64.hmplugin'
with pytest.raises(SharedLibraryNotFoundError, match=f'{acme_lib_name} could not be found in *'):
hg.generate_plugin_package(package_name='acme', plugin_dir=plugin_dir)
acme_shared_library_file = artifacts_dir / acme_lib_name
acme_shared_library_file.write_text('')
    # The mock below avoids having to build a valid dll for this test
from hookman.plugin_config import PluginInfo
mocker.patch.object(PluginInfo, '_get_plugin_id_from_dll', return_value='')
hg.generate_plugin_package(package_name='acme', plugin_dir=plugin_dir)
compressed_plugin_package = plugin_dir / hm_plugin_name
assert compressed_plugin_package.exists()
def test_generate_plugin_package_invalid_version(acme_hook_specs_file, tmp_path, mocker, mock_plugin_id_from_dll):
hg = HookManGenerator(hook_spec_file_path=acme_hook_specs_file)
plugin_id = 'acme'
hg.generate_plugin_template(plugin_id, plugin_id, 'acme1', 'acme2', tmp_path)
plugin_yaml = tmp_path / 'acme/assets/plugin.yaml'
new_content = plugin_yaml.read_text().replace("version: '1.0.0'", "version: '1'")
plugin_yaml.write_text(new_content)
mocker.patch('hookman.hookman_generator.HookManGenerator._validate_package_folder', return_value=None)
with pytest.raises(ValueError, match="Version attribute does not follow semantic version, got '1'"):
hg.generate_plugin_package(plugin_id, plugin_dir=tmp_path / plugin_id)
|
the-stack_0_16263
|
#!/usr/bin/env python3
''' decrypts the first passage'''
from Vigenere import Vigenere
keyword_1 = 'kryptos'
keyword_2 = 'abscissa'
with open('text_b.txt', 'r') as f:
text = f.read().replace('\n', '').lower()
text = text[:text.index('?')]
# cut into 14x24 matrix
matrix = []
for i in range(14):
matrix.append(list(text[i*24:(i+1)*24]))
# rotate
matrix = zip(*matrix[::-1])
# restring it
text = ''
for line in matrix:
text += ''.join(line)
# cut into 42x8 matrix
matrix = []
for i in range(42):
matrix.append(list(text[i*8:(i+1)*8]))
# rotate
matrix = zip(*matrix[::-1])
# restring it
text = ''
for line in matrix:
text += ''.join(line)
print(text)
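# Worked mini-example of the rotate-and-restring step used above (illustration only):
# 'abcdef' laid out as a 2x3 matrix [['a','b','c'], ['d','e','f']] becomes, after
# zip(*matrix[::-1]) (a 90-degree clockwise rotation), the rows ('d','a'), ('e','b'),
# ('f','c'), so restringing yields 'daebfc'.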
|
the-stack_0_16264
|
#!/usr/bin/env python
#
# Electrum - lightweight STRAKS client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This module uses functions from TLSLite (public domain)
#
# TLSLite Authors:
# Trevor Perrin
# Martin von Loewis - python 3 port
# Yngve Pettersen (ported by Paul Sokolovsky) - TLS 1.2
#
"""Pure-Python RSA implementation."""
import os
import math
import hashlib
from .pem import *
def SHA1(x):
return hashlib.sha1(x).digest()
# **************************************************************************
# PRNG Functions
# **************************************************************************
# Check that os.urandom works
import zlib
length = len(zlib.compress(os.urandom(1000)))
assert(length > 900)
def getRandomBytes(howMany):
b = bytearray(os.urandom(howMany))
assert(len(b) == howMany)
return b
prngName = "os.urandom"
# **************************************************************************
# Converter Functions
# **************************************************************************
def bytesToNumber(b):
total = 0
multiplier = 1
for count in range(len(b)-1, -1, -1):
byte = b[count]
total += multiplier * byte
multiplier *= 256
return total
def numberToByteArray(n, howManyBytes=None):
"""Convert an integer into a bytearray, zero-pad to howManyBytes.
The returned bytearray may be smaller than howManyBytes, but will
not be larger. The returned bytearray will contain a big-endian
encoding of the input integer (n).
"""
    if howManyBytes is None:
howManyBytes = numBytes(n)
b = bytearray(howManyBytes)
for count in range(howManyBytes-1, -1, -1):
b[count] = int(n % 256)
n >>= 8
return b
def mpiToNumber(mpi): #mpi is an openssl-format bignum string
if (ord(mpi[4]) & 0x80) !=0: #Make sure this is a positive number
raise AssertionError()
b = bytearray(mpi[4:])
return bytesToNumber(b)
def numberToMPI(n):
b = numberToByteArray(n)
ext = 0
#If the high-order bit is going to be set,
#add an extra byte of zeros
if (numBits(n) & 0x7)==0:
ext = 1
length = numBytes(n) + ext
b = bytearray(4+ext) + b
b[0] = (length >> 24) & 0xFF
b[1] = (length >> 16) & 0xFF
b[2] = (length >> 8) & 0xFF
b[3] = length & 0xFF
return bytes(b)
# **************************************************************************
# Misc. Utility Functions
# **************************************************************************
def numBits(n):
if n==0:
return 0
s = "%x" % n
return ((len(s)-1)*4) + \
{'0':0, '1':1, '2':2, '3':2,
'4':3, '5':3, '6':3, '7':3,
'8':4, '9':4, 'a':4, 'b':4,
'c':4, 'd':4, 'e':4, 'f':4,
}[s[0]]
return int(math.floor(math.log(n, 2))+1)
def numBytes(n):
if n==0:
return 0
bits = numBits(n)
return int(math.ceil(bits / 8.0))
# **************************************************************************
# Big Number Math
# **************************************************************************
def getRandomNumber(low, high):
if low >= high:
raise AssertionError()
howManyBits = numBits(high)
howManyBytes = numBytes(high)
lastBits = howManyBits % 8
while 1:
bytes = getRandomBytes(howManyBytes)
if lastBits:
bytes[0] = bytes[0] % (1 << lastBits)
n = bytesToNumber(bytes)
if n >= low and n < high:
return n
def gcd(a,b):
a, b = max(a,b), min(a,b)
while b:
a, b = b, a % b
return a
def lcm(a, b):
return (a * b) // gcd(a, b)
#Returns inverse of a mod b, zero if none
#Uses Extended Euclidean Algorithm
def invMod(a, b):
c, d = a, b
uc, ud = 1, 0
while c != 0:
q = d // c
c, d = d-(q*c), c
uc, ud = ud - (q * uc), uc
if d == 1:
return ud % b
return 0
def powMod(base, power, modulus):
if power < 0:
result = pow(base, power*-1, modulus)
result = invMod(result, modulus)
return result
else:
return pow(base, power, modulus)
#Pre-calculate a sieve of the ~100 primes < 1000:
def makeSieve(n):
sieve = list(range(n))
for count in range(2, int(math.sqrt(n))+1):
if sieve[count] == 0:
continue
x = sieve[count] * 2
while x < len(sieve):
sieve[x] = 0
x += sieve[count]
sieve = [x for x in sieve[2:] if x]
return sieve
sieve = makeSieve(1000)
def isPrime(n, iterations=5, display=False):
#Trial division with sieve
for x in sieve:
if x >= n: return True
if n % x == 0: return False
#Passed trial division, proceed to Rabin-Miller
#Rabin-Miller implemented per Ferguson & Schneier
#Compute s, t for Rabin-Miller
if display: print("*", end=' ')
s, t = n-1, 0
while s % 2 == 0:
s, t = s//2, t+1
#Repeat Rabin-Miller x times
a = 2 #Use 2 as a base for first iteration speedup, per HAC
for count in range(iterations):
v = powMod(a, s, n)
if v==1:
continue
i = 0
while v != n-1:
if i == t-1:
return False
else:
v, i = powMod(v, 2, n), i+1
a = getRandomNumber(2, n)
return True
def getRandomPrime(bits, display=False):
if bits < 10:
raise AssertionError()
#The 1.5 ensures the 2 MSBs are set
#Thus, when used for p,q in RSA, n will have its MSB set
#
#Since 30 is lcm(2,3,5), we'll set our test numbers to
#29 % 30 and keep them there
low = ((2 ** (bits-1)) * 3) // 2
high = 2 ** bits - 30
p = getRandomNumber(low, high)
p += 29 - (p % 30)
while 1:
if display: print(".", end=' ')
p += 30
if p >= high:
p = getRandomNumber(low, high)
p += 29 - (p % 30)
if isPrime(p, display=display):
return p
#Unused at the moment...
def getRandomSafePrime(bits, display=False):
if bits < 10:
raise AssertionError()
#The 1.5 ensures the 2 MSBs are set
#Thus, when used for p,q in RSA, n will have its MSB set
#
#Since 30 is lcm(2,3,5), we'll set our test numbers to
#29 % 30 and keep them there
low = (2 ** (bits-2)) * 3//2
high = (2 ** (bits-1)) - 30
q = getRandomNumber(low, high)
q += 29 - (q % 30)
while 1:
if display: print(".", end=' ')
q += 30
if (q >= high):
q = getRandomNumber(low, high)
q += 29 - (q % 30)
#Ideas from Tom Wu's SRP code
#Do trial division on p and q before Rabin-Miller
if isPrime(q, 0, display=display):
p = (2 * q) + 1
if isPrime(p, display=display):
if isPrime(q, display=display):
return p
class RSAKey(object):
def __init__(self, n=0, e=0, d=0, p=0, q=0, dP=0, dQ=0, qInv=0):
if (n and not e) or (e and not n):
raise AssertionError()
self.n = n
self.e = e
self.d = d
self.p = p
self.q = q
self.dP = dP
self.dQ = dQ
self.qInv = qInv
self.blinder = 0
self.unblinder = 0
def __len__(self):
"""Return the length of this key in bits.
@rtype: int
"""
return numBits(self.n)
def hasPrivateKey(self):
return self.d != 0
def hashAndSign(self, bytes):
"""Hash and sign the passed-in bytes.
This requires the key to have a private component. It performs
a PKCS1-SHA1 signature on the passed-in data.
@type bytes: str or L{bytearray} of unsigned bytes
@param bytes: The value which will be hashed and signed.
@rtype: L{bytearray} of unsigned bytes.
@return: A PKCS1-SHA1 signature on the passed-in data.
"""
hashBytes = SHA1(bytearray(bytes))
prefixedHashBytes = self._addPKCS1SHA1Prefix(hashBytes)
sigBytes = self.sign(prefixedHashBytes)
return sigBytes
def hashAndVerify(self, sigBytes, bytes):
"""Hash and verify the passed-in bytes with the signature.
This verifies a PKCS1-SHA1 signature on the passed-in data.
@type sigBytes: L{bytearray} of unsigned bytes
@param sigBytes: A PKCS1-SHA1 signature.
@type bytes: str or L{bytearray} of unsigned bytes
@param bytes: The value which will be hashed and verified.
@rtype: bool
@return: Whether the signature matches the passed-in data.
"""
hashBytes = SHA1(bytearray(bytes))
# Try it with/without the embedded NULL
prefixedHashBytes1 = self._addPKCS1SHA1Prefix(hashBytes, False)
prefixedHashBytes2 = self._addPKCS1SHA1Prefix(hashBytes, True)
result1 = self.verify(sigBytes, prefixedHashBytes1)
result2 = self.verify(sigBytes, prefixedHashBytes2)
return (result1 or result2)
def sign(self, bytes):
"""Sign the passed-in bytes.
This requires the key to have a private component. It performs
a PKCS1 signature on the passed-in data.
@type bytes: L{bytearray} of unsigned bytes
@param bytes: The value which will be signed.
@rtype: L{bytearray} of unsigned bytes.
@return: A PKCS1 signature on the passed-in data.
"""
if not self.hasPrivateKey():
raise AssertionError()
paddedBytes = self._addPKCS1Padding(bytes, 1)
m = bytesToNumber(paddedBytes)
if m >= self.n:
raise ValueError()
c = self._rawPrivateKeyOp(m)
sigBytes = numberToByteArray(c, numBytes(self.n))
return sigBytes
def verify(self, sigBytes, bytes):
"""Verify the passed-in bytes with the signature.
This verifies a PKCS1 signature on the passed-in data.
@type sigBytes: L{bytearray} of unsigned bytes
@param sigBytes: A PKCS1 signature.
@type bytes: L{bytearray} of unsigned bytes
@param bytes: The value which will be verified.
@rtype: bool
@return: Whether the signature matches the passed-in data.
"""
if len(sigBytes) != numBytes(self.n):
return False
paddedBytes = self._addPKCS1Padding(bytes, 1)
c = bytesToNumber(sigBytes)
if c >= self.n:
return False
m = self._rawPublicKeyOp(c)
checkBytes = numberToByteArray(m, numBytes(self.n))
return checkBytes == paddedBytes
def encrypt(self, bytes):
"""Encrypt the passed-in bytes.
This performs PKCS1 encryption of the passed-in data.
@type bytes: L{bytearray} of unsigned bytes
@param bytes: The value which will be encrypted.
@rtype: L{bytearray} of unsigned bytes.
@return: A PKCS1 encryption of the passed-in data.
"""
paddedBytes = self._addPKCS1Padding(bytes, 2)
m = bytesToNumber(paddedBytes)
if m >= self.n:
raise ValueError()
c = self._rawPublicKeyOp(m)
encBytes = numberToByteArray(c, numBytes(self.n))
return encBytes
def decrypt(self, encBytes):
"""Decrypt the passed-in bytes.
This requires the key to have a private component. It performs
PKCS1 decryption of the passed-in data.
@type encBytes: L{bytearray} of unsigned bytes
@param encBytes: The value which will be decrypted.
@rtype: L{bytearray} of unsigned bytes or None.
@return: A PKCS1 decryption of the passed-in data or None if
the data is not properly formatted.
"""
if not self.hasPrivateKey():
raise AssertionError()
if len(encBytes) != numBytes(self.n):
return None
c = bytesToNumber(encBytes)
if c >= self.n:
return None
m = self._rawPrivateKeyOp(c)
decBytes = numberToByteArray(m, numBytes(self.n))
#Check first two bytes
if decBytes[0] != 0 or decBytes[1] != 2:
return None
#Scan through for zero separator
for x in range(1, len(decBytes)-1):
if decBytes[x]== 0:
break
else:
return None
return decBytes[x+1:] #Return everything after the separator
# **************************************************************************
# Helper Functions for RSA Keys
# **************************************************************************
def _addPKCS1SHA1Prefix(self, bytes, withNULL=True):
# There is a long history of confusion over whether the SHA1
# algorithmIdentifier should be encoded with a NULL parameter or
# with the parameter omitted. While the original intention was
# apparently to omit it, many toolkits went the other way. TLS 1.2
# specifies the NULL should be included, and this behavior is also
# mandated in recent versions of PKCS #1, and is what tlslite has
# always implemented. Anyways, verification code should probably
# accept both. However, nothing uses this code yet, so this is
# all fairly moot.
if not withNULL:
prefixBytes = bytearray(\
[0x30,0x1f,0x30,0x07,0x06,0x05,0x2b,0x0e,0x03,0x02,0x1a,0x04,0x14])
else:
prefixBytes = bytearray(\
[0x30,0x21,0x30,0x09,0x06,0x05,0x2b,0x0e,0x03,0x02,0x1a,0x05,0x00,0x04,0x14])
prefixedBytes = prefixBytes + bytes
return prefixedBytes
def _addPKCS1Padding(self, bytes, blockType):
padLength = (numBytes(self.n) - (len(bytes)+3))
if blockType == 1: #Signature padding
pad = [0xFF] * padLength
elif blockType == 2: #Encryption padding
pad = bytearray(0)
while len(pad) < padLength:
padBytes = getRandomBytes(padLength * 2)
pad = [b for b in padBytes if b != 0]
pad = pad[:padLength]
else:
raise AssertionError()
padding = bytearray([0,blockType] + pad + [0])
paddedBytes = padding + bytes
return paddedBytes
def _rawPrivateKeyOp(self, m):
#Create blinding values, on the first pass:
if not self.blinder:
self.unblinder = getRandomNumber(2, self.n)
self.blinder = powMod(invMod(self.unblinder, self.n), self.e,
self.n)
#Blind the input
m = (m * self.blinder) % self.n
#Perform the RSA operation
c = self._rawPrivateKeyOpHelper(m)
#Unblind the output
c = (c * self.unblinder) % self.n
#Update blinding values
self.blinder = (self.blinder * self.blinder) % self.n
self.unblinder = (self.unblinder * self.unblinder) % self.n
#Return the output
return c
def _rawPrivateKeyOpHelper(self, m):
#Non-CRT version
#c = powMod(m, self.d, self.n)
#CRT version (~3x faster)
s1 = powMod(m, self.dP, self.p)
s2 = powMod(m, self.dQ, self.q)
h = ((s1 - s2) * self.qInv) % self.p
c = s2 + self.q * h
return c
def _rawPublicKeyOp(self, c):
m = powMod(c, self.e, self.n)
return m
def acceptsPassword(self):
return False
def generate(bits):
key = RSAKey()
p = getRandomPrime(bits//2, False)
q = getRandomPrime(bits//2, False)
t = lcm(p-1, q-1)
key.n = p * q
key.e = 65537
key.d = invMod(key.e, t)
key.p = p
key.q = q
key.dP = key.d % (p-1)
key.dQ = key.d % (q-1)
key.qInv = invMod(q, p)
return key
generate = staticmethod(generate)
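# Minimal usage sketch (illustrative only; a 512-bit modulus is far too small for
# real use). Runs only when the module is executed directly.
if __name__ == '__main__':
    demo_key = RSAKey.generate(512)
    message = bytearray(b"hello world")
    signature = demo_key.hashAndSign(message)
    assert demo_key.hashAndVerify(signature, message)
    assert demo_key.decrypt(demo_key.encrypt(message)) == message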
|
the-stack_0_16267
|
# coding: utf-8
# Copyright 2017-2019 The FIAAS Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import logging
from k8s.models.common import ObjectMeta
from k8s.models.apiextensions_v1_custom_resource_definition import CustomResourceConversion,\
CustomResourceDefinitionNames, CustomResourceDefinitionSpec, CustomResourceDefinition,\
CustomResourceDefinitionVersion, CustomResourceValidation, JSONSchemaProps
LOG = logging.getLogger(__name__)
class CrdResourcesSyncerApiextensionsV1(object):
@staticmethod
def _create_or_update(kind, plural, short_names, group, schema_properties):
name = "%s.%s" % (plural, group)
metadata = ObjectMeta(name=name)
names = CustomResourceDefinitionNames(kind=kind, plural=plural, shortNames=short_names)
schema = CustomResourceValidation(openAPIV3Schema=JSONSchemaProps(type="object", properties=schema_properties))
version_v1 = CustomResourceDefinitionVersion(name="v1", served=True, storage=True, schema=schema)
spec = CustomResourceDefinitionSpec(
group=group,
names=names,
versions=[version_v1],
preserveUnknownFields=False,
scope="Namespaced",
conversion=CustomResourceConversion(strategy="None")
)
definition = CustomResourceDefinition.get_or_create(metadata=metadata, spec=spec)
definition.save()
LOG.info("Created or updated CustomResourceDefinition with name %s", name)
@classmethod
def update_crd_resources(cls):
object_with_unknown_fields = {"type": "object", "x-kubernetes-preserve-unknown-fields": True}
application_schema_properties = {
"spec": {
"type": "object",
"properties": {
"application": {
"type": "string",
},
"image": {
"type": "string",
},
"config": object_with_unknown_fields,
"additional_labels": {
"type": "object",
"properties": {
"global": object_with_unknown_fields,
"deployment": object_with_unknown_fields,
"horizontal_pod_autoscaler": object_with_unknown_fields,
"ingress": object_with_unknown_fields,
"service": object_with_unknown_fields,
"service_account": object_with_unknown_fields,
"pod": object_with_unknown_fields,
"status": object_with_unknown_fields,
}
},
"additional_annotations": {
"type": "object",
"properties": {
"global": object_with_unknown_fields,
"deployment": object_with_unknown_fields,
"horizontal_pod_autoscaler": object_with_unknown_fields,
"ingress": object_with_unknown_fields,
"service": object_with_unknown_fields,
"service_account": object_with_unknown_fields,
"pod": object_with_unknown_fields,
"status": object_with_unknown_fields,
}
}
}
}
}
application_status_schema_properties = {
"result": {
"type": "string"
},
"logs": {
"type": "array",
"items": {
"type": "string"
}
}
}
cls._create_or_update("Application", "applications", ("app", "fa"), "fiaas.schibsted.io",
application_schema_properties)
cls._create_or_update("ApplicationStatus", "application-statuses", ("status", "appstatus", "fs"),
"fiaas.schibsted.io", application_status_schema_properties)
|
the-stack_0_16268
|
import os
from collections import OrderedDict
from conans.client import tools
from conans.client.build.compiler_flags import architecture_flag, parallel_compiler_cl_flag
from conans.client.build.cppstd_flags import cppstd_from_settings, cppstd_flag_new as cppstd_flag
from conans.client.tools import cross_building
from conans.client.tools.apple import is_apple_os
from conans.client.tools.oss import get_cross_building_settings
from conans.errors import ConanException
from conans.model.build_info import DEFAULT_BIN, DEFAULT_INCLUDE, DEFAULT_LIB, DEFAULT_SHARE
from conans.util.env_reader import get_env
from conans.util.log import logger
verbose_definition_name = "CMAKE_VERBOSE_MAKEFILE"
cmake_install_prefix_var_name = "CMAKE_INSTALL_PREFIX"
runtime_definition_var_name = "CONAN_LINK_RUNTIME"
cmake_in_local_cache_var_name = "CONAN_IN_LOCAL_CACHE"
def get_toolset(settings):
if settings.get_safe("compiler") == "Visual Studio":
subs_toolset = settings.get_safe("compiler.toolset")
if subs_toolset:
return subs_toolset
return None
def get_generator(settings):
# Returns the name of the generator to be used by CMake
if "CONAN_CMAKE_GENERATOR" in os.environ:
return os.environ["CONAN_CMAKE_GENERATOR"]
compiler = settings.get_safe("compiler")
arch = settings.get_safe("arch")
compiler_version = settings.get_safe("compiler.version")
os_build, _, _, _ = get_cross_building_settings(settings)
if not compiler or not compiler_version or not arch:
if os_build == "Windows":
logger.warning("CMake generator could not be deduced from settings")
return None
return "Unix Makefiles"
if compiler == "Visual Studio":
_visuals = {'8': '8 2005',
'9': '9 2008',
'10': '10 2010',
'11': '11 2012',
'12': '12 2013',
'14': '14 2015',
'15': '15 2017',
'16': '16 2019'}.get(compiler_version, "UnknownVersion %s" % compiler_version)
base = "Visual Studio %s" % _visuals
return base
# The generator depends on the build machine, not the target
if os_build == "Windows" and compiler != "qcc":
return "MinGW Makefiles" # it is valid only under Windows
return "Unix Makefiles"
def get_generator_platform(settings, generator):
# Returns the generator platform to be used by CMake
if "CONAN_CMAKE_GENERATOR_PLATFORM" in os.environ:
return os.environ["CONAN_CMAKE_GENERATOR_PLATFORM"]
compiler = settings.get_safe("compiler")
arch = settings.get_safe("arch")
if settings.get_safe("os") == "WindowsCE":
return settings.get_safe("os.platform")
if compiler == "Visual Studio" and generator and "Visual" in generator:
return {"x86": "Win32",
"x86_64": "x64",
"armv7": "ARM",
"armv8": "ARM64"}.get(arch)
return None
def is_multi_configuration(generator):
if not generator:
return False
return "Visual" in generator or "Xcode" in generator
def is_toolset_supported(generator):
# https://cmake.org/cmake/help/v3.14/variable/CMAKE_GENERATOR_TOOLSET.html
if not generator:
return False
return "Visual" in generator or "Xcode" in generator or "Green Hills MULTI" in generator
def is_generator_platform_supported(generator):
# https://cmake.org/cmake/help/v3.14/variable/CMAKE_GENERATOR_PLATFORM.html
if not generator:
return False
return "Visual" in generator or "Green Hills MULTI" in generator
def verbose_definition(value):
return {verbose_definition_name: "ON" if value else "OFF"}
def in_local_cache_definition(value):
return {cmake_in_local_cache_var_name: "ON" if value else "OFF"}
def runtime_definition(runtime):
return {runtime_definition_var_name: "/%s" % runtime} if runtime else {}
def build_type_definition(new_build_type, old_build_type, generator, output):
if new_build_type and new_build_type != old_build_type:
output.warn("Forced CMake build type ('%s') different from the settings build type ('%s')"
% (new_build_type, old_build_type))
build_type = new_build_type or old_build_type
if build_type and not is_multi_configuration(generator):
return {"CMAKE_BUILD_TYPE": build_type}
return {}
class CMakeDefinitionsBuilder(object):
def __init__(self, conanfile, cmake_system_name=True, make_program=None,
parallel=True, generator=None, set_cmake_flags=False,
forced_build_type=None, output=None):
self._conanfile = conanfile
self._forced_cmake_system_name = cmake_system_name
self._make_program = make_program
self._parallel = parallel
self._generator = generator
self._set_cmake_flags = set_cmake_flags
self._forced_build_type = forced_build_type
self._output = output
def _ss(self, setname):
"""safe setting"""
return self._conanfile.settings.get_safe(setname)
def _get_cpp_standard_vars(self):
cppstd = cppstd_from_settings(self._conanfile.settings)
if not cppstd:
return {}
definitions = {}
if cppstd.startswith("gnu"):
definitions["CONAN_CMAKE_CXX_STANDARD"] = cppstd[3:]
definitions["CONAN_CMAKE_CXX_EXTENSIONS"] = "ON"
else:
definitions["CONAN_CMAKE_CXX_STANDARD"] = cppstd
definitions["CONAN_CMAKE_CXX_EXTENSIONS"] = "OFF"
definitions["CONAN_STD_CXX_FLAG"] = cppstd_flag(self._conanfile.settings)
return definitions
def _cmake_cross_build_defines(self):
os_ = self._ss("os")
arch = self._ss("arch")
os_ver_str = "os.api_level" if os_ == "Android" else "os.version"
op_system_version = self._ss(os_ver_str)
env_sn = get_env("CONAN_CMAKE_SYSTEM_NAME", "")
env_sn = {"False": False, "True": True, "": None}.get(env_sn, env_sn)
cmake_system_name = env_sn or self._forced_cmake_system_name
os_build, _, _, _ = get_cross_building_settings(self._conanfile.settings)
compiler = self._ss("compiler")
libcxx = self._ss("compiler.libcxx")
definitions = OrderedDict()
os_ver = get_env("CONAN_CMAKE_SYSTEM_VERSION", op_system_version)
toolchain_file = get_env("CONAN_CMAKE_TOOLCHAIN_FILE", "")
if toolchain_file != "":
logger.info("Setting Cross build toolchain file: %s" % toolchain_file)
definitions["CMAKE_TOOLCHAIN_FILE"] = toolchain_file
return definitions
if cmake_system_name is False:
return definitions
# System name and system version
if cmake_system_name is not True: # String not empty
definitions["CMAKE_SYSTEM_NAME"] = cmake_system_name
else: # detect if we are cross building and the system name and version
if cross_building(self._conanfile.settings): # We are cross building
if os_ != os_build:
if os_: # the_os is the host (regular setting)
definitions["CMAKE_SYSTEM_NAME"] = {"iOS": "Darwin",
"tvOS": "Darwin",
"watchOS": "Darwin",
"Neutrino": "QNX"}.get(os_, os_)
else:
definitions["CMAKE_SYSTEM_NAME"] = "Generic"
if os_ver:
definitions["CMAKE_SYSTEM_VERSION"] = os_ver
if is_apple_os(os_):
definitions["CMAKE_OSX_DEPLOYMENT_TARGET"] = os_ver
# system processor
cmake_system_processor = os.getenv("CONAN_CMAKE_SYSTEM_PROCESSOR")
if cmake_system_processor:
definitions["CMAKE_SYSTEM_PROCESSOR"] = cmake_system_processor
if definitions: # If enabled cross compile
for env_var in ["CONAN_CMAKE_FIND_ROOT_PATH",
"CONAN_CMAKE_FIND_ROOT_PATH_MODE_PROGRAM",
"CONAN_CMAKE_FIND_ROOT_PATH_MODE_LIBRARY",
"CONAN_CMAKE_FIND_ROOT_PATH_MODE_INCLUDE"]:
value = os.getenv(env_var)
if value:
definitions[env_var] = value
if self._conanfile and self._conanfile.deps_cpp_info.sysroot:
sysroot_path = self._conanfile.deps_cpp_info.sysroot
else:
sysroot_path = os.getenv("CONAN_CMAKE_FIND_ROOT_PATH", None)
if sysroot_path:
# Needs to be set here, can't be managed in the cmake generator, CMake needs
# to know about the sysroot before any other thing
definitions["CMAKE_SYSROOT"] = sysroot_path.replace("\\", "/")
# Adjust Android stuff
if str(os_) == "Android" and definitions["CMAKE_SYSTEM_NAME"] == "Android":
arch_abi_settings = tools.to_android_abi(arch)
if arch_abi_settings:
definitions["CMAKE_ANDROID_ARCH_ABI"] = arch_abi_settings
definitions["ANDROID_ABI"] = arch_abi_settings
conan_cmake_android_ndk = os.getenv("CONAN_CMAKE_ANDROID_NDK")
if conan_cmake_android_ndk:
definitions["ANDROID_NDK"] = conan_cmake_android_ndk
definitions["ANDROID_PLATFORM"] = "android-%s" % op_system_version
definitions["ANDROID_TOOLCHAIN"] = compiler
# More details about supported stdc++ libraries here:
# https://developer.android.com/ndk/guides/cpp-support.html
if libcxx:
definitions["ANDROID_STL"] = libcxx
else:
definitions["ANDROID_STL"] = 'none'
logger.info("Setting Cross build flags: %s"
% ", ".join(["%s=%s" % (k, v) for k, v in definitions.items()]))
return definitions
def _get_make_program_definition(self):
make_program = os.getenv("CONAN_MAKE_PROGRAM") or self._make_program
if make_program:
if not tools.which(make_program):
self._output.warn("The specified make program '%s' cannot be found and will be "
"ignored" % make_program)
else:
self._output.info("Using '%s' as CMAKE_MAKE_PROGRAM" % make_program)
return {"CMAKE_MAKE_PROGRAM": make_program}
return {}
def get_definitions(self):
compiler = self._ss("compiler")
compiler_version = self._ss("compiler.version")
arch = self._ss("arch")
os_ = self._ss("os")
libcxx = self._ss("compiler.libcxx")
runtime = self._ss("compiler.runtime")
build_type = self._ss("build_type")
definitions = OrderedDict()
definitions.update(runtime_definition(runtime))
definitions.update(build_type_definition(self._forced_build_type, build_type,
self._generator, self._output))
if str(os_) == "Macos":
if arch == "x86":
definitions["CMAKE_OSX_ARCHITECTURES"] = "i386"
definitions.update(self._cmake_cross_build_defines())
definitions.update(self._get_cpp_standard_vars())
definitions.update(in_local_cache_definition(self._conanfile.in_local_cache))
if compiler:
definitions["CONAN_COMPILER"] = compiler
if compiler_version:
definitions["CONAN_COMPILER_VERSION"] = str(compiler_version)
# C, CXX, LINK FLAGS
if compiler == "Visual Studio":
if self._parallel:
flag = parallel_compiler_cl_flag(output=self._output)
definitions['CONAN_CXX_FLAGS'] = flag
definitions['CONAN_C_FLAGS'] = flag
else: # arch_flag is only set for non Visual Studio
arch_flag = architecture_flag(compiler=compiler, os=os_, arch=arch)
if arch_flag:
definitions['CONAN_CXX_FLAGS'] = arch_flag
definitions['CONAN_SHARED_LINKER_FLAGS'] = arch_flag
definitions['CONAN_C_FLAGS'] = arch_flag
if self._set_cmake_flags:
definitions['CMAKE_CXX_FLAGS'] = arch_flag
definitions['CMAKE_SHARED_LINKER_FLAGS'] = arch_flag
definitions['CMAKE_C_FLAGS'] = arch_flag
if libcxx:
definitions["CONAN_LIBCXX"] = libcxx
# Shared library
try:
definitions["BUILD_SHARED_LIBS"] = "ON" if self._conanfile.options.shared else "OFF"
except ConanException:
pass
# Install to package folder
try:
if self._conanfile.package_folder:
definitions["CMAKE_INSTALL_PREFIX"] = self._conanfile.package_folder
definitions["CMAKE_INSTALL_BINDIR"] = DEFAULT_BIN
definitions["CMAKE_INSTALL_SBINDIR"] = DEFAULT_BIN
definitions["CMAKE_INSTALL_LIBEXECDIR"] = DEFAULT_BIN
definitions["CMAKE_INSTALL_LIBDIR"] = DEFAULT_LIB
definitions["CMAKE_INSTALL_INCLUDEDIR"] = DEFAULT_INCLUDE
definitions["CMAKE_INSTALL_OLDINCLUDEDIR"] = DEFAULT_INCLUDE
definitions["CMAKE_INSTALL_DATAROOTDIR"] = DEFAULT_SHARE
except AttributeError:
pass
# fpic
if not str(os_).startswith("Windows"):
fpic = self._conanfile.options.get_safe("fPIC")
if fpic is not None:
shared = self._conanfile.options.get_safe("shared")
fpic_value = "ON" if (fpic or shared) else "OFF"
definitions["CONAN_CMAKE_POSITION_INDEPENDENT_CODE"] = fpic_value
# Adjust automatically the module path in case the conanfile is using the
# cmake_find_package or cmake_find_package_multi
install_folder = self._conanfile.install_folder.replace("\\", "/")
if "cmake_find_package" in self._conanfile.generators:
definitions["CMAKE_MODULE_PATH"] = install_folder
if "cmake_find_package_multi" in self._conanfile.generators:
# The cmake_find_package_multi only works with targets and generates XXXConfig.cmake
# that require the prefix path and the module path
definitions["CMAKE_PREFIX_PATH"] = install_folder
definitions["CMAKE_MODULE_PATH"] = install_folder
definitions.update(self._get_make_program_definition())
# Disable CMake export registry #3070 (CMake installing modules in user home's)
definitions["CMAKE_EXPORT_NO_PACKAGE_REGISTRY"] = "ON"
return definitions
|
the-stack_0_16269
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: mnist-keras.py
# Author: Yuxin Wu
import tensorflow as tf
from tensorflow import keras
from tensorpack import *
from tensorpack.contrib.keras import KerasPhaseCallback
from tensorpack.dataflow import dataset
from tensorpack.utils.argtools import memoized
KL = keras.layers
"""
This is an mnist example demonstrating how to use a Keras symbolic function inside tensorpack.
This way you can define models in Keras-style, and benefit from the more efficient trainers in tensorpack.
Note: this example does not work for replicated-style data-parallel trainers.
"""
IMAGE_SIZE = 28
@memoized # this is necessary for sonnet/Keras to work under tensorpack
def get_keras_model():
M = keras.models.Sequential()
M.add(KL.Conv2D(32, 3, activation='relu', input_shape=[IMAGE_SIZE, IMAGE_SIZE, 1], padding='same'))
M.add(KL.MaxPooling2D())
M.add(KL.Conv2D(32, 3, activation='relu', padding='same'))
M.add(KL.Conv2D(32, 3, activation='relu', padding='same'))
M.add(KL.MaxPooling2D())
M.add(KL.Conv2D(32, 3, padding='same', activation='relu'))
M.add(KL.Flatten())
M.add(KL.Dense(512, activation='relu', kernel_regularizer=keras.regularizers.l2(1e-5)))
M.add(KL.Dropout(0.5))
M.add(KL.Dense(10, activation=None, kernel_regularizer=keras.regularizers.l2(1e-5)))
return M
class Model(ModelDesc):
def inputs(self):
return [tf.TensorSpec((None, IMAGE_SIZE, IMAGE_SIZE), tf.float32, 'input'),
tf.TensorSpec((None,), tf.int32, 'label')]
def build_graph(self, image, label):
image = tf.expand_dims(image, 3) * 2 - 1
M = get_keras_model()
logits = M(image)
# build cost function by tensorflow
cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
cost = tf.reduce_mean(cost, name='cross_entropy_loss') # the average cross-entropy loss
# for tensorpack validation
acc = tf.cast(tf.nn.in_top_k(logits, label, 1), tf.float32)
acc = tf.reduce_mean(acc, name='accuracy')
summary.add_moving_summary(acc)
        wd_cost = tf.add_n(M.losses, name='regularize_loss') # this is how Keras manages regularizers
cost = tf.add_n([wd_cost, cost], name='total_cost')
summary.add_moving_summary(cost, wd_cost)
return cost
def optimizer(self):
lr = tf.train.exponential_decay(
learning_rate=1e-3,
global_step=get_global_step_var(),
decay_steps=468 * 10,
decay_rate=0.3, staircase=True, name='learning_rate')
tf.summary.scalar('lr', lr)
return tf.train.AdamOptimizer(lr)
def get_data():
train = BatchData(dataset.Mnist('train'), 128)
test = BatchData(dataset.Mnist('test'), 256, remainder=True)
return train, test
if __name__ == '__main__':
logger.auto_set_dir()
dataset_train, dataset_test = get_data()
cfg = TrainConfig(
model=Model(),
dataflow=dataset_train,
callbacks=[
KerasPhaseCallback(True), # for Keras training
ModelSaver(),
InferenceRunner(
dataset_test,
ScalarStats(['cross_entropy_loss', 'accuracy'])),
],
max_epoch=100,
)
launch_train_with_config(cfg, QueueInputTrainer())
|
the-stack_0_16270
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, sys, traceback
import simplejson
import openpyxl
from optparse import OptionParser
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait # available since 2.4.0
from selenium.webdriver.support import expected_conditions as EC # available since 2.26.0
fscache = {}
def get_file(deviceid, version, country):
filename = "%s_%s_%s.txt" % (deviceid, version, country)
    if filename not in fscache:  # dict.has_key() was removed in Python 3
fscache[filename] = open(filename, "wt")
return fscache[filename]
all_events = None
lines = open("part-00000.txt").readlines()
for line in lines:
line = line.strip()
    if line:
json = simplejson.loads(line)
all_events = json["events"]
if all_events is not None:
fh = get_file(json["deviceid"], json["ufversion"], json["country"])
for event in all_events:
if event["eventid"] == 2:
fh.write("scrambled: %s " % (event["scrambled"]) + simplejson.dumps(event) + "\n")
elif event["eventid"] != 23:
fh.write(simplejson.dumps(event) + "\n")
else:
fh.write ("{'eventid':23, crashes: [\n")
for reboot in event["reboots"]:
fh.write (simplejson.dumps (reboot) + "\n")
fh.write ("], swfatal: [")
for fatal in event["swfatal"]:
fh.write(simplejson.dumps(fatal) + "\n")
fh.write ("], hwfatal: [")
for fatal in event["hwfatal"]:
fh.write(simplejson.dumps(fatal) + "\n")
fh.write ("]} \n")
|
the-stack_0_16271
|
"""Module providing custom logging formatters and colorization for ANSI
compatible terminals."""
import inspect
import logging
import os
import random
import threading
from logging import LogRecord
from typing import Any, List
DEFAULT_LOG_FILE = os.path.join(os.sep, 'tmp', 'dftimewolf.log')
MAX_BYTES = 5 * 1024 * 1024
BACKUP_COUNT = 3
SUCCESS = 25 # 25 is right between INFO and WARNING
def _GenerateColorSequences() -> List[str]:
"""Generates ANSI codes for 256 colors.
Works on Linux and macOS, Windows (WSL) to be confirmed.
"""
sequences = []
for i in range(0, 16):
for j in range(0, 16):
code = str(i * 16 + j)
seq = '\u001b[38;5;' + code + 'm'
sequences.append(seq)
return sequences
COLOR_SEQS = _GenerateColorSequences()
RESET_SEQ = '\u001b[0m'
# Cherrypick a few interesting values. We still want the whole list of colors
# so that modules have a good number of colors to choose from.
# pylint: disable=unbalanced-tuple-unpacking
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = COLOR_SEQS[8:16]
BG_RED = '\u001b[41m' # Red background
BG_GREEN = '\u001b[42m' # Green background
BOLD = '\u001b[1m' # Bold / bright modifier
# We'll get something like this:
# [2020-07-09 18:06:05,187] [TimesketchExporter ] INFO Sketch 23 created
LOG_FORMAT = (
'[%(asctime)s] [{0:s}{color:s}%(name)-20s{1:s}] %(levelname)-8s'
' %(message)s')
LEVEL_COLOR_MAP = {
'WARNING': YELLOW,
'SUCCESS': BOLD + BG_GREEN + BLACK,
'INFO': WHITE,
'DEBUG': BLUE,
'CRITICAL': BOLD + BG_RED + WHITE,
'ERROR': RED
}
class WolfLogger(logging.getLoggerClass()): # type: ignore
"""Custom logging Class with a `success` logging function."""
def success(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=invalid-name
"""Logs a success message."""
super(WolfLogger, self).log(SUCCESS, *args, **kwargs)
logging.setLoggerClass(WolfLogger)
class WolfFormatter(logging.Formatter):
"""Helper class used to add color to log messages depending on their level."""
def __init__(
self,
colorize: bool = True,
random_color: bool = False,
threaded: bool = False,
**kwargs: Any) -> None:
"""Initializes the WolfFormatter object.
Args:
colorize (bool): If True, output will be colorized.
random_color (bool): If True, will colorize the module name with a random
color picked from COLOR_SEQS.
"""
self.threaded = threaded
self.colorize = colorize
kwargs['fmt'] = LOG_FORMAT.format('', '', color='')
if self.colorize:
color = ''
if random_color:
color = random.choice(COLOR_SEQS)
kwargs['fmt'] = LOG_FORMAT.format(BOLD, RESET_SEQ, color=color)
super(WolfFormatter, self).__init__(**kwargs)
def format(self, record: LogRecord) -> str:
"""Hooks the native format method and colorizes messages if needed.
Args:
record (logging.LogRecord): Native log record.
Returns:
str: The formatted message string.
"""
if self.colorize:
message = record.getMessage()
loglevel_color = LEVEL_COLOR_MAP.get(record.levelname)
if loglevel_color:
message = loglevel_color + message + RESET_SEQ
record.msg = message
if self.threaded:
stack = [i.function for i in inspect.stack()]
if 'Process' in stack:
thread_name = threading.current_thread().getName()
message = record.getMessage()
record.msg = "[{0:s}] {1:s}".format(thread_name, message)
return super(WolfFormatter, self).format(record)
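# --- Usage sketch (illustrative only, not part of the original module) ---
# The logger name and handler choices below are assumptions; they simply show
# how the pieces defined above fit together.
if __name__ == '__main__':
  logging.addLevelName(SUCCESS, 'SUCCESS')  # lets LEVEL_COLOR_MAP match the level name
  example_logger = logging.getLogger('example')  # a WolfLogger, since the class was registered above
  console_handler = logging.StreamHandler()
  console_handler.setFormatter(WolfFormatter(colorize=True, random_color=True))
  example_logger.addHandler(console_handler)
  example_logger.setLevel(logging.DEBUG)
  example_logger.info('This is an info message')
  example_logger.success('This is a success message')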
|
the-stack_0_16272
|
# Copyright (C) 2011 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ether_types
from ryu.lib.packet import ipv4, arp, udp, tcp, icmp
import ryu.app.blocked_ip as ip_class
class SimpleSwitch13(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
def __init__(self, *args, **kwargs):
super(SimpleSwitch13, self).__init__(*args, **kwargs)
self.mac_to_port = {}
@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def switch_features_handler(self, ev):
datapath = ev.msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
# install table-miss flow entry
#
# We specify NO BUFFER to max_len of the output action due to
# OVS bug. At this moment, if we specify a lesser number, e.g.,
# 128, OVS will send Packet-In with invalid buffer_id and
# truncated packet data. In that case, we cannot output packets
# correctly. The bug has been fixed in OVS v2.1.0.
match = parser.OFPMatch()
actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
ofproto.OFPCML_NO_BUFFER)]
self.add_flow(datapath, 0, match, actions)
def add_flow(self, datapath, priority, match, actions, buffer_id=None):
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
actions)]
if buffer_id:
mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
priority=priority, match=match,
instructions=inst)
else:
mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
match=match, instructions=inst)
datapath.send_msg(mod)
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _packet_in_handler(self, ev):
# If you hit this you might want to increase
# the "miss_send_length" of your switch
if ev.msg.msg_len < ev.msg.total_len:
self.logger.debug("packet truncated: only %s of %s bytes",
ev.msg.msg_len, ev.msg.total_len)
msg = ev.msg
datapath = msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
in_port = msg.match['in_port']
pkt = packet.Packet(msg.data)
eth = pkt.get_protocols(ethernet.ethernet)[0]
arp_pkt = pkt.get_protocol(arp.arp)
if eth.ethertype == ether_types.ETH_TYPE_LLDP:
# ignore lldp packet
return
dst = eth.dst
src = eth.src
dpid = datapath.id
self.mac_to_port.setdefault(dpid, {})
if arp_pkt:
# self.logger.info("ARP packet in %s %s", arp_pkt, eth)
if(arp_pkt.src_ip in ip_class.ip_class):
self.logger.info("Blocking Arp request of blocked ip: %s", arp_pkt.src_ip)
return
# learn a mac address to avoid FLOOD next time.
self.mac_to_port[dpid][src] = in_port
if dst in self.mac_to_port[dpid]:
out_port = self.mac_to_port[dpid][dst]
self.logger.info("Packet in : %s %s %s %s %s", dpid, src, dst, in_port, out_port)
else:
out_port = ofproto.OFPP_FLOOD
actions = [parser.OFPActionOutput(out_port)]
# install a flow to avoid packet_in next time
if out_port != ofproto.OFPP_FLOOD:
# check IP Protocol and create a match for IP
if eth.ethertype == ether_types.ETH_TYPE_IP:
ip = pkt.get_protocol(ipv4.ipv4)
srcip = ip.src
dstip = ip.dst
# print("IP packet:",ip)
if isinstance(ip, ipv4.ipv4):
print("IPV4 processing")
#print("packet details:-----------------------",ip_pkt)
if (ip.proto == 17):
print("UDP processing")
udp_pkt = pkt.get_protocol(udp.udp)
#print("packet details:-----------------------",udp_pkt)
if (ip.proto == 6):
print("TCP processing")
tcp_pkt = pkt.get_protocol(tcp.tcp)
#print("packet details:-----------------------",tcp_pkt)
if (ip.proto == 1):
print("ICMP processing")
icmp_pkt = pkt.get_protocol(icmp.icmp)
# print("packet details:-----------------------",icmp_pkt)
# self.logger.info("IP packet in %s %s %s %s", dpid, srcip, dstip, in_port)
self.logger.info("Blocked IPs : %s",ip_class.ip_class)
# if (srcip in ip_class.ip_class ):
# self.logger.info("IP %s is blocked ",srcip)
# return
match = parser.OFPMatch(eth_type=ether_types.ETH_TYPE_IP,
ipv4_src=srcip,
ipv4_dst=dstip,
in_port = in_port,
ip_proto = ip.proto
)
# verify if we have a valid buffer_id, if yes avoid to send both
# flow_mod & packet_out
if msg.buffer_id != ofproto.OFP_NO_BUFFER:
self.add_flow(datapath, 1, match, actions, msg.buffer_id)
return
else:
self.add_flow(datapath, 1, match, actions)
data = None
if msg.buffer_id == ofproto.OFP_NO_BUFFER:
data = msg.data
out = parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id,
in_port=in_port, actions=actions, data=data)
datapath.send_msg(out)
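# --- Notes (illustrative, not part of the original application) ---
# The import of ryu.app.blocked_ip above assumes a sibling module exposing a
# container named ip_class, e.g. (hypothetical contents):
#     ip_class = ['10.0.0.3', '10.0.0.4']
# With such a module on the PYTHONPATH, this controller can be started under
# ryu-manager, e.g. `ryu-manager <path_to_this_file>`.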
|
the-stack_0_16274
|
"""
Copyright (C) 2020 Piek Solutions LLC
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import pyvisa as visa
import time
import sys, traceback
import re as regex
import numpy as np
class VisaInstrument:
def __init__(self, ip, gpib_address):
"""
initialize visa instrument resource
:param ip: (str) ip address of Papaya
:param gpib_address: (str) GPIB address of instrument
"""
resource_name = "TCPIP0::%s::inst%s::INSTR" % (ip, gpib_address)
print(resource_name)
rm = visa.ResourceManager()
self.instr = rm.open_resource(resource_name)
self.instr.timeout = 10000
def close(self):
self.instr.close()
def cls(self):
try:
self.instr.write('*CLS')
except ValueError:
print('*CLS fails to clear')
def _set_ESE(self, x):
try:
cmd = '*ESE ' + str(x)
self.instr.write(cmd)
except ValueError:
print ('*ESE write fails')
    def _get_ESE(self):
try:
resp = self.instr.query('*ESE?')
self._output = float(resp)
except ValueError:
print('*ESE query fails')
return self._output
ESE = property(_get_ESE, _set_ESE, "ESE property")
def _set_SRE(self, x):
try:
cmd = '*SRE ' + str(x)
self.instr.write(cmd)
except ValueError:
print ('*SRE write fails')
    def _get_SRE(self):
try:
resp = self.instr.query('*SRE?')
self._output = float(resp)
except ValueError:
print('*SRE query fails')
return self._output
SRE = property(_get_SRE, _set_SRE, "SRE property")
def queryIDN(self):
try:
data = self.instr.query('*IDN?')
return data
except ValueError:
print('*IDN query fails')
class Keysight_N9030B(VisaInstrument):
def getTrace(self, tra='TRACE1'):
        count = 0
        tmp = ''
try:
            self.instr.write('trac:data? %s' % tra)
resp = self.instr.read()
flag = '\n' in resp
while not flag:
tmp = self.instr.read()
resp += tmp
flag = '\n' in tmp
count += 1
except visa.VisaIOError:
print('error getting trace')
print(tmp)
traceback.print_exc()
sys.exit(3)
ary = resp.split(',')
dd = np.array([float(c) for c in ary])
return dd
def getTraceXY(self, tra='san1'):
        count = 0
        tmp = ''
try:
            self.instr.write('fetch:%s?' % tra)
resp = self.instr.read()
flag = '\n' in resp
while not flag:
tmp = self.instr.read()
resp += tmp
flag = '\n' in tmp
count += 1
except visa.VisaIOError:
print('error getting xy trace')
print(tmp)
traceback.print_exc()
sys.exit(3)
ary = resp.split(',')
dd = np.array([float(c) for c in ary])
return dd
class Anritsu_M4647A(VisaInstrument):
def sweepOnce(self):
self.instr.write('TRS;WFS;HLD')
time.sleep(11)
def readSXX(self, fmt='OS11C'):
try:
self.instr.write(fmt) # C here refers to calibrated
resp = self.instr.read()
s = regex.findall(r'^#\d+', resp)[0] # get the first elm in string instead of list
pos = int(s[1]) + 3
_num = int(s[2:len(s)]) # total number of bytes to read
resp = resp[pos:len(resp)] # remove the header
cnt = len(resp)
while cnt < _num:
tmp = self.instr.read()
cnt += len(tmp)
resp += tmp
except visa.VisaIOError:
traceback.print_exc()
sys.exit(3)
# make them into real numbers
y = resp.split('\n')
y = y[0:len(y)-1] # last element is \n
real = np.zeros(len(y), dtype=float)
imag = np.zeros(len(y), dtype=float)
for i_ in range(0, len(y)):
valstr = y[i_].split(',') # split into real and imag
real[i_] = float(valstr[0])
imag[i_] = float(valstr[1])
c = real + 1.j*imag
return c
def freq(self):
try:
self.instr.write(':sens1:freq:data?')
resp = self.instr.read()
s = regex.findall(r'^#\d+', resp)[0] # get the first elm in string instead of list
pos = int(s[1]) + 3
_num = int(s[2:len(s)]) # total number of bytes to read
resp = resp[pos:len(resp)] # remove the header
cnt = len(resp)
while cnt < _num:
tmp = self.instr.read()
cnt += len(tmp)
resp += tmp
except visa.VisaIOError:
traceback.print_exc()
sys.exit(3)
y = resp.split('\n')
y = y[0:len(y)-1] # last element is \n
val = np.array([float(c) for c in y])
return val
class Keithley_2400(VisaInstrument):
def sourcetype(self, type):
if type == 'voltage':
self.instr.write(':SOUR:FUNC VOLT')
self.instr.write(':SENS:FUNC "CURR"')
elif type == 'current':
self.instr.write(':SOUR:FUNC CURR')
self.instr.write(':SENS:FUNC "VOLT"')
def setvoltage(self, vb, curlimit=0.05):
self.instr.write(':SENS:CURR:PROT %f' % curlimit)
self.instr.write(':SOUR:VOLT:LEV %f' % vb)
def querycurrent(self):
try:
self.instr.write(':FORM:ELEM CURR')
cur = self.instr.query('READ?')
c = float(cur)
except ValueError:
print('Keithley 2400 warning: current reading error...')
print(cur)
c = -1000
return float(c)
def setcurrent(self, cur, vlimit=2):
self.instr.write(':SENS:VOLT:PROT %f' % vlimit)
self.instr.write(':SOUR:CURR:LEV %s' % cur)
def _get_output(self):
try:
resp = self.instr.query(':OUTPUT?')
self._output = float(resp)
except ValueError:
print('Keithley 2400 query fails')
return self._output
def _set_output(self, x):
try:
cmd = ':OUTPUT ' + str(x)
self.instr.write(cmd)
except ValueError:
print('Keithley 2400 write fails')
self._output = x
output = property(_get_output, _set_output, "output property")
class Agilent_E3631(VisaInstrument):
def _get_outPutOnOff(self):
try:
resp = self.instr.query(':outp?')
self._outputOnOff = resp
except ValueError:
print('Agilent E3631 query outp fails')
return self._outputOnOff
def _set_outPutOnOff(self, x):
try:
cmd = 'outp ' + str(x)
self.instr.write(cmd)
except ValueError:
print('Agilent E3631 write outp fails')
self._outputOnOff = x
outputOnOff = property(_get_outPutOnOff, _set_outPutOnOff, "outputOnOff property")
def queryCurrent(self):
try:
resp=self.instr.query(':meas:curr:dc?')
except ValueError:
print('Agilent E3631 query current fails')
return float(resp)
def queryVoltage(self):
try:
resp=self.instr.query(':meas:volt:dc?')
except ValueError:
print('Agilent E3631 query voltage fails')
return float(resp)
def selectPowerSupply(self, x):
"""
select power supply instrument,
:param x: (int) 1 is P6V, 2 is P25V and 3 is N25V
:return: none
"""
try:
cmd = 'INST:NSEL ' + str(x)
self.instr.write(cmd)
except ValueError:
print('Agilent E3631 select power supply fails')
def setP6VSupply(self, x):
try:
# P6V is 1
self.instr.write('INST:NSEL 1')
cmd = 'volt ' + str(x)
self.instr.write(cmd)
except ValueError:
print('Agilent E3631 set P6V fails')
def queryP6VSetVoltage(self):
try:
# P6V is 1
self.instr.write('INST:NSEL 1')
val = self.instr.query('volt?')
except ValueError:
print('Agilent E3631 query P6V fails')
return float(val)
def setP25VSupply(self,x):
try:
# P25V is 2
self.instr.write('INST:NSEL 2')
cmd = 'volt ' + str(x)
self.instr.write(cmd)
except ValueError:
print('Agilent E3631 set P25V fails')
def queryP25VSetVoltage(self):
try:
# P25V is 2
self.instr.write('INST:NSEL 2')
val = self.instr.query('volt?')
except ValueError:
print('Agilent E3631 query P25V fails')
return float(val)
def setN25VSupply(self, x):
# N25V is 3
try:
self.instr.write('INST:NSEL 3')
cmd = 'volt ' + str(x)
self.instr.write(cmd)
except ValueError:
print('Agilent E3631 set N25V fails')
def queryN25VSetVoltage(self):
# N25V is 3
try:
self.instr.write('INST:NSEL 3')
val = self.instr.query('volt?')
except ValueError:
print('Agilent E3631 query N25V fails')
return float(val)
class Keysight_E3649A(VisaInstrument):
def _get_outputOnOff(self):
"""
query output state
:return: 0(OFF) or 1(ON)
"""
try:
resp = self.instr.query('OUTP?')
self._outputOnOff = resp.rstrip()
except ValueError:
print('Agilent E3649A query outp on/off fails')
return self._outputOnOff
def _set_outputOnOff(self, x):
"""
turn output on or off
:param x: either ON or OFF
:return: None
"""
try:
self.instr.write('OUTP ' + str(x))
except ValueError:
print('Agilent E3649A write outp on/off fails')
self._outputOnOff = x
outputOnOff = property(_get_outputOnOff, _set_outputOnOff, "outputOnOff property")
def queryCurrent(self, output_num=None):
"""
query current of selected output
:param output_num: (int) the output to query (None|1|2);
default value None uses the output previously set.
:return: (float) current
"""
try:
if output_num:
self.instr.write('INST:NSEL ' + str(output_num))
resp = self.instr.query('MEAS:CURR:DC?')
return float(resp)
        except (visa.VisaIOError, ValueError):
print('Agilent E3649A query current fails')
def setCurrent(self, curr, output_num=None):
"""
query current of selected output
:param curr: (float) the desired current level
:param output_num: (int) the output to query (None|1|2);
default value None uses the output previously set.
:return: None
"""
try:
if output_num:
self.instr.write('INST:NSEL ' + str(output_num))
self.instr.write('CURR ' + str(curr))
        except (visa.VisaIOError, ValueError):
print('Agilent E3649A query current fails')
def queryVoltage(self, output_num=None):
"""
query voltage of selected output
:param output_num: (int) the output to read (None|1|2);
default value None uses the output previously set.
:return: (float) voltage
"""
try:
if output_num:
self.instr.write('INST:NSEL ' + str(output_num))
resp = self.instr.query('MEAS:VOLT:DC?')
return float(resp)
        except (visa.VisaIOError, ValueError):
print('Agilent E3649A query voltage fails')
def setVoltage(self, volt, output_num=None):
"""
set voltage of selected output
:param volt: (float) the desired voltage level
:param output_num: (int) the output to set (None|1|2);
default value None uses the output previously set.
:return: None
"""
try:
if output_num:
self.instr.write('INST:NSEL ' + str(output_num))
self.instr.write('VOLT ' + str(volt))
        except (visa.VisaIOError, ValueError):
print('Agilent E3649A set voltage fails')
def selectOutput(self, output_num):
"""
select which output to modify
:param output_num: (int) the output to modify (1|2)
:return: None
"""
try:
self.instr.write('INST:NSEL ' + str(output_num))
except visa.VisaIOError:
print('Agilent E3649A select output fails')
def queryOutputRange(self, output_num=None):
"""
query range setting of selected output
:param output_num: (int) the output to read (None|1|2);
default value None uses the output previously set.
:return: (str) P35V or P60V
"""
try:
if output_num:
self.instr.write('INST:NSEL ' + str(output_num))
resp = self.instr.query(':VOLT:RANG?')
return resp.rstrip()
except visa.VisaIOError:
print('Agilent E3649A query output range fails')
def setOutputRange(self, volt_range, output_num=None):
"""
set voltage range of selected output
:param volt_range: the voltage range to set output to (P35V|LOW|P60V|HIGH)
:param output_num: (int) the output to modify (None|1|2);
default value None uses the output previously set.
:return: None
"""
try:
if output_num:
self.instr.write('INST:NSEL ' + str(output_num))
self.instr.write(':VOLT:RANG ' + str(volt_range))
except visa.VisaIOError:
print('Agilent E3649A set output voltage fails')
def setOutputLow(self, output_num=None):
"""
set voltage range of selected output to 35V
:param output_num: (int) the output to modify (None|1|2);
default value None uses the output previously set.
:return: None
"""
try:
if output_num:
self.instr.write('INST:NSEL ' + str(output_num))
self.instr.write(':VOLT:RANG LOW')
except visa.VisaIOError:
print('Agilent E3649A set output voltage LOW fails')
def setOutputHigh(self, output_num=None):
"""
set voltage range of output to 60V
:param output_num: (int) the output to modify (None|1|2);
default value None uses the output previously set.
:return: None
"""
try:
if output_num:
self.instr.write('INST:NSEL ' + str(output_num))
self.instr.write(':VOLT:RANG HIGH')
except visa.VisaIOError:
print('Agilent E3649A set output voltage HIGH fails')
def enableVoltageProtection(self, enable=1, output_num=None):
"""
enable or disable the overvoltage protection function.
:param enable: (0|1|OFF|ON)
:param output_num: output_num: (int) the output to modify (None|1|2);
default value None uses the output previously set.
:return: None
"""
try:
if output_num:
self.instr.write('INST:NSEL ' + str(output_num))
self.instr.write(':VOLT:PROT:STAT ' + str(enable))
except visa.VisaIOError:
print('Agilent E3649A enable voltage protection fails')
def setVoltageProtection(self, volt, output_num=None):
"""
set the voltage level at which the overvoltage protection
(OVP) circuit will trip.
:param volt: voltage level, 'MIN', or 'MAX'
:param output_num: (int) the output to modify (None|1|2);
default value None uses the output previously set.
:return: None
"""
try:
if output_num:
self.instr.write('INST:NSEL ' + str(output_num))
self.instr.write(':VOLT:PROT ' + str(volt))
except visa.VisaIOError:
print('Agilent E3649A set output voltage protection fails')
def queryVoltageProtection(self, output_num=None):
"""
query the protection state and voltage level at which the
overvoltage protection (OVP) circuit will trip.
:param output_num: (int) the output to modify (None|1|2);
default value None uses the output previously set.
:return: tuple (int, str) consisting of enable 0 (OFF) or 1 (ON)
and the voltage trip level.
"""
try:
ena = self.instr.query('VOLT:PROT:STAT?')
level = self.instr.query('VOLT:PROT?')
return ena.rstrip(), level.rstrip()
except visa.VisaIOError:
print('Agilent E3649A query output voltage protection fails')
class Agilent_33401(VisaInstrument):
def acVoltage(self):
try:
self.instr.write(':meas:volt:ac?')
resp = self.instr.read()
return float(resp)
except ValueError:
print('Agilent 33401 query ac volt fails')
def acCurrent(self):
try:
self.instr.write(':meas:curr:ac?')
resp = self.instr.read()
return float(resp)
except ValueError:
print('Agilent 33401 query ac curr fails')
def dcVoltage(self):
try:
self.instr.write(':meas:volt:dc?')
resp = self.instr.read()
return float(resp)
except ValueError:
print('Agilent 33401 query dc volt fails')
def dcCurrent(self):
try:
self.instr.write(':meas:curr:dc?')
resp = self.instr.read()
return float(resp)
except ValueError:
print('Agilent 33401 query dc curr fails')
class Keithley_2510(VisaInstrument):
def querytemp(self):
try:
self.instr.write(':MEAS:TEMP?')
temp = self.instr.read()
t = float(temp)
except ValueError:
print('Keithley 2510 warning: temp read error...')
print(temp)
t = -1000
return float(t)
    def settemp(self, setT=25.0):
        self.instr.write(':SOUR:TEMP %f' % setT)
def _get_output(self):
try:
resp = self.instr.query(':OUTPUT?')
self._output = float(resp)
except ValueError:
print('Keithley 2510 query outp fails')
return self._output
def _set_output(self, x):
try:
cmd = ':OUTPUT ' + str(x)
self.instr.write(cmd)
except ValueError:
print('Keithley 2510 write outp fails')
self._output = x
output = property(_get_output, _set_output, "output property")
class Newport_3150(VisaInstrument):
def querytemp(self):
temp = self.instr.query(':TEC:T?')
try:
t = float(temp)
except ValueError:
print('Newport 3150 warning: temp read error...')
print(temp)
t = -1000
return float(t)
    def settemp(self, setT=25.0):
        self.instr.write(':TEC:T %f' % setT)
class Agilent_8163(VisaInstrument):
def queryIDN(self):
try:
resp = self.instr.query('*IDN?')
except ValueError:
print('Agilent 8163 fails query')
return resp
def querypower(self):
try:
opt = self.instr.query('READ:POW?')
except ValueError:
print('Agilent 8163 fails query')
return float(opt)
class Keysight_Dca(VisaInstrument):
def initialize(self): # initiallize for PAM4 measurement
pass
def get_er(self, source='1', ch='2A'):
cmd = ':MEASure:EYE:OER:SOURce'+source+' CHAN'+ch
self.instr.write(cmd)
try:
er = self.instr.query(':MEASure:EYE:OER?')
return float(er)
except ValueError:
print('Keysight dca error')
def getOMA(self, source='1', ch='2A'):
cmd = ':MEASure:EYE:OOMA:SOURce'+source+' CHAN'+ch
self.instr.write(cmd)
try:
oma = self.instr.query(':MEASure:EYE:OOMA?')
return float(oma)
except ValueError:
print('Keysight dca error')
def getRLM(self, source='1', ch='2A'):
cmd = ':MEASure:EYE:PAM:LINearity:SOURce'+source+' CHAN'+ch
self.instr.write(cmd)
try:
rlm = self.instr.query(':MEASure:EYE:PAM:LINearity?')
return float(rlm)
except ValueError:
print('Keysight dca error')
def autoscale(self):
self.instr.write(':SYSTem:AUToscale')
try:
self.instr.query('*OPC?')
except ValueError:
print('Keysight dca error')
def clear(self):
self.instr.write(':ACQuire:CDISplay')
try:
self.instr.query('*OPC?')
except ValueError:
print('Keysight dca error')
def run(self):
self.instr.write(':ACQuire:RUN')
class Agilent_86142(VisaInstrument):
def _get_startWavelength(self):
try:
resp = self.instr.query(':sens:wav:star?')
self._startWavelength = float(resp)
except ValueError:
print('Agilent 86142 query fails')
return self._startWavelength
def _set_startWavelength(self, x):
try:
cmd = ':sens:wav:star ' + str(x)
self.instr.write(cmd)
self._startWavelength = x
except visa.VisaIOError:
print('Agilent 86142 write fails')
startWavelength = property(_get_startWavelength, _set_startWavelength, "startWavelength property")
    def _get_stopWavelength(self):
        try:
            resp = self.instr.query(':sens:wav:stop?')
            self._stopWavelength = float(resp)
        except ValueError:
            print('Agilent 86142 query fails')
        return self._stopWavelength
def _set_stopWavelength(self, x):
try:
cmd = ':sens:wav:stop ' + str(x)
self.instr.write(cmd)
self._stopWavelength = x
except visa.VisaIOError:
print('Agilent 86142 write fails')
stopWavelength = property(_get_stopWavelength, _set_stopWavelength, "stopWavelength property")
def _get_traceLength(self):
try:
resp = self.instr.query(':SENS:SWE:POIN?')
self._traceLength = float(resp)
except ValueError:
print('Agilent 86142 query fails')
return self._traceLength
def _set_traceLength(self, x):
try:
cmd = ':SENS:SWE:POIN ' + str(x)
self.instr.write(cmd)
self._traceLength = x
except ValueError:
print('Agilent 86142 write fails')
traceLength = property(_get_traceLength, _set_traceLength, "traceLength property")
def getTrace(self):
tmp = ''
try:
self.instr.write('form ascii')
self.instr.write('trac? tra')
resp = self.instr.read()
flag = '\n' in resp
count = 0
while not flag:
tmp = self.instr.read()
resp += tmp
flag = '\n' in tmp
count += 1
except visa.VisaIOError:
print('error')
print(tmp)
traceback.print_exc()
sys.exit(3)
return resp
def getTrace1(self, pts):
tmp = ''
elmcount = []
count = 0
itr=0
try:
self.instr.write('form ascii')
self.instr.write('trac? tra')
resp = self.instr.read()
count += len(resp.split(','))
while count < pts:
tmp = self.instr.read()
count += len(tmp.split(','))
elmcount.append(count)
resp += tmp
itr += 1
except visa.VisaIOError:
print('error')
print(tmp)
traceback.print_exc()
sys.exit(3)
return resp
def getTraceBin(self):
try:
self.instr.write('form real32')
self.instr.write('trac? tra')
resp = self.instr.read()
return resp
except ValueError:
print('Agilent 86142 write fails')
class JDSU_HA9(VisaInstrument):
_attenuation = 0
_beamIsBlocked = 0
def _get_attenuation(self):
try:
resp = self.instr.query('att?')
self._attenuation = float(resp)
except ValueError:
print('JDSU HA9 query fails')
return self._attenuation
def _set_attenuation(self, x):
try:
cmd = 'att ' + str(x)
self.instr.write(cmd)
self._attenuation = x
except ValueError:
print('JDSU HA9 write fails')
attenuation = property(_get_attenuation, _set_attenuation, "attenuation property")
def _get_beamIsBlocked(self):
try:
resp = self.instr.query('D?')
self._beamIsBlocked = int(resp)
except ValueError:
print('JDSU HA9 query fails')
return self._beamIsBlocked
def _set_beamIsBlocked(self, x):
try:
cmd = 'D ' + str(int(x))
self.instr.write(cmd)
self._beamIsBlocked = int(x)
except ValueError:
print('JDSU HA9 write fails')
beamIsBlocked = property(_get_beamIsBlocked, _set_beamIsBlocked, "beamIsBlock property")
class N9020A_SpectrumAnalyzer(VisaInstrument):
_inputCoupling = 'DC' # default
_bandwidthResolution_MHz = 0.5
_bandwidthVideo_MHz = 10
_sweepPoints = 1001
_startFreqMHz = 10e-3
_stopFreqMHz = 1350
_traceAve = 1
_contSweep = 0
def _set_contSweep(self, x=1):
try:
cmd = ':INIT:CONT ' + str(x)
self.instr.write(cmd)
self._contSweep = str(x)
except ValueError:
print('N9020A fails to set cont sweep config')
def _get_contSweep(self):
try:
resp = self.instr.query(':INIT:CONT?')
            self._contSweep = resp
except ValueError:
print('N9020A fails to get cont sweep config')
return self._contSweep
contSweep = property(_get_contSweep, _set_contSweep, 'input coupling property')
def _set_inputCoupling(self, x='DC'):
try:
cmd = 'INPut:COUPling ' + str(x)
self.instr.write(cmd)
self._inputCoupling = str(x)
except ValueError:
print('N9020A fails to set input coupling')
def _get_inputCoupling(self):
try:
resp = self.instr.query('INP:COUP?')
self._inputCoupling = resp
except ValueError:
print('N9020A fails to get input coupling')
return self._inputCoupling
inputCoupling = property(_get_inputCoupling, _set_inputCoupling, 'input coupling property')
def _set_bandwidthResolution_MHz(self,x=0.5):
try:
cmd = 'BANDWIDTH:RESOLUTION ' + str(x) + ' MHZ'
self.instr.write(cmd)
self._bandwidthResolution_MHz = float(x)
except ValueError:
print('N9020A fails to set bandwidth resolution')
def _get_bandwidthResolution_MHz(self):
try:
resp = self.instr.query('BANDWIDTH:RESOLUTION?')
self._bandwidthResolution_MHz = float(resp)/1e6 # in MHz
except ValueError:
print('N9020A fails to get bandwidth resolution')
return self._bandwidthResolution_MHz
resolutionBW_MHz = property(_get_bandwidthResolution_MHz, _set_bandwidthResolution_MHz, 'bandwidth resolution property')
    def _set_bandwidthVideo_MHz(self, x=0.5):
        try:
            cmd = 'BANDWIDTH:VIDEO ' + str(x) + ' MHZ'
            self.instr.write(cmd)
            self._bandwidthVideo_MHz = float(x)
        except ValueError:
            print('N9020A fails to set video bandwidth')
    def _get_bandwidthVideo_MHz(self):
        try:
            resp = self.instr.query('BANDWIDTH:VIDEO?')
            self._bandwidthVideo_MHz = float(resp)/1e6  # in MHz
        except ValueError:
            print('N9020A fails to get video bandwidth')
        return self._bandwidthVideo_MHz
videoBW_MHz = property(_get_bandwidthVideo_MHz, _set_bandwidthVideo_MHz, 'video bandwidth property')
def _set_sweepPoints(self,x=1001):
try:
cmd = 'SWEEP:POINTS ' + str(x)
self.instr.write(cmd)
self._sweepPoints = int(x)
except ValueError:
print('N9020A fails to set sweep points')
def _get_sweepPoints(self):
try:
resp = self.instr.query('SWEEP:POINTS?')
self._sweepPoints = int(resp) # in MHz
except ValueError:
print('N9020A fails to get sweep points')
return self._sweepPoints
sweepPoints = property(_get_sweepPoints, _set_sweepPoints, 'sweep points')
def _set_startFreqMHz(self,x=10e-3):
try:
cmd = 'FREQUENCY:START ' + str(x) + ' MHZ'
self.instr.write(cmd)
self._startFreqMHz = float(x)
except ValueError:
print('N9020A fails to set start frequency')
def _get_startFreqMHz(self):
try:
resp = self.instr.query('FREQUENCY:START?')
self._startFreqMHz = float(resp)/1e6 # in MHz
except ValueError:
            print('N9020A fails to get start frequency')
return self._startFreqMHz
startFreqMHz = property(_get_startFreqMHz, _set_startFreqMHz,'start frequency property')
def _set_stopFreqMHz(self, x=13.5e3):
try:
cmd = 'FREQUENCY:STOP ' + str(x) + ' MHZ'
self.instr.write(cmd)
self._stopFreqMHz = float(x)
except ValueError:
            print('N9020A fails to set stop frequency')
def _get_stopFreqMHz(self):
try:
resp = self.instr.query('FREQUENCY:STOP?')
self._stopFreqMHz = float(resp)/1e6 # in MHz
except ValueError:
print('N9020A fails to get stop frequency')
return self._stopFreqMHz
    stopFreqMHz = property(_get_stopFreqMHz, _set_stopFreqMHz, 'stop frequency property')
def _set_traceAve(self, x=1):
try:
if x >= 1:
cmd = 'ACP:AVER:COUN ' + str(x)
self.instr.write(cmd)
if x == 0:
self.instr.write('ACPower:AVERage OFF')
self._traceAve = int(x)
except ValueError:
print('N9020A fails to set trace average')
def _get_traceAve(self):
try:
resp = self.instr.query('ACPower:AVERage:COUNt?')
self._traceAve = int(resp)
except ValueError:
            print('N9020A fails to get trace average')
return self._traceAve
traceAve = property(_get_traceAve, _set_traceAve, 'trace average')
def getTrace(self):
_points = self._get_sweepPoints()
_stopf = self._get_stopFreqMHz()
_startf = self._get_startFreqMHz()
_freq = np.linspace(_startf, _stopf, _points)
tmp = ''
try:
self.instr.write('FORMAT:TRACE:DATA ASCII')
self.instr.write('TRAC? TRACE1')
resp = self.instr.read()
flag = '\n' in resp
count = 0
while not flag:
tmp = self.instr.read()
resp += (tmp)
flag = '\n' in tmp
count += 1
except visa.VisaIOError:
print('N9020A get trace error')
print(tmp)
resp = tmp
traceback.print_exc()
sys.exit(3)
resp = resp.split(',')
y = [float(d) for d in resp]
y = np.array(y)
return _freq, y
def setMarkerPos(self,pos=0):
_points = self._get_sweepPoints()
cmd = 'calc:mark1:X:pos:cent ' + str(pos)
try:
if pos < _points:
self.instr.write(cmd)
except visa.VisaIOError:
print('N9020A write error: ' + cmd)
def getMarkerNoise(self, pos=0):
# cmd = 'CALC:MARK:FUNCNOIS'
try:
# self.instr.write(cmd)
self.setMarkerPos(pos)
val = self.instr.query('CALC:MARK:Y?')
return float(val)
except visa.VisaIOError:
print('N9020A getMarkerNoise error')
def getMarkerNoiceTrace(self):
_points = self._get_sweepPoints()
_stopf = self._get_stopFreqMHz()
_startf = self._get_startFreqMHz()
_freq = np.linspace(_startf, _stopf, _points)
try:
self.instr.write('CALC:MARK:FUNCNOIS')
_points = self._get_sweepPoints()
except visa.VisaIOError:
print('N9020A getMarkerNoiceTrace error')
# preallocate array
data = np.zeros(_points, dtype=float)
try:
for i in range(0, _points,1):
self.instr.write('calc:mark1:X:pos:cent %d' % i)
val = self.instr.query('CALC:MARK:Y?')
data[i] = float(val)
except ValueError:
print('N9020A getMarkerNoiceTrace error')
return _freq, data
def setTraceType(self, x='WRITe'):
try:
cmd = 'trace1:type %s' % x
self.instr.write(cmd)
except visa.VisaIOError:
print('N9020A trace type write error %s' % x)
def getTraceType(self):
try:
cmd = 'trace1:type?'
resp = self.instr.query(cmd)
except visa.VisaIOError:
print('N9020A trace type query error')
return resp
class Agilent_86122A(VisaInstrument):
def getFreq(self):
try:
self.instr.write(':MEAS:SCAL:POW:FREQ?')
resp = float(self.instr.read())
return resp
except visa.VisaIOError:
print('Agilent 86122A error')
def getMultipleFreq(self):
try:
self.instr.write(':MEAS:ARR:POW:FREQ?')
resp = self.instr.read()
return resp
except visa.VisaIOError:
print('Agilent 86122A error')
class Agilent_N5183B(VisaInstrument):
def _get_outPutOnOff(self):
try:
resp = self.instr.query(':outp?')
self._outputOnOff = resp
except ValueError:
print('Agilent N5183B query fails')
return self._outputOnOff
def _set_outPutOnOff(self, x):
try:
cmd = 'outp ' + str(x)
self.instr.write(cmd)
self._outputOnOff = x
except ValueError:
print('Agilent N5183B write fails')
outputOnOff = property(_get_outPutOnOff, _set_outPutOnOff, "outputOnOff property")
def setFreq(self, freq_Hz=1000000):
try:
cmd = ':freq ' + str(freq_Hz)
self.instr.write(cmd)
except ValueError:
print('Agilent N5183B write fails')
    def getFreq(self):
        try:
            resp = self.instr.query(':freq?')
            return float(resp)
        except ValueError:
            print('Agilent N5183B query fails')
def setPowerLevel(self, pow_dBm=-20.0):
try:
cmd = ':pow:lev %d' % pow_dBm
self.instr.write(cmd)
except ValueError:
print('Agilent N5183B write fails')
def getPowerLevel(self):
try:
cmd = ':pow:lev?'
resp = self.instr.query(cmd)
return float(resp)
except ValueError:
print('Agilent N5183B query fails')
class SRS(VisaInstrument):
_pidPolarity = 0
_pidLoop = 0
def PIDConnect(self):
try:
self.instr.write('CONN 7, \"ZZZ\"')
time.sleep(1)
except ValueError:
print('SRS Connect fails')
def PIDDiscoonect(self):
try:
self.instr.write('\"ZZZ\"')
except ValueError:
print('SRS Disconnect fails')
    def _PIDPolaritySet(self, pol=0):
        try:
            self.instr.write('APOL %d' % int(pol))
            self._pidPolarity = int(pol)
        except ValueError:
            print('SRS APOL set fails')
    def _PIDPolarityGet(self):
        try:
            resp = self.instr.query('APOL?')
            self._pidPolarity = int(resp)
        except ValueError:
            print('SRS APOL get fails')
        return self._pidPolarity
PIDPolarity = property(_PIDPolarityGet, _PIDPolaritySet, 'PID Polarity')
def _setPIDLoop(self, loop=0):
try:
self.instr.write('AMAN %d' % int(loop))
except ValueError:
print('SRS AMAN set fails')
self._pidLoop = int(loop)
def _getPIDLoop(self):
try:
resp = self.instr.query('AMAN?')
self._pidLoop = int(resp)
except ValueError:
print('SRS AMAN get fails')
return self._pidLoop
PIDLoop = property(_getPIDLoop, _setPIDLoop, 'PID Loop on/off')
def setMout(self, val=0):
cmd = 'MOUT %f' % val
print('setting Mout %s' % cmd)
try:
self.instr.write(cmd)
except ValueError:
print('SRS MOUT set fails')
def getMout(self):
try:
resp = self.instr.query('MOUT?')
return float(resp)
except ValueError:
print('SRS MOUT get fails')
class Agilent8163A(VisaInstrument):
def setVoa(self, x):
try:
cmd = ':INPUT1:CHAN1:att ' + str(x)
self.instr.write(cmd)
except ValueError:
print('Agilent 8163A write fails')
def getVoa(self):
try:
cmd = ':INPUT1:CHAN1:att?'
val = self.instr.query(cmd)
return float(val)
except ValueError:
print('Agilent 8163A query fails')
def getOpm(self, ch):
try:
self.instr.write('*CLS')
power = self.instr.query(':FETC2:CHAN{}:POW? '.format(ch))
return float(power)
except ValueError:
print('Agilent 8163A query error')
    def initOpm(self):
        try:
            self.instr.write('*CLS')
            for i in range(1, 2):
                self.instr.write(':SENS2:CHAN{}:POW:WAV 1550.0nm'.format(i))
                self.instr.write(':SENS2:CHAN{}:POW:ATIM 200ms'.format(i))
        except ValueError:
            print('Agilent 8163A write error')
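# --- Usage sketch (illustrative only; the IP and GPIB address are placeholders) ---
# Any instrument class in this module follows the same open/query/close pattern.
if __name__ == '__main__':
    sa = N9020A_SpectrumAnalyzer('192.168.1.100', '18')  # assumed Papaya IP / GPIB address
    print(sa.queryIDN())
    sa.startFreqMHz = 10
    sa.stopFreqMHz = 1000
    freqs, amplitudes = sa.getTrace()
    print(freqs[0], amplitudes[0])
    sa.close()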
|
the-stack_0_16275
|
# -*- coding: utf-8 -*-
def main():
import sys
sys.setrecursionlimit(10 ** 8)
input = sys.stdin.readline
n = int(input())
graph = [[] for _ in range(n)]
ab = list()
dp = [0] * n
for _ in range(n - 1):
ai, bi = map(int, input().split())
ai -= 1
bi -= 1
ab.append((ai, bi))
graph[ai].append(bi)
graph[bi].append(ai)
def dfs(pos, parent=-1):
dp[pos] = 1
for v in graph[pos]:
if v == parent:
continue
dfs(v, pos)
dp[pos] += dp[v]
dfs(0)
ans = 0
for ai, bi in ab:
count = min(dp[ai], dp[bi])
ans += count * (n - count)
print(ans)
if __name__ == "__main__":
main()
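# Worked example (illustrative, not part of the original submission): for the input
#     3
#     1 2
#     2 3
# the tree is the path 1-2-3. Removing edge (1,2) leaves sides of sizes 1 and 2,
# and removing edge (2,3) likewise, so the program prints 1*2 + 1*2 = 4, i.e. the
# sum over all vertex pairs of their tree distance.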
|
the-stack_0_16276
|
# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='EncoderDecoder',
pretrained='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_224-80ecf9dd.pth', # noqa
backbone=dict(
type='VisionTransformer',
img_size=(512, 512),
patch_size=16,
in_channels=3,
embed_dims=768,
num_layers=12,
num_heads=12,
mlp_ratio=4,
out_indices=(2, 5, 8, 11),
qkv_bias=True,
drop_rate=0.0,
attn_drop_rate=0.0,
drop_path_rate=0.0,
with_cls_token=True,
norm_cfg=dict(type='LN', eps=1e-6),
act_cfg=dict(type='GELU'),
norm_eval=False,
interpolate_mode='bicubic'),
neck=dict(
type='MultiLevelNeck',
in_channels=[768, 768, 768, 768],
out_channels=768,
scales=[4, 2, 1, 0.5]),
decode_head=dict(
type='UPerHead',
in_channels=[768, 768, 768, 768],
in_index=[0, 1, 2, 3],
pool_scales=(1, 2, 3, 6),
channels=512,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
auxiliary_head=dict(
type='FCNHead',
in_channels=768,
in_index=3,
channels=256,
num_convs=1,
concat_input=False,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
# model training and testing settings
train_cfg=dict(),
test_cfg=dict(mode='whole')) # yapf: disable
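# --- Usage sketch (illustrative; exact APIs depend on the installed mmcv /
# mmsegmentation versions, so treat the calls below as assumptions) ---
#     from mmcv import Config
#     from mmseg.models import build_segmentor
#     cfg = Config.fromfile('path/to/this_config.py')  # hypothetical path
#     segmentor = build_segmentor(cfg.model)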
|
the-stack_0_16277
|
#!/usr/bin/env python3
import argparse
import logging
import mariadb
import yaml
import sys
# Setup logging
# Create logger
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# Setup console logging
logging_console_handler = logging.StreamHandler()
logging_formatter = logging.Formatter("%(asctime)s [%(levelname)s] %(message)s")
logging_console_handler.setFormatter(logging_formatter)
logger.addHandler(logging_console_handler)
def main(argv):
args_parser = argparse.ArgumentParser(description="Setup a global MySQL metadata database for CLP.")
args_parser.add_argument("--config-file", required=True, help="Metadata database basic config file.")
parsed_args = args_parser.parse_args(argv[1:])
config_file_path = parsed_args.config_file
with open(config_file_path, 'r') as f:
config = yaml.safe_load(f)
if config is None:
raise Exception(f"Unable to parse configuration from {config_file_path}.")
required_keys = ["host", "port", "username", "password", "name"]
for key in required_keys:
if key not in config:
raise Exception(f"'{key}' missing from config file.")
host = config["host"]
port = config["port"]
username = config["username"]
password = config["password"]
db_name = config["name"]
table_prefix = config["table_prefix"]
try:
mysql_conn = mariadb.connect(host=host, port=port, username=username, password=password)
mysql_cursor = mysql_conn.cursor()
except mariadb.Error as err:
logger.error("Failed to connect - {}".format(err.msg))
return -1
try:
# Create database
try:
mysql_cursor.execute("CREATE DATABASE IF NOT EXISTS {} DEFAULT CHARACTER SET 'utf8'".format(db_name))
except mariadb.Error as err:
logger.error("Failed to create database - {}".format(err.msg))
return -1
# Use database
try:
mysql_cursor.execute("USE {}".format(db_name))
except mariadb.Error as err:
logger.error("Failed to use database - {}".format(err.msg))
return -1
# Create tables
try:
mysql_cursor.execute(f"""CREATE TABLE IF NOT EXISTS `{table_prefix}archives` (
`pagination_id` BIGINT unsigned NOT NULL AUTO_INCREMENT,
`id` VARCHAR(64) NOT NULL,
`uncompressed_size` BIGINT NOT NULL,
`size` BIGINT NOT NULL,
`creator_id` VARCHAR(64) NOT NULL,
`creation_ix` INT NOT NULL,
KEY `archives_creation_order` (`creator_id`,`creation_ix`) USING BTREE,
UNIQUE KEY `archive_id` (`id`) USING BTREE,
PRIMARY KEY (`pagination_id`)
)""")
mysql_cursor.execute(f"""CREATE TABLE IF NOT EXISTS `{table_prefix}files` (
`id` VARCHAR(64) NOT NULL,
`orig_file_id` VARCHAR(64) NOT NULL,
`path` VARCHAR(12288) NOT NULL,
`begin_timestamp` BIGINT NOT NULL,
`end_timestamp` BIGINT NOT NULL,
`num_uncompressed_bytes` BIGINT NOT NULL,
`num_messages` BIGINT NOT NULL,
`archive_id` VARCHAR(64) NOT NULL,
KEY `files_path` (`path`(768)) USING BTREE,
KEY `files_archive_id` (`archive_id`) USING BTREE,
PRIMARY KEY (`id`)
) ROW_FORMAT=DYNAMIC""")
except mariadb.Error as err:
logger.error("Failed to create table - {}".format(err.msg))
return -1
mysql_conn.commit()
finally:
mysql_cursor.close()
mysql_conn.close()
return 0
if "__main__" == __name__:
sys.exit(main(sys.argv))
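# --- Example (illustrative only) ---
# The script expects a YAML config containing the keys read above; the values
# here are placeholders:
#     host: 127.0.0.1
#     port: 3306
#     username: clp_user
#     password: changeme
#     name: clp_db
#     table_prefix: clp_
# It can then be invoked as `python3 <this_script>.py --config-file metadata-db.yml`.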
|
the-stack_0_16280
|
import uuid
import arrow
def is_uuid(data):
"""Check is data is a valid uuid. If data is a list,
checks if all elements of the list are valid uuids"""
temp = [data] if not isinstance(data, list) else data
for i in temp:
try:
uuid.UUID(str(i), version=4)
except ValueError:
return False
return True
def to_list(obj):
""" Return a list containing obj if obj is not already an iterable"""
try:
iter(obj)
return obj
except TypeError:
return [obj]
def get_start_end_ts(day=None):
    """Return UTC (start, end) timestamps spanning the given day, or yesterday if day is None."""
if not day:
# yesterday at midnight
date = arrow.utcnow().shift(days=-1).floor("day")
else:
# given day, at midnight (arrow works in UTC by default)
date = arrow.get(day)
start = date.timestamp
end = date.ceil("day").timestamp
return start, end
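# --- Usage sketch (illustrative only; assumes arrow < 1.0, where Arrow.timestamp
# is a property as used above) ---
if __name__ == "__main__":
    print(is_uuid(str(uuid.uuid4())))   # True
    print(is_uuid(["not-a-uuid"]))      # False
    print(to_list(42))                  # [42]
    print(to_list([1, 2, 3]))           # [1, 2, 3]
    start, end = get_start_end_ts("2020-07-09")
    print(start, end)                   # epoch bounds of that UTC day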
|
the-stack_0_16282
|
# -*- coding:utf-8 -*-
from __future__ import print_function
from setuptools import setup, find_packages
from glob import glob
import pyprobar
with open(glob('requirements.*')[0], encoding='utf-8') as f:
all_reqs = f.read().split('\n')
install_requires = [x.strip() for x in all_reqs if 'git+' not in x]
with open("README.md", "r", encoding='utf-8') as fr:
long_description = fr.read()
setup(
    name=pyprobar.__name__,
    version=pyprobar.__version__,
    packages=find_packages(),
    include_package_data=True,
    description="An easy-to-use and colorful progress bar for python.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    author="K.y",
    author_email="[email protected]",
    url="https://github.com/beidongjiedeguang/python-progress-bar",
    license="MIT",
    install_requires=install_requires,
classifiers=[
'Operating System :: OS Independent',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
keywords=[
'Python Utils',
'Machine Learning',
'Neural Networks',
'Natural Language Processing',
'Computer Vision'
]
)
|
the-stack_0_16284
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SourceProperties(Model):
"""The properties of the source code repository.
All required parameters must be populated in order to send to Azure.
:param source_control_type: Required. The type of source control service.
Possible values include: 'Github', 'VisualStudioTeamService'
:type source_control_type: str or
~azure.mgmt.containerregistry.v2018_09_01.models.SourceControlType
:param repository_url: Required. The full URL to the source code
repository
:type repository_url: str
:param branch: The branch name of the source code.
:type branch: str
:param source_control_auth_properties: The authorization properties for
accessing the source code repository and to set up
webhooks for notifications.
:type source_control_auth_properties:
~azure.mgmt.containerregistry.v2018_09_01.models.AuthInfo
"""
_validation = {
'source_control_type': {'required': True},
'repository_url': {'required': True},
}
_attribute_map = {
'source_control_type': {'key': 'sourceControlType', 'type': 'str'},
'repository_url': {'key': 'repositoryUrl', 'type': 'str'},
'branch': {'key': 'branch', 'type': 'str'},
'source_control_auth_properties': {'key': 'sourceControlAuthProperties', 'type': 'AuthInfo'},
}
def __init__(self, **kwargs):
super(SourceProperties, self).__init__(**kwargs)
self.source_control_type = kwargs.get('source_control_type', None)
self.repository_url = kwargs.get('repository_url', None)
self.branch = kwargs.get('branch', None)
self.source_control_auth_properties = kwargs.get('source_control_auth_properties', None)
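# --- Usage sketch (illustrative only; the repository URL is a placeholder) ---
#     source = SourceProperties(
#         source_control_type='Github',
#         repository_url='https://github.com/example/repo',
#         branch='master',
#     )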
|
the-stack_0_16285
|
# Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://nvlabs.github.io/stylegan2/license.html
"""TensorFlow custom ops builder.
"""
import os
import re
import uuid
import hashlib
import tempfile
import shutil
import tensorflow as tf
from tensorflow.python.client import device_lib # pylint: disable=no-name-in-module
#----------------------------------------------------------------------------
# Global options.
cuda_cache_path = os.path.join(os.path.dirname(__file__), '_cudacache')
cuda_cache_version_tag = 'v1'
do_not_hash_included_headers = False # Speed up compilation by assuming that headers included by the CUDA code never change. Unsafe!
verbose = True # Print status messages to stdout.
compiler_bindir_search_path = [
'C:/Program Files (x86)/Microsoft Visual Studio/2017/Community/VC/Tools/MSVC/14.14.26428/bin/Hostx64/x64',
'C:/Program Files (x86)/Microsoft Visual Studio/2019/Community/VC/Tools/MSVC/14.23.28105/bin/Hostx64/x64',
'C:/Program Files (x86)/Microsoft Visual Studio/2019/Community/VC/Tools/MSVC/14.29.30037/bin/Hostx64/x64',
'C:/Program Files (x86)/Microsoft Visual Studio 14.0/vc/bin',
]
#----------------------------------------------------------------------------
# Internal helper funcs.
def _find_compiler_bindir():
for compiler_path in compiler_bindir_search_path:
if os.path.isdir(compiler_path):
return compiler_path
return None
def _get_compute_cap(device):
caps_str = device.physical_device_desc
m = re.search('compute capability: (\\d+).(\\d+)', caps_str)
major = m.group(1)
minor = m.group(2)
return (major, minor)
def _get_cuda_gpu_arch_string():
gpus = [x for x in device_lib.list_local_devices() if x.device_type == 'GPU']
if len(gpus) == 0:
raise RuntimeError('No GPU devices found')
(major, minor) = _get_compute_cap(gpus[0])
return 'sm_%s%s' % (major, minor)
def _run_cmd(cmd):
with os.popen(cmd) as pipe:
output = pipe.read()
status = pipe.close()
if status is not None:
raise RuntimeError('NVCC returned an error. See below for full command line and output log:\n\n%s\n\n%s' % (cmd, output))
def _prepare_nvcc_cli(opts):
cmd = 'nvcc ' + opts.strip()
cmd += ' --disable-warnings'
cmd += ' --include-path "%s"' % tf.sysconfig.get_include()
cmd += ' --include-path "%s"' % os.path.join(tf.sysconfig.get_include(), 'external', 'protobuf_archive', 'src')
cmd += ' --include-path "%s"' % os.path.join(tf.sysconfig.get_include(), 'external', 'com_google_absl')
cmd += ' --include-path "%s"' % os.path.join(tf.sysconfig.get_include(), 'external', 'eigen_archive')
compiler_bindir = _find_compiler_bindir()
if compiler_bindir is None:
# Require that _find_compiler_bindir succeeds on Windows. Allow
# nvcc to use whatever is the default on Linux.
if os.name == 'nt':
raise RuntimeError('Could not find MSVC/GCC/CLANG installation on this computer. Check compiler_bindir_search_path list in "%s".' % __file__)
else:
cmd += ' --compiler-bindir "%s"' % compiler_bindir
cmd += ' 2>&1'
return cmd
#----------------------------------------------------------------------------
# Main entry point.
_plugin_cache = dict()
def get_plugin(cuda_file):
cuda_file_base = os.path.basename(cuda_file)
cuda_file_name, cuda_file_ext = os.path.splitext(cuda_file_base)
# Already in cache?
if cuda_file in _plugin_cache:
return _plugin_cache[cuda_file]
# Setup plugin.
if verbose:
print('Setting up TensorFlow plugin "%s": ' % cuda_file_base, end='', flush=True)
try:
# Hash CUDA source.
md5 = hashlib.md5()
with open(cuda_file, 'rb') as f:
md5.update(f.read())
md5.update(b'\n')
# Hash headers included by the CUDA code by running it through the preprocessor.
if not do_not_hash_included_headers:
if verbose:
print('Preprocessing... ', end='', flush=True)
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_file = os.path.join(tmp_dir, cuda_file_name + '_tmp' + cuda_file_ext)
_run_cmd(_prepare_nvcc_cli('"%s" --preprocess -o "%s" --keep --keep-dir "%s"' % (cuda_file, tmp_file, tmp_dir)))
with open(tmp_file, 'rb') as f:
bad_file_str = ('"' + cuda_file.replace('\\', '/') + '"').encode('utf-8') # __FILE__ in error check macros
good_file_str = ('"' + cuda_file_base + '"').encode('utf-8')
for ln in f:
if not ln.startswith(b'# ') and not ln.startswith(b'#line '): # ignore line number pragmas
ln = ln.replace(bad_file_str, good_file_str)
md5.update(ln)
md5.update(b'\n')
# Select compiler options.
compile_opts = ''
if os.name == 'nt':
compile_opts += '"%s"' % os.path.join(tf.sysconfig.get_lib(), 'python', '_pywrap_tensorflow_internal.lib')
elif os.name == 'posix':
compile_opts += '"%s"' % os.path.join(tf.sysconfig.get_lib(), 'python', '_pywrap_tensorflow_internal.so')
compile_opts += ' --compiler-options \'-fPIC -D_GLIBCXX_USE_CXX11_ABI=0\''
else:
assert False # not Windows or Linux, w00t?
compile_opts += ' --gpu-architecture=%s' % _get_cuda_gpu_arch_string()
compile_opts += ' --use_fast_math'
nvcc_cmd = _prepare_nvcc_cli(compile_opts)
# Hash build configuration.
md5.update(('nvcc_cmd: ' + nvcc_cmd).encode('utf-8') + b'\n')
md5.update(('tf.VERSION: ' + tf.VERSION).encode('utf-8') + b'\n')
md5.update(('cuda_cache_version_tag: ' + cuda_cache_version_tag).encode('utf-8') + b'\n')
# Compile if not already compiled.
bin_file_ext = '.dll' if os.name == 'nt' else '.so'
bin_file = os.path.join(cuda_cache_path, cuda_file_name + '_' + md5.hexdigest() + bin_file_ext)
if not os.path.isfile(bin_file):
if verbose:
print('Compiling... ', end='', flush=True)
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_file = os.path.join(tmp_dir, cuda_file_name + '_tmp' + bin_file_ext)
_run_cmd(nvcc_cmd + ' "%s" --shared -o "%s" --keep --keep-dir "%s"' % (cuda_file, tmp_file, tmp_dir))
os.makedirs(cuda_cache_path, exist_ok=True)
intermediate_file = os.path.join(cuda_cache_path, cuda_file_name + '_' + uuid.uuid4().hex + '_tmp' + bin_file_ext)
shutil.copyfile(tmp_file, intermediate_file)
os.rename(intermediate_file, bin_file) # atomic
# Load.
if verbose:
print('Loading... ', end='', flush=True)
plugin = tf.load_op_library(bin_file)
# Add to cache.
_plugin_cache[cuda_file] = plugin
if verbose:
print('Done.', flush=True)
return plugin
except:
if verbose:
print('Failed!', flush=True)
raise
#----------------------------------------------------------------------------
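# Usage sketch (illustrative only; the .cu path below is a placeholder):
#     _plugin = get_plugin(os.path.join(os.path.dirname(__file__), 'my_op.cu'))
#     # _plugin then exposes the custom TensorFlow ops defined in that CUDA file.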
|
the-stack_0_16286
|
from __future__ import (
absolute_import,
unicode_literals,
)
import abc
from typing import (
Dict,
FrozenSet,
Type,
)
import six
__all__ = (
'Serializer',
)
class _SerializerMeta(abc.ABCMeta):
_mime_type_to_serializer_map = {} # type: Dict[six.text_type, Type[Serializer]]
_all_supported_mime_types = frozenset() # type: FrozenSet[six.text_type]
def __new__(mcs, name, bases, body):
# Don't allow multiple inheritance as it mucks up mime-type collection
if len(bases) != 1:
raise ValueError('You cannot use multiple inheritance with Serializers')
cls = super(_SerializerMeta, mcs).__new__(mcs, name, bases, body)
if bases and bases[0] is not object:
if not issubclass(cls, Serializer):
raise TypeError('The internal _SerializerMeta is only valid on Serializers')
if not cls.mime_type or not cls.mime_type.strip():
raise ValueError('All serializers must have a non-null, non-blank MIME type')
if cls.mime_type in mcs._all_supported_mime_types:
raise ValueError('Another serializer {cls} already supports mime type {mime_type}'.format(
cls=mcs._mime_type_to_serializer_map[cls.mime_type],
mime_type=cls.mime_type,
))
mcs._mime_type_to_serializer_map[cls.mime_type] = cls
mcs._all_supported_mime_types = frozenset(mcs._mime_type_to_serializer_map.keys())
return cls
@property
def all_supported_mime_types(cls): # type: () -> FrozenSet[six.text_type]
"""
Return all mime types supported by all implementations of `Serializer`.
:return: A frozen set of mime types.
"""
return cls._all_supported_mime_types
@six.add_metaclass(_SerializerMeta)
class Serializer(object):
"""
The mime type that this serializer supports.
"""
mime_type = None # type: six.text_type
@classmethod
def resolve_serializer(cls, mime_type): # type: (six.text_type) -> Serializer
"""
Given the requested mime type, return an initialized `Serializer` that understands that mime type.
:param mime_type: The mime type for which to get a compatible `Serializer`
:return: A compatible `Serializer`.
:raises: ValueError if there is no `Serializer` that understands this mime type.
"""
if mime_type not in cls.all_supported_mime_types:
raise ValueError('Mime type {} is not supported'.format(mime_type))
return cls._mime_type_to_serializer_map[mime_type]()
@abc.abstractmethod
def dict_to_blob(self, message_dict): # type: (Dict) -> six.binary_type
"""
Take a message in the form of a dict and return a serialized message in the form of bytes (string).
:param message_dict: The message to serialize into a blob.
:return: The serialized blob.
"""
@abc.abstractmethod
def blob_to_dict(self, blob): # type: (six.binary_type) -> Dict
"""
Take a serialized message in the form of bytes (string) and return a dict.
:param blob: The blob to deserialize into a message
:return: The deserialized message.
"""
|
the-stack_0_16287
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.test_util."""
import collections
import copy
import random
import threading
import unittest
import weakref
from absl.testing import parameterized
import numpy as np
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python import pywrap_sanitizers
from tensorflow.python.compat import compat
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import combinations
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_ops # pylint: disable=unused-import
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import googletest
class TestUtilTest(test_util.TensorFlowTestCase, parameterized.TestCase):
def test_assert_ops_in_graph(self):
with ops.Graph().as_default():
constant_op.constant(["hello", "taffy"], name="hello")
test_util.assert_ops_in_graph({"hello": "Const"}, ops.get_default_graph())
self.assertRaises(ValueError, test_util.assert_ops_in_graph,
{"bye": "Const"}, ops.get_default_graph())
self.assertRaises(ValueError, test_util.assert_ops_in_graph,
{"hello": "Variable"}, ops.get_default_graph())
@test_util.run_deprecated_v1
def test_session_functions(self):
with self.test_session() as sess:
sess_ref = weakref.ref(sess)
with self.cached_session(graph=None, config=None) as sess2:
# We make sure that sess2 is sess.
assert sess2 is sess
# We make sure we raise an exception if we use cached_session with
# different values.
with self.assertRaises(ValueError):
with self.cached_session(graph=ops.Graph()) as sess2:
pass
with self.assertRaises(ValueError):
with self.cached_session(force_gpu=True) as sess2:
pass
# We make sure that test_session will cache the session even after the
# with scope.
assert not sess_ref()._closed
with self.session() as unique_sess:
unique_sess_ref = weakref.ref(unique_sess)
with self.session() as sess2:
assert sess2 is not unique_sess
# We make sure the session is closed when we leave the with statement.
assert unique_sess_ref()._closed
def test_assert_equal_graph_def(self):
with ops.Graph().as_default() as g:
def_empty = g.as_graph_def()
constant_op.constant(5, name="five")
constant_op.constant(7, name="seven")
def_57 = g.as_graph_def()
with ops.Graph().as_default() as g:
constant_op.constant(7, name="seven")
constant_op.constant(5, name="five")
def_75 = g.as_graph_def()
# Comparing strings is order dependent
self.assertNotEqual(str(def_57), str(def_75))
# assert_equal_graph_def doesn't care about order
test_util.assert_equal_graph_def(def_57, def_75)
# Compare two unequal graphs
with self.assertRaisesRegex(AssertionError,
r"^Found unexpected node '{{node seven}}"):
test_util.assert_equal_graph_def(def_57, def_empty)
def test_assert_equal_graph_def_hash_table(self):
def get_graph_def():
with ops.Graph().as_default() as g:
x = constant_op.constant([2, 9], name="x")
keys = constant_op.constant([1, 2], name="keys")
values = constant_op.constant([3, 4], name="values")
default = constant_op.constant(-1, name="default")
table = lookup_ops.StaticHashTable(
lookup_ops.KeyValueTensorInitializer(keys, values), default)
_ = table.lookup(x)
return g.as_graph_def()
def_1 = get_graph_def()
def_2 = get_graph_def()
# The unique shared_name of each table makes the graph unequal.
with self.assertRaisesRegex(AssertionError, "hash_table_"):
test_util.assert_equal_graph_def(def_1, def_2,
hash_table_shared_name=False)
# That can be ignored. (NOTE: modifies GraphDefs in-place.)
test_util.assert_equal_graph_def(def_1, def_2,
hash_table_shared_name=True)
def testIsGoogleCudaEnabled(self):
# The test doesn't assert anything. It ensures the py wrapper
# function is generated correctly.
if test_util.IsGoogleCudaEnabled():
print("GoogleCuda is enabled")
else:
print("GoogleCuda is disabled")
def testIsMklEnabled(self):
# This test doesn't assert anything.
# It ensures the py wrapper function is generated correctly.
if test_util.IsMklEnabled():
print("MKL is enabled")
else:
print("MKL is disabled")
@test_util.disable_asan("Skip test if ASAN is enabled.")
def testDisableAsan(self):
self.assertFalse(pywrap_sanitizers.is_asan_enabled())
@test_util.disable_msan("Skip test if MSAN is enabled.")
def testDisableMsan(self):
self.assertFalse(pywrap_sanitizers.is_msan_enabled())
@test_util.disable_tsan("Skip test if TSAN is enabled.")
def testDisableTsan(self):
self.assertFalse(pywrap_sanitizers.is_tsan_enabled())
@test_util.disable_ubsan("Skip test if UBSAN is enabled.")
def testDisableUbsan(self):
self.assertFalse(pywrap_sanitizers.is_ubsan_enabled())
@test_util.run_in_graph_and_eager_modes
def testAssertProtoEqualsStr(self):
graph_str = "node { name: 'w1' op: 'params' }"
graph_def = graph_pb2.GraphDef()
text_format.Merge(graph_str, graph_def)
# test string based comparison
self.assertProtoEquals(graph_str, graph_def)
# test original comparison
self.assertProtoEquals(graph_def, graph_def)
@test_util.run_in_graph_and_eager_modes
def testAssertProtoEqualsAny(self):
# Test assertProtoEquals with a protobuf.Any field.
meta_graph_def_str = """
meta_info_def {
meta_graph_version: "outer"
any_info {
[type.googleapis.com/tensorflow.MetaGraphDef] {
meta_info_def {
meta_graph_version: "inner"
}
}
}
}
"""
meta_graph_def_outer = meta_graph_pb2.MetaGraphDef()
meta_graph_def_outer.meta_info_def.meta_graph_version = "outer"
meta_graph_def_inner = meta_graph_pb2.MetaGraphDef()
meta_graph_def_inner.meta_info_def.meta_graph_version = "inner"
meta_graph_def_outer.meta_info_def.any_info.Pack(meta_graph_def_inner)
self.assertProtoEquals(meta_graph_def_str, meta_graph_def_outer)
self.assertProtoEquals(meta_graph_def_outer, meta_graph_def_outer)
# Check if the assertion failure message contains the content of
# the inner proto.
with self.assertRaisesRegex(AssertionError, r'meta_graph_version: "inner"'):
self.assertProtoEquals("", meta_graph_def_outer)
@test_util.run_in_graph_and_eager_modes
def testNDArrayNear(self):
a1 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
a2 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
a3 = np.array([[10.0, 20.0, 30.0], [40.0, 50.0, 60.0]])
self.assertTrue(self._NDArrayNear(a1, a2, 1e-5))
self.assertFalse(self._NDArrayNear(a1, a3, 1e-5))
@test_util.run_in_graph_and_eager_modes
def testCheckedThreadSucceeds(self):
def noop(ev):
ev.set()
event_arg = threading.Event()
self.assertFalse(event_arg.is_set())
t = self.checkedThread(target=noop, args=(event_arg,))
t.start()
t.join()
self.assertTrue(event_arg.is_set())
@test_util.run_in_graph_and_eager_modes
def testCheckedThreadFails(self):
def err_func():
return 1 // 0
t = self.checkedThread(target=err_func)
t.start()
with self.assertRaises(self.failureException) as fe:
t.join()
self.assertTrue("integer division or modulo by zero" in str(fe.exception))
@test_util.run_in_graph_and_eager_modes
def testCheckedThreadWithWrongAssertionFails(self):
x = 37
def err_func():
self.assertTrue(x < 10)
t = self.checkedThread(target=err_func)
t.start()
with self.assertRaises(self.failureException) as fe:
t.join()
self.assertTrue("False is not true" in str(fe.exception))
@test_util.run_in_graph_and_eager_modes
def testMultipleThreadsWithOneFailure(self):
def err_func(i):
self.assertTrue(i != 7)
threads = [
self.checkedThread(
target=err_func, args=(i,)) for i in range(10)
]
for t in threads:
t.start()
for i, t in enumerate(threads):
if i == 7:
with self.assertRaises(self.failureException):
t.join()
else:
t.join()
def _WeMustGoDeeper(self, msg):
with self.assertRaisesOpError(msg):
with ops.Graph().as_default():
node_def = ops._NodeDef("IntOutput", "name")
node_def_orig = ops._NodeDef("IntOutput", "orig")
op_orig = ops.Operation(node_def_orig, ops.get_default_graph())
op = ops.Operation(node_def, ops.get_default_graph(),
original_op=op_orig)
raise errors.UnauthenticatedError(node_def, op, "true_err")
@test_util.run_in_graph_and_eager_modes
def testAssertRaisesOpErrorDoesNotPassMessageDueToLeakedStack(self):
with self.assertRaises(AssertionError):
self._WeMustGoDeeper("this_is_not_the_error_you_are_looking_for")
self._WeMustGoDeeper("true_err")
self._WeMustGoDeeper("name")
self._WeMustGoDeeper("orig")
@parameterized.named_parameters(
dict(testcase_name="tensors", ragged_tensors=False),
dict(testcase_name="ragged_tensors", ragged_tensors=True))
@test_util.run_in_graph_and_eager_modes
def testAllCloseTensors(self, ragged_tensors: bool):
a_raw_data = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
a = constant_op.constant(a_raw_data)
b = math_ops.add(1, constant_op.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8]]))
if ragged_tensors:
a = ragged_tensor.RaggedTensor.from_tensor(a)
b = ragged_tensor.RaggedTensor.from_tensor(b)
self.assertAllClose(a, b)
self.assertAllClose(a, a_raw_data)
a_dict = {"key": a}
b_dict = {"key": b}
self.assertAllClose(a_dict, b_dict)
x_list = [a, b]
y_list = [a_raw_data, b]
self.assertAllClose(x_list, y_list)
@test_util.run_in_graph_and_eager_modes
def testAllCloseScalars(self):
self.assertAllClose(7, 7 + 1e-8)
with self.assertRaisesRegex(AssertionError, r"Not equal to tolerance"):
self.assertAllClose(7, 7 + 1e-5)
@test_util.run_in_graph_and_eager_modes
def testAllCloseList(self):
with self.assertRaisesRegex(AssertionError, r"not close dif"):
self.assertAllClose([0], [1])
@test_util.run_in_graph_and_eager_modes
def testAllCloseDictToNonDict(self):
with self.assertRaisesRegex(ValueError, r"Can't compare dict to non-dict"):
self.assertAllClose(1, {"a": 1})
with self.assertRaisesRegex(ValueError, r"Can't compare dict to non-dict"):
self.assertAllClose({"a": 1}, 1)
@test_util.run_in_graph_and_eager_modes
def testAllCloseNamedtuples(self):
a = 7
b = (2., 3.)
c = np.ones((3, 2, 4)) * 7.
expected = {"a": a, "b": b, "c": c}
my_named_tuple = collections.namedtuple("MyNamedTuple", ["a", "b", "c"])
# Identity.
self.assertAllClose(expected, my_named_tuple(a=a, b=b, c=c))
self.assertAllClose(
my_named_tuple(a=a, b=b, c=c), my_named_tuple(a=a, b=b, c=c))
@test_util.run_in_graph_and_eager_modes
def testAllCloseDicts(self):
a = 7
b = (2., 3.)
c = np.ones((3, 2, 4)) * 7.
expected = {"a": a, "b": b, "c": c}
# Identity.
self.assertAllClose(expected, expected)
self.assertAllClose(expected, dict(expected))
# With each item removed.
for k in expected:
actual = dict(expected)
del actual[k]
with self.assertRaisesRegex(AssertionError, r"mismatched keys"):
self.assertAllClose(expected, actual)
# With each item changed.
with self.assertRaisesRegex(AssertionError, r"Not equal to tolerance"):
self.assertAllClose(expected, {"a": a + 1e-5, "b": b, "c": c})
with self.assertRaisesRegex(AssertionError, r"Shape mismatch"):
self.assertAllClose(expected, {"a": a, "b": b + (4.,), "c": c})
c_copy = np.array(c)
c_copy[1, 1, 1] += 1e-5
with self.assertRaisesRegex(AssertionError, r"Not equal to tolerance"):
self.assertAllClose(expected, {"a": a, "b": b, "c": c_copy})
@test_util.run_in_graph_and_eager_modes
def testAllCloseListOfNamedtuples(self):
my_named_tuple = collections.namedtuple("MyNamedTuple", ["x", "y"])
l1 = [
my_named_tuple(x=np.array([[2.3, 2.5]]), y=np.array([[0.97, 0.96]])),
my_named_tuple(x=np.array([[3.3, 3.5]]), y=np.array([[0.98, 0.99]]))
]
l2 = [
([[2.3, 2.5]], [[0.97, 0.96]]),
([[3.3, 3.5]], [[0.98, 0.99]]),
]
self.assertAllClose(l1, l2)
@test_util.run_in_graph_and_eager_modes
def testAllCloseNestedStructure(self):
a = {"x": np.ones((3, 2, 4)) * 7, "y": (2, [{"nested": {"m": 3, "n": 4}}])}
self.assertAllClose(a, a)
b = copy.deepcopy(a)
self.assertAllClose(a, b)
# Test mismatched values
b["y"][1][0]["nested"]["n"] = 4.2
with self.assertRaisesRegex(AssertionError,
r"\[y\]\[1\]\[0\]\[nested\]\[n\]"):
self.assertAllClose(a, b)
@test_util.run_in_graph_and_eager_modes
def testAssertDictEqual(self):
a = 7
b = (2., 3.)
c = np.ones((3, 2, 4)) * 7.
d = "testing123"
expected = {"a": a, "b": b, "c": c, "d": d}
actual = {"a": a, "b": b, "c": constant_op.constant(c), "d": d}
self.assertDictEqual(expected, expected)
self.assertDictEqual(expected, actual)
@test_util.run_in_graph_and_eager_modes
def testArrayNear(self):
a = [1, 2]
b = [1, 2, 5]
with self.assertRaises(AssertionError):
self.assertArrayNear(a, b, 0.001)
a = [1, 2]
b = [[1, 2], [3, 4]]
with self.assertRaises(TypeError):
self.assertArrayNear(a, b, 0.001)
a = [1, 2]
b = [1, 2]
self.assertArrayNear(a, b, 0.001)
@test_util.skip_if(True) # b/117665998
def testForceGPU(self):
with self.assertRaises(errors.InvalidArgumentError):
with self.test_session(force_gpu=True):
# this relies on us not having a GPU implementation for assert, which
# seems sensible
x = constant_op.constant(True)
y = [15]
control_flow_ops.Assert(x, y).run()
@test_util.run_in_graph_and_eager_modes
def testAssertAllCloseAccordingToType(self):
# test plain int
self.assertAllCloseAccordingToType(1, 1, rtol=1e-8, atol=1e-8)
# test float64
self.assertAllCloseAccordingToType(
np.asarray([1e-8], dtype=np.float64),
np.asarray([2e-8], dtype=np.float64),
rtol=1e-8, atol=1e-8
)
self.assertAllCloseAccordingToType(
constant_op.constant([1e-8], dtype=dtypes.float64),
constant_op.constant([2e-8], dtype=dtypes.float64),
rtol=1e-8,
atol=1e-8)
with (self.assertRaises(AssertionError)):
self.assertAllCloseAccordingToType(
np.asarray([1e-7], dtype=np.float64),
np.asarray([2e-7], dtype=np.float64),
rtol=1e-8, atol=1e-8
)
# test float32
self.assertAllCloseAccordingToType(
np.asarray([1e-7], dtype=np.float32),
np.asarray([2e-7], dtype=np.float32),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7
)
self.assertAllCloseAccordingToType(
constant_op.constant([1e-7], dtype=dtypes.float32),
constant_op.constant([2e-7], dtype=dtypes.float32),
rtol=1e-8,
atol=1e-8,
float_rtol=1e-7,
float_atol=1e-7)
with (self.assertRaises(AssertionError)):
self.assertAllCloseAccordingToType(
np.asarray([1e-6], dtype=np.float32),
np.asarray([2e-6], dtype=np.float32),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7
)
# test float16
self.assertAllCloseAccordingToType(
np.asarray([1e-4], dtype=np.float16),
np.asarray([2e-4], dtype=np.float16),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7,
half_rtol=1e-4, half_atol=1e-4
)
self.assertAllCloseAccordingToType(
constant_op.constant([1e-4], dtype=dtypes.float16),
constant_op.constant([2e-4], dtype=dtypes.float16),
rtol=1e-8,
atol=1e-8,
float_rtol=1e-7,
float_atol=1e-7,
half_rtol=1e-4,
half_atol=1e-4)
with (self.assertRaises(AssertionError)):
self.assertAllCloseAccordingToType(
np.asarray([1e-3], dtype=np.float16),
np.asarray([2e-3], dtype=np.float16),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7,
half_rtol=1e-4, half_atol=1e-4
)
@test_util.run_in_graph_and_eager_modes
def testAssertAllEqual(self):
i = variables.Variable([100] * 3, dtype=dtypes.int32, name="i")
j = constant_op.constant([20] * 3, dtype=dtypes.int32, name="j")
k = math_ops.add(i, j, name="k")
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual([100] * 3, i)
self.assertAllEqual([120] * 3, k)
self.assertAllEqual([20] * 3, j)
with self.assertRaisesRegex(AssertionError, r"not equal lhs"):
self.assertAllEqual([0] * 3, k)
@test_util.run_in_graph_and_eager_modes
def testAssertNotAllEqual(self):
i = variables.Variable([100], dtype=dtypes.int32, name="i")
j = constant_op.constant([20], dtype=dtypes.int32, name="j")
k = math_ops.add(i, j, name="k")
self.evaluate(variables.global_variables_initializer())
self.assertNotAllEqual([100] * 3, i)
self.assertNotAllEqual([120] * 3, k)
self.assertNotAllEqual([20] * 3, j)
with self.assertRaisesRegex(
AssertionError, r"two values are equal at all elements.*extra message"):
self.assertNotAllEqual([120], k, msg="extra message")
@test_util.run_in_graph_and_eager_modes
def testAssertNotAllClose(self):
# Test with arrays
self.assertNotAllClose([0.1], [0.2])
with self.assertRaises(AssertionError):
self.assertNotAllClose([-1.0, 2.0], [-1.0, 2.0])
# Test with tensors
x = constant_op.constant([1.0, 1.0], name="x")
y = math_ops.add(x, x)
self.assertAllClose([2.0, 2.0], y)
self.assertNotAllClose([0.9, 1.0], x)
with self.assertRaises(AssertionError):
self.assertNotAllClose([1.0, 1.0], x)
@test_util.run_in_graph_and_eager_modes
def testAssertNotAllCloseRTol(self):
# Test with arrays
with self.assertRaises(AssertionError):
self.assertNotAllClose([1.1, 2.1], [1.0, 2.0], rtol=0.2)
# Test with tensors
x = constant_op.constant([1.0, 1.0], name="x")
y = math_ops.add(x, x)
self.assertAllClose([2.0, 2.0], y)
with self.assertRaises(AssertionError):
self.assertNotAllClose([0.9, 1.0], x, rtol=0.2)
@test_util.run_in_graph_and_eager_modes
def testAssertNotAllCloseATol(self):
# Test with arrays
with self.assertRaises(AssertionError):
self.assertNotAllClose([1.1, 2.1], [1.0, 2.0], atol=0.2)
# Test with tensors
x = constant_op.constant([1.0, 1.0], name="x")
y = math_ops.add(x, x)
self.assertAllClose([2.0, 2.0], y)
with self.assertRaises(AssertionError):
self.assertNotAllClose([0.9, 1.0], x, atol=0.2)
@test_util.run_in_graph_and_eager_modes
def testAssertAllGreaterLess(self):
x = constant_op.constant([100.0, 110.0, 120.0], dtype=dtypes.float32)
y = constant_op.constant([10.0] * 3, dtype=dtypes.float32)
z = math_ops.add(x, y)
self.assertAllClose([110.0, 120.0, 130.0], z)
self.assertAllGreater(x, 95.0)
self.assertAllLess(x, 125.0)
with self.assertRaises(AssertionError):
self.assertAllGreater(x, 105.0)
with self.assertRaises(AssertionError):
self.assertAllGreater(x, 125.0)
with self.assertRaises(AssertionError):
self.assertAllLess(x, 115.0)
with self.assertRaises(AssertionError):
self.assertAllLess(x, 95.0)
@test_util.run_in_graph_and_eager_modes
def testAssertAllGreaterLessEqual(self):
x = constant_op.constant([100.0, 110.0, 120.0], dtype=dtypes.float32)
y = constant_op.constant([10.0] * 3, dtype=dtypes.float32)
z = math_ops.add(x, y)
self.assertAllEqual([110.0, 120.0, 130.0], z)
self.assertAllGreaterEqual(x, 95.0)
self.assertAllLessEqual(x, 125.0)
with self.assertRaises(AssertionError):
self.assertAllGreaterEqual(x, 105.0)
with self.assertRaises(AssertionError):
self.assertAllGreaterEqual(x, 125.0)
with self.assertRaises(AssertionError):
self.assertAllLessEqual(x, 115.0)
with self.assertRaises(AssertionError):
self.assertAllLessEqual(x, 95.0)
def testAssertAllInRangeWithNonNumericValuesFails(self):
s1 = constant_op.constant("Hello, ", name="s1")
c = constant_op.constant([1 + 2j, -3 + 5j], name="c")
b = constant_op.constant([False, True], name="b")
with self.assertRaises(AssertionError):
self.assertAllInRange(s1, 0.0, 1.0)
with self.assertRaises(AssertionError):
self.assertAllInRange(c, 0.0, 1.0)
with self.assertRaises(AssertionError):
self.assertAllInRange(b, 0, 1)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInRange(self):
x = constant_op.constant([10.0, 15.0], name="x")
self.assertAllInRange(x, 10, 15)
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 10, 15, open_lower_bound=True)
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 10, 15, open_upper_bound=True)
with self.assertRaises(AssertionError):
self.assertAllInRange(
x, 10, 15, open_lower_bound=True, open_upper_bound=True)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInRangeScalar(self):
x = constant_op.constant(10.0, name="x")
nan = constant_op.constant(np.nan, name="nan")
self.assertAllInRange(x, 5, 15)
with self.assertRaises(AssertionError):
self.assertAllInRange(nan, 5, 15)
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 10, 15, open_lower_bound=True)
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 1, 2)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInRangeErrorMessageEllipses(self):
x_init = np.array([[10.0, 15.0]] * 12)
x = constant_op.constant(x_init, name="x")
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 5, 10)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInRangeDetectsNaNs(self):
x = constant_op.constant(
[[np.nan, 0.0], [np.nan, np.inf], [np.inf, np.nan]], name="x")
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 0.0, 2.0)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInRangeWithInfinities(self):
x = constant_op.constant([10.0, np.inf], name="x")
self.assertAllInRange(x, 10, np.inf)
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 10, np.inf, open_upper_bound=True)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInSet(self):
b = constant_op.constant([True, False], name="b")
x = constant_op.constant([13, 37], name="x")
self.assertAllInSet(b, [False, True])
self.assertAllInSet(b, (False, True))
self.assertAllInSet(b, {False, True})
self.assertAllInSet(x, [0, 13, 37, 42])
self.assertAllInSet(x, (0, 13, 37, 42))
self.assertAllInSet(x, {0, 13, 37, 42})
with self.assertRaises(AssertionError):
self.assertAllInSet(b, [False])
with self.assertRaises(AssertionError):
self.assertAllInSet(x, (42,))
def testRandomSeed(self):
# Call setUp again for WithCApi case (since it makes a new default graph
# after setup).
# TODO(skyewm): remove this when C API is permanently enabled.
with context.eager_mode():
self.setUp()
a = random.randint(1, 1000)
a_np_rand = np.random.rand(1)
a_rand = random_ops.random_normal([1])
# ensure that randomness in multiple testCases is deterministic.
self.setUp()
b = random.randint(1, 1000)
b_np_rand = np.random.rand(1)
b_rand = random_ops.random_normal([1])
self.assertEqual(a, b)
self.assertEqual(a_np_rand, b_np_rand)
self.assertAllEqual(a_rand, b_rand)
@test_util.run_in_graph_and_eager_modes
def test_callable_evaluate(self):
def model():
return resource_variable_ops.ResourceVariable(
name="same_name",
initial_value=1) + 1
with context.eager_mode():
self.assertEqual(2, self.evaluate(model))
@test_util.run_in_graph_and_eager_modes
def test_nested_tensors_evaluate(self):
expected = {"a": 1, "b": 2, "nested": {"d": 3, "e": 4}}
nested = {"a": constant_op.constant(1),
"b": constant_op.constant(2),
"nested": {"d": constant_op.constant(3),
"e": constant_op.constant(4)}}
self.assertEqual(expected, self.evaluate(nested))
def test_run_in_graph_and_eager_modes(self):
l = []
def inc(self, with_brackets):
del self # self argument is required by run_in_graph_and_eager_modes.
mode = "eager" if context.executing_eagerly() else "graph"
with_brackets = "with_brackets" if with_brackets else "without_brackets"
l.append((with_brackets, mode))
f = test_util.run_in_graph_and_eager_modes(inc)
f(self, with_brackets=False)
f = test_util.run_in_graph_and_eager_modes()(inc) # pylint: disable=assignment-from-no-return
f(self, with_brackets=True)
self.assertEqual(len(l), 4)
self.assertEqual(set(l), {
("with_brackets", "graph"),
("with_brackets", "eager"),
("without_brackets", "graph"),
("without_brackets", "eager"),
})
def test_get_node_def_from_graph(self):
graph_def = graph_pb2.GraphDef()
node_foo = graph_def.node.add()
node_foo.name = "foo"
self.assertIs(test_util.get_node_def_from_graph("foo", graph_def), node_foo)
self.assertIsNone(test_util.get_node_def_from_graph("bar", graph_def))
def test_run_in_eager_and_graph_modes_test_class(self):
msg = "`run_in_graph_and_eager_modes` only supports test methods.*"
with self.assertRaisesRegex(ValueError, msg):
@test_util.run_in_graph_and_eager_modes()
class Foo(object):
pass
del Foo # Make pylint unused happy.
def test_run_in_eager_and_graph_modes_skip_graph_runs_eager(self):
modes = []
def _test(self):
if not context.executing_eagerly():
self.skipTest("Skipping in graph mode")
modes.append("eager" if context.executing_eagerly() else "graph")
test_util.run_in_graph_and_eager_modes(_test)(self)
self.assertEqual(modes, ["eager"])
def test_run_in_eager_and_graph_modes_skip_eager_runs_graph(self):
modes = []
def _test(self):
if context.executing_eagerly():
self.skipTest("Skipping in eager mode")
modes.append("eager" if context.executing_eagerly() else "graph")
test_util.run_in_graph_and_eager_modes(_test)(self)
self.assertEqual(modes, ["graph"])
def test_run_in_graph_and_eager_modes_setup_in_same_mode(self):
modes = []
mode_name = lambda: "eager" if context.executing_eagerly() else "graph"
class ExampleTest(test_util.TensorFlowTestCase):
def runTest(self):
pass
def setUp(self):
modes.append("setup_" + mode_name())
@test_util.run_in_graph_and_eager_modes
def testBody(self):
modes.append("run_" + mode_name())
e = ExampleTest()
e.setUp()
e.testBody()
self.assertEqual(modes[1:2], ["run_graph"])
self.assertEqual(modes[2:], ["setup_eager", "run_eager"])
@parameterized.named_parameters(dict(testcase_name="argument",
arg=True))
@test_util.run_in_graph_and_eager_modes
def test_run_in_graph_and_eager_works_with_parameterized_keyword(self, arg):
self.assertEqual(arg, True)
@combinations.generate(combinations.combine(arg=True))
@test_util.run_in_graph_and_eager_modes
def test_run_in_graph_and_eager_works_with_combinations(self, arg):
self.assertEqual(arg, True)
def test_build_as_function_and_v1_graph(self):
class GraphModeAndFunctionTest(parameterized.TestCase):
def __init__(inner_self): # pylint: disable=no-self-argument
super(GraphModeAndFunctionTest, inner_self).__init__()
inner_self.graph_mode_tested = False
inner_self.inside_function_tested = False
def runTest(self):
del self
@test_util.build_as_function_and_v1_graph
def test_modes(inner_self): # pylint: disable=no-self-argument
if ops.inside_function():
self.assertFalse(inner_self.inside_function_tested)
inner_self.inside_function_tested = True
else:
self.assertFalse(inner_self.graph_mode_tested)
inner_self.graph_mode_tested = True
test_object = GraphModeAndFunctionTest()
test_object.test_modes_v1_graph()
test_object.test_modes_function()
self.assertTrue(test_object.graph_mode_tested)
self.assertTrue(test_object.inside_function_tested)
@test_util.run_in_graph_and_eager_modes
def test_consistent_random_seed_in_assert_all_equal(self):
random_seed.set_seed(1066)
index = random_ops.random_shuffle([0, 1, 2, 3, 4], seed=2021)
# This failed when `a` and `b` were evaluated in separate sessions.
self.assertAllEqual(index, index)
def test_with_forward_compatibility_horizons(self):
tested_codepaths = set()
def some_function_with_forward_compat_behavior():
if compat.forward_compatible(2050, 1, 1):
tested_codepaths.add("future")
else:
tested_codepaths.add("present")
@test_util.with_forward_compatibility_horizons(None, [2051, 1, 1])
def some_test(self):
del self # unused
some_function_with_forward_compat_behavior()
some_test(None)
self.assertEqual(tested_codepaths, set(["present", "future"]))
class SkipTestTest(test_util.TensorFlowTestCase):
def _verify_test_in_set_up_or_tear_down(self):
with self.assertRaises(unittest.SkipTest):
with test_util.skip_if_error(self, ValueError,
["foo bar", "test message"]):
raise ValueError("test message")
try:
with self.assertRaisesRegex(ValueError, "foo bar"):
with test_util.skip_if_error(self, ValueError, "test message"):
raise ValueError("foo bar")
except unittest.SkipTest:
raise RuntimeError("Test is not supposed to skip.")
def setUp(self):
super(SkipTestTest, self).setUp()
self._verify_test_in_set_up_or_tear_down()
def tearDown(self):
super(SkipTestTest, self).tearDown()
self._verify_test_in_set_up_or_tear_down()
def test_skip_if_error_should_skip(self):
with self.assertRaises(unittest.SkipTest):
with test_util.skip_if_error(self, ValueError, "test message"):
raise ValueError("test message")
def test_skip_if_error_should_skip_with_list(self):
with self.assertRaises(unittest.SkipTest):
with test_util.skip_if_error(self, ValueError,
["foo bar", "test message"]):
raise ValueError("test message")
def test_skip_if_error_should_skip_without_expected_message(self):
with self.assertRaises(unittest.SkipTest):
with test_util.skip_if_error(self, ValueError):
raise ValueError("test message")
def test_skip_if_error_should_skip_without_error_message(self):
with self.assertRaises(unittest.SkipTest):
with test_util.skip_if_error(self, ValueError):
raise ValueError()
def test_skip_if_error_should_raise_message_mismatch(self):
try:
with self.assertRaisesRegex(ValueError, "foo bar"):
with test_util.skip_if_error(self, ValueError, "test message"):
raise ValueError("foo bar")
except unittest.SkipTest:
raise RuntimeError("Test is not supposed to skip.")
def test_skip_if_error_should_raise_no_message(self):
try:
with self.assertRaisesRegex(ValueError, ""):
with test_util.skip_if_error(self, ValueError, "test message"):
raise ValueError()
except unittest.SkipTest:
raise RuntimeError("Test is not supposed to skip.")
# Its own test case to reproduce variable sharing issues which only pop up when
# setUp() is overridden and super() is not called.
class GraphAndEagerNoVariableSharing(test_util.TensorFlowTestCase):
def setUp(self):
pass # Intentionally does not call TensorFlowTestCase's super()
@test_util.run_in_graph_and_eager_modes
def test_no_variable_sharing(self):
variable_scope.get_variable(
name="step_size",
initializer=np.array(1e-5, np.float32),
use_resource=True,
trainable=False)
class GarbageCollectionTest(test_util.TensorFlowTestCase):
def test_no_reference_cycle_decorator(self):
class ReferenceCycleTest(object):
def __init__(inner_self): # pylint: disable=no-self-argument
inner_self.assertEqual = self.assertEqual # pylint: disable=invalid-name
@test_util.assert_no_garbage_created
def test_has_cycle(self):
a = []
a.append(a)
@test_util.assert_no_garbage_created
def test_has_no_cycle(self):
pass
with self.assertRaises(AssertionError):
ReferenceCycleTest().test_has_cycle()
ReferenceCycleTest().test_has_no_cycle()
@test_util.run_in_graph_and_eager_modes
def test_no_leaked_tensor_decorator(self):
class LeakedTensorTest(object):
def __init__(inner_self): # pylint: disable=no-self-argument
inner_self.assertEqual = self.assertEqual # pylint: disable=invalid-name
@test_util.assert_no_new_tensors
def test_has_leak(self):
self.a = constant_op.constant([3.], name="leak")
@test_util.assert_no_new_tensors
def test_has_no_leak(self):
constant_op.constant([3.], name="no-leak")
with self.assertRaisesRegex(AssertionError, "Tensors not deallocated"):
LeakedTensorTest().test_has_leak()
LeakedTensorTest().test_has_no_leak()
def test_no_new_objects_decorator(self):
class LeakedObjectTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(LeakedObjectTest, self).__init__(*args, **kwargs)
self.accumulation = []
@unittest.expectedFailure
@test_util.assert_no_new_pyobjects_executing_eagerly
def test_has_leak(self):
self.accumulation.append([1.])
@test_util.assert_no_new_pyobjects_executing_eagerly
def test_has_no_leak(self):
self.not_accumulating = [1.]
self.assertTrue(LeakedObjectTest("test_has_leak").run().wasSuccessful())
self.assertTrue(LeakedObjectTest("test_has_no_leak").run().wasSuccessful())
class RunFunctionsEagerlyInV2Test(test_util.TensorFlowTestCase,
parameterized.TestCase):
@parameterized.named_parameters(
[("_RunEagerly", True), ("_RunGraph", False)])
def test_run_functions_eagerly(self, run_eagerly): # pylint: disable=g-wrong-blank-lines
results = []
@def_function.function
def add_two(x):
for _ in range(5):
x += 2
results.append(x)
return x
with test_util.run_functions_eagerly(run_eagerly):
add_two(constant_op.constant(2.))
if context.executing_eagerly():
        if run_eagerly:
          self.assertTrue(all(isinstance(t, ops.EagerTensor) for t in results))
        else:
          self.assertTrue(all(isinstance(t, ops.Tensor) for t in results))
      else:
        self.assertTrue(all(isinstance(t, ops.Tensor) for t in results))
if __name__ == "__main__":
googletest.main()
|
the-stack_0_16288
|
"""Fast thresholded subspace-constrained mean shift for geospatial data.
Introduction:
-------------
DREDGE, short for 'density ridge estimation describing geospatial evidence',
arguably an unnecessarily forced acronym, is a tool to find density ridges.
Based on the subspace-constrained mean shift algorithm [1], it approximates
principal curves for a given set of latitude-longitude coordinates. Various
improvements over the initial algorithm, and alterations to facilitate the
application to geospatial data, are implemented: Thresholding, as described
in cosmological research [2, 3] avoids dominant density ridges in sparsely
populated areas of the dataset. In addition, the haversine formula is used
as a distance metric to calculate the great circle distance, which makes the
tool applicable not only to city-scale data, but also to datasets spanning
multiple countries by taking the Earth's curvature into consideration.
Since DREDGE was initially developed to be applied to crime incident data,
the default bandwidth calculation follows a best-practice approach that is
well-accepted within quantitative criminology, using the mean distance to a
given number of nearest neighbors [4]. Since practitioners in that area of
study are often interested in the highest-density regions of a dataset, the
tool also features the possibility to specify a top-percentage level for a
kernel density estimate that the ridge points should fall within.
Quickstart:
-----------
DREDGE is designed to be easy to use and needs only one input, namely the
array of latitude-longitude values for coordinates. This data has to be
provided in the form of a NumPy array with two columns, with the latitudes
in the first and the longitudes in the second column. Additionally, four
optional parameters can be manually set by the user:
(1) The parameter 'neighbors' specifies the number of nearest neighbors
that should be used to calculate the optimal bandwidth if the latter
is not provided by the user. The default number of neighbors is 10.
(2) The parameter 'bandwidth' provides the bandwidth that is used for the
kernel density estimator and Gaussian kernel evaluations. By default,
an optimal bandwidth using the average distance to a number of neighbors
across all points in the provided dataset is calculated, with the number
of neighbors given by the parameter 'neighbors' explained above.
(3) The parameter 'convergence' specifies the convergence threshold to
determine when to stop iterations and return the density ridge points.
If the resulting density ridges don't follow clearly visible lines,
this parameter can be set to a lower value. The default is 0.01.
(4) The parameter 'percentage' should be set if only density ridge points
from high-density regions, as per a kernel density estimate of the
    provided set of coordinates, are to be returned. If, for example, the
parameter is set to '5', the density ridge points are evaluated via
the kernel density estimator, and only those above the 95th percentile,
as opposed to all of them as the default, are returned to the user.
A simple example for using DREDGE looks like this:
------------------------------------------------------------
| from dredge import filaments |
| |
| filaments(coordinates = your_latitudes_and_longitudes, |
| percentage = your_top_density_percentage) |
| |
------------------------------------------------------------
Here, the optional parameter 'percentage', which is explained above, is used.
Author:
--------
Ben Moews
Institute for Astronomy (IfA)
School of Physics & Astronomy
The University of Edinburgh
References:
-----------
[1] Ozertem, U. and Erdogmus, D. (2011): "Locally defined principal curves
and surfaces", JMLR, Vol. 12, pp. 1249-1286
[2] Chen, Y. C. et al. (2015), "Cosmic web reconstruction through density
ridges: Method and algorithm", MNRAS, Vol. 454, pp. 1140-1156
[3] Chen, Y. C. et al. (2016), "Cosmic web reconstruction through density
ridges: Catalogue", MNRAS, Vol. 461, pp. 3896-3909
[4] Williamson, D. et al. (1999), "A better method to smooth crime incident
data", ESRI ArcUser Magazine, January-March 1999, pp. 1-5
Packages and versions:
----------------------
The versions listed below were used in the development of DREDGE, but the exact
version numbers aren't specifically required. The installation process via
PyPI will take care of installing or updating every library to at least the
level that fulfills the requirement of providing the necessary functionality.
Python 3.4.5
NumPy 1.11.3
SciPy 0.18.1
Scikit-learn 0.19.1
"""
# Load the necessary libraries
import sys
import numpy as np
import scipy as sp
from sklearn.neighbors import KernelDensity as KDE
from sklearn.neighbors import NearestNeighbors as KNN
def filaments(coordinates,
neighbors = 10,
bandwidth = None,
convergence = 0.005,
percentage = None):
"""Estimate density rigdges for a user-provided dataset of coordinates.
This function uses an augmented version of the subspace-constrained mean
    shift algorithm to return density ridges for a set of latitude-longitude
coordinates. Apart from the haversine distance to compute a more accurate
version of a common optimal kernel bandwidth calculation in criminology,
the code also features thresholding to avoid ridges in sparsely populated
areas. While only the coordinate set is a required input, the user can
override the number of nearest neighbors used to calculate the bandwidth
and the bandwidth itself, as well as the convergence threshold used to
assess when to terminate and the percentage indicating which top-level of
filament points in high-density regions should be returned. If the latter
    is not chosen, all filament points are returned in the output instead.
Parameters:
-----------
coordinates : array-like
The set of latitudes and longitudes as a two-column array of floats.
neighbors : int, defaults to 10
The number of neighbors used for the optimal bandwidth calculation.
bandwidth : float, defaults to None
The bandwidth used for kernel density estimates of data points.
convergence : float, defaults to 0.005
The convergence threshold for the inter-iteration update difference.
percentage : float, defaults to None
The percentage of highest-density filament points that are returned.
Returns:
--------
ridges : array-like
The coordinates for the estimated density ridges of the data.
Attributes:
-----------
None
"""
# Check if the inputs are valid
parameter_check(coordinates = coordinates,
neighbors = neighbors,
bandwidth = bandwidth,
convergence = convergence,
percentage = percentage)
print("Input parameters valid!\n")
print("Preparing for iterations ...\n")
# Check whether no bandwidth is provided
if bandwidth is None:
# Compute the average distance to the given number of neighbors
nearest_neighbors = KNN(n_neighbors = neighbors,
algorithm = 'ball_tree',
metric = 'haversine').fit(coordinates)
distances, _ = nearest_neighbors.kneighbors(X = coordinates)
bandwidth = np.mean(distances[:, 1:distances.shape[1]])
print("Automatically computed bandwidth: %f\n" % bandwidth)
# Compute a Gaussian KDE with the haversine formula
density_estimate = KDE(bandwidth = bandwidth,
metric = 'haversine',
kernel = 'gaussian',
algorithm = 'ball_tree').fit(coordinates)
    # Create a uniform-random mesh covering the provided coordinates
mesh = mesh_generation(coordinates)
# Compute the threshold to omit mesh points in low-density areas
threshold, densities = threshold_function(mesh, density_estimate)
# Cut low-density mesh points from the set
ridges = mesh[densities > threshold, :]
    # Initialize the update change as larger than the convergence threshold
update_change = np.multiply(2, convergence)
# Initialize the previous update change as zero
previous_update = 0
    # Iterate until the update change falls below the convergence threshold
    iteration_number = 0
    while not update_change < convergence:
# Update the current iteration number
iteration_number = iteration_number + 1
# Print the current iteration number
print("Iteration %d ..." % iteration_number)
# Create a list to store all update values
updates = []
# Loop over the number of points in the mesh
for i in range(ridges.shape[0]):
# Compute the update movements for each point
point_updates = update_function(ridges[i], coordinates, bandwidth)
# Add the update movement to the respective point
ridges[i] = ridges[i] + point_updates
# Store the change between updates to check convergence
updates.append(np.abs(np.mean(np.sum(point_updates))))
# Get the update change to check convergence
update_average = np.mean(np.sum(updates))
update_change = np.abs(previous_update - update_average)
previous_update = update_average
# Check whether a top-percentage of points should be returned
if percentage is not None:
# Evaluate all mesh points on the kernel density estimate
evaluations = density_estimate.score_samples(ridges)
# Calculate the threshold value for a given percentage
valid_percentile = np.percentile(evaluations, [100 - percentage])
# Retain only the mesh points that are above the threshold
ridges = ridges[np.where(evaluations > valid_percentile)]
# Return the iteratively updated mesh as the density ridges
print("\nDone!")
return ridges
def haversine(point_1,
point_2):
"""Calculate the haversine distance between two coordinates.
    This function calculates the haversine formula for two latitude-longitude
tuples, a formula used for the great-circle distance on a sphere. While
the effect of using this more accurate distance, as opposed to the more
common Euclidean distance, is negligible for smaller scales, this choice
allows the code to also be used on larger scales by taking the curvature
of the Earth into account.
Parameters:
-----------
point_1 : array-like
The coordinates for a point as a tuple of type [float, float].
point_2 : array-like
The coordinates for a point as a tuple of type [float, float].
Returns:
--------
haversine_distance : float
The haversine distance between the two provided points.
Attributes:
-----------
None
"""
# Specify the radius of the Earth in kilometers
earth_radius = 6372.8
# Extract latitudes and longitudes from the provided points
latitude_1 = point_1[0]
latitude_2 = point_2[0]
longitude_1 = point_1[1]
longitude_2 = point_2[1]
# Convert the latitudes and longitudes to radians
latitude_1, longitude_1 = np.radians((latitude_1, longitude_1))
latitude_2, longitude_2 = np.radians((latitude_2, longitude_2))
# Calculate the differences between latitudes in radians
latitude_difference = latitude_2 - latitude_1
# Calculate the differences between longitudes in radians
longitude_difference = longitude_2 - longitude_1
# Calculate the haversine distance between the coordinates
step_1 = np.square(np.sin(np.multiply(latitude_difference, 0.5)))
step_2 = np.square(np.sin(np.multiply(longitude_difference, 0.5)))
step_3 = np.multiply(np.cos(latitude_1), np.cos(latitude_2))
step_4 = np.arcsin(np.sqrt(step_1 + np.multiply(step_2, step_3)))
haversine_distance = np.multiply(np.multiply(2, earth_radius), step_4)
# Return the computed haversine distance for the coordinates
return haversine_distance
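# Hedged usage sketch (not part of the original module): the haversine
# distance between Edinburgh (55.9533, -3.1883) and London (51.5074, -0.1278)
# should come out to roughly 534 kilometers, for example:
#
#     >>> haversine([55.9533, -3.1883], [51.5074, -0.1278])  # ~534.0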
def mesh_generation(coordinates):
"""Generate a set of uniformly-random distributed points as a mesh.
The subspace-constrained mean shift algorithm operates on either a grid
or a uniform-random set of coordinates to iteratively shift them towards
the estimated density ridges. Due to the functionality of the code, the
second approach is chosen, with a uniformly-random set of coordinates
in the intervals covered by the provided dataset as a mesh. In order to
not operate on a too-small or too-large number of mesh points, the size
of the mesh is constrained to a lower limit of 50,000 and an upper limit
of 100,000, with the size of the provided dataset being used if it falls
within these limits. This is done to avoid overly long running times.
Parameters:
-----------
coordinates : array-like
The set of latitudes and longitudes as a two-column array of floats.
Returns:
--------
mesh : array-like
The set of uniform-random coordinates in the dataset's intervals.
Attributes:
-----------
None
"""
# Get the minimum and maximum for the latitudes
min_latitude = np.min(coordinates[:, 0])
max_latitude = np.max(coordinates[:, 0])
# Get the minimum and maximum for the longitudes
min_longitude = np.min(coordinates[:, 1])
max_longitude = np.max(coordinates[:, 1])
    # Set the mesh size, clamped between 50,000 and 100,000 points
size = int(np.min([1e5, np.max([5e4, len(coordinates)])]))
# Create an array of uniform-random points as a mesh
mesh_1 = np.random.uniform(min_latitude, max_latitude, size)
mesh_2 = np.random.uniform(min_longitude, max_longitude, size)
mesh = np.vstack((mesh_1.flatten(), mesh_2.flatten())).T
    # Return the uniform-random mesh for the coordinates
return mesh
def threshold_function(mesh,
density_estimate):
"""Calculate the cut-off threshold for mesh point deletions.
    This function calculates the threshold that is used to delete mesh
points from the initial uniformly-random set of mesh points. The
rationale behind this approach is to avoid filaments in sparsely
populated regions of the provided dataset, leading to a final result
that only covers filaments in regions of a suitably high density.
Parameters:
-----------
mesh : array-like
The set of uniform-random coordinates in the dataset's intervals.
density_estimate : scikit-learn object
The kernel density estimator fitted on the provided dataset.
Returns:
--------
threshold : float
The cut-off threshold for the omission of points in the mesh.
density_array : array-like
The density estimates for all points in the given mesh.
Attributes:
-----------
None
"""
# Calculate the average of density estimates for the data
density_array = np.exp(density_estimate.score_samples(mesh))
density_sum = np.sum(density_array)
density_average = np.divide(density_sum, len(mesh))
# Compute the threshold via the RMS in the density fluctuation
density_difference = np.subtract(density_array, density_average)
square_sum = np.sum(np.square(density_difference))
threshold = np.sqrt(np.divide(square_sum, len(density_difference)))
    # Return the threshold for the provided mesh and density estimate
return threshold, density_array
def update_function(point,
coordinates,
bandwidth):
"""Calculate the mean shift update for a provided mesh point.
This function calculates the mean shift update for a given point of
the mesh at the current iteration. This is done through a spectral
decomposition of the local inverse covariance matrix, shifting the
respective point closer towards the nearest estimated ridge. The
updates are provided as a tuple in the latitude-longitude space to
be added to the point's coordinate values.
Parameters:
-----------
point : array-like
The latitude-longitude coordinate tuple for a single mesh point.
coordinates : array-like
        The set of latitudes and longitudes as a two-column array of floats.
    bandwidth : float
        The bandwidth used for kernel density estimates of data points.
Returns:
--------
point_updates : float
The tuple of latitude and longitude updates for the mesh point.
Attributes:
-----------
None
"""
    # Calculate the squared distances between the data and the mesh point
    squared_distance = np.sum(np.square(coordinates - point), axis=1)
    # Evaluate the Gaussian kernel at each squared distance
    weights = gaussian_kernel(squared_distance, bandwidth)
    # Compute the kernel-weighted mean of the data points
    shift = np.divide(coordinates.T.dot(weights), np.sum(weights))
    # Compute the mean shift update for the mesh point
    update = shift - point
# Calculate the local inverse covariance for the decomposition
inverse_covariance = local_inv_cov(point, coordinates, bandwidth)
# Compute the eigendecomposition of the local inverse covariance
eigen_values, eigen_vectors = np.linalg.eig(inverse_covariance)
# Align the eigenvectors with the sorted eigenvalues
sorted_eigen_values = np.argsort(eigen_values)
eigen_vectors = eigen_vectors[:, sorted_eigen_values]
# Cut the eigenvectors according to the sorted eigenvalues
cut_eigen_vectors = eigen_vectors[:, 1:]
# Project the update to the eigenvector-spanned orthogonal subspace
point_updates = cut_eigen_vectors.dot(cut_eigen_vectors.T).dot(update)
# Return the projections as the point updates
return point_updates
def gaussian_kernel(values,
bandwidth):
"""Calculate the Gaussian kernel evaluation of distance values.
This function evaluates a Gaussian kernel for the squared distances
between a mesh point and the dataset, and for a given bandwidth.
Parameters:
-----------
values : array-like
The distances between a mesh point and provided coordinates.
bandwidth : float
The bandwidth used for kernel density estimates of data points.
Returns:
--------
kernel_value : float
The Gaussian kernel evaluations for the given distances.
Attributes:
-----------
None
"""
# Compute the kernel value for the given values
temp_1 = np.multiply(np.pi, np.square(bandwidth))
temp_2 = np.divide(1, np.sqrt(temp_1))
temp_3 = np.divide(values, np.square(bandwidth))
kernel_value = np.exp(np.multiply(np.negative(0.5), temp_3))
# Return the computed kernel value
return kernel_value
def local_inv_cov(point,
coordinates,
bandwidth):
"""Compute the local inverse covariance from the gradient and Hessian.
This function computes the local inverse covariance matrix for a given
mesh point and the provided dataset, using a given bandwidth. In order
to reach this result, the covariance matrix for the distances between
a mesh point and the dataset is calculated. After that, the Hessian
matrix is used to calculate the gradient at the given point's location.
Finally, the latter is used to arrive at the local inverse covariance.
Parameters:
-----------
point : array-like
The latitude-longitude coordinate tuple for a single mesh point.
coordinates : array-like
The set of latitudes and longitudes as a two-column array of floats.
bandwidth : float
The bandwidth used for kernel density estimates of data points.
Returns:
--------
inverse_covariance : array-like
The local inverse covariance for the given point and coordinates.
Attributes:
-----------
None
"""
# Calculate the squared distance between points
squared_distance = np.sum(np.square(coordinates - point), axis=1)
# Compute the average of the weights as the estimate
weights = gaussian_kernel(squared_distance, bandwidth)
weight_average = np.mean(weights)
# Get the number of points and the dimensionality
number_points, number_columns = coordinates.shape
# Calculate one over the given bandwidth
fraction_1 = np.divide(1, np.square(bandwidth))
# Calculate one over the given number of points
fraction_2 = np.divide(1, number_points)
# Compute the mean for the provided points
mu = np.multiply(fraction_1, (coordinates - point))
# Compute the covariance matrix for the provided points
covariance = gaussian_kernel(squared_distance, bandwidth)
# Compute the Hessian matrix for the provided points
temp_1 = np.multiply(fraction_1, np.eye(number_columns))
temp_2 = (np.multiply(covariance, mu.T)).dot(mu)
temp_3 = np.multiply(fraction_2, temp_2)
temp_4 = np.multiply(temp_1, np.sum(covariance))
hessian = temp_3 - np.multiply(fraction_2, temp_4)
# Get the number of data points and the dimensionality
number_rows, number_columns = coordinates.shape
# Compute the gradient at the given point for the data
temp_5 = np.mean(np.multiply(covariance, mu.T), axis=1)
gradient = np.negative(temp_5)
    # Compute the local inverse covariance for the inputs
temp_6 = np.divide(np.negative(1), weight_average)
temp_7 = np.divide(1, np.square(weight_average))
temp_8 = np.multiply(temp_7, gradient.dot(gradient.T))
inverse_covariance = np.multiply(temp_6, hessian) + temp_8
# Return the local inverse covariance
return inverse_covariance
def parameter_check(coordinates,
neighbors,
bandwidth,
convergence,
percentage):
"""Check the main function inputs for unsuitable formats or values.
This function checks all of the user-provided main function inputs for
their suitability to be used by the code. This is done right at the
top of the main function to catch input errors early and before any
time is spent on time-consuming computations. Each faulty input is
identified, and a customized error message is printed for the user
to inform about the correct inputs before the code is terminated.
Parameters:
-----------
coordinates : array-like
The set of latitudes and longitudes as a two-column array of floats.
neighbors : int
The number of neighbors used for the optimal bandwidth calculation.
bandwidth : float
The bandwidth used for kernel density estimates of data points.
convergence : float
The convergence threshold for the inter-iteration update difference.
percentage : float
The percentage of highest-density filament points that are returned.
Returns:
--------
None
Attributes:
-----------
None
"""
# Create a boolean vector to keep track of incorrect inputs
incorrect_inputs = np.zeros(5, dtype = bool)
# Check whether two-dimensional coordinates are provided
if not type(coordinates) == np.ndarray:
incorrect_inputs[0] = True
elif not coordinates.shape[1] == 2:
incorrect_inputs[0] = True
    # Check whether neighbors is a positive whole-number int or float
    if not ((type(neighbors) == int and neighbors > 0)
            or (type(neighbors) == float
                and neighbors > 0
                and neighbors.is_integer())):
        incorrect_inputs[1] = True
# Check whether bandwidth is a positive integer or float
if not bandwidth == None:
if not ((type(bandwidth) == int and bandwidth > 0)
or (type(bandwidth) == float) and bandwidth > 0):
incorrect_inputs[2] = True
# Check whether convergence is a positive integer or float
if not convergence == None:
if not ((type(convergence) == int and convergence > 0)
or (type(convergence) == float) and convergence > 0):
incorrect_inputs[3] = True
# Check whether percentage is a valid percentage value
if not percentage == None:
if not ((type(percentage) == int and percentage >= 0
and percentage <= 100)
or ((type(percentage) == float) and percentage >= 0
and percentage <= 100)):
incorrect_inputs[4] = True
# Define error messages for each parameter failing the tests
errors = ['ERROR: coordinates: Must be a 2-column numpy.ndarray',
'ERROR: neighbors: Must be a whole-number int or float > 0',
'ERROR: bandwidth: Must be an int or float > 0, or None',
'ERROR: convergence: Must be an int or float > 0, or None',
'ERROR: percentage: Must be an int or float in [0, 100], or None']
# Print eventual error messages and terminate the code
if any(value == True for value in incorrect_inputs):
for i in range(0, len(errors)):
if incorrect_inputs[i] == True:
print(errors[i])
sys.exit()
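# Minimal usage sketch (not part of the original module), run only when the
# file is executed directly. The synthetic cluster centres and the chosen
# top-percentage are assumptions for illustration; with the 50,000-point
# mesh floor used above, the run may take several minutes.
if __name__ == "__main__":
    rng = np.random.RandomState(42)
    # Two synthetic latitude-longitude clusters roughly around Edinburgh
    cluster_1 = rng.normal(loc=[55.95, -3.19], scale=0.01, size=(500, 2))
    cluster_2 = rng.normal(loc=[55.97, -3.17], scale=0.01, size=(500, 2))
    synthetic_coordinates = np.vstack((cluster_1, cluster_2))
    # Estimate density ridges and keep only the top 10% density ridge points
    estimated_ridges = filaments(coordinates=synthetic_coordinates,
                                 percentage=10)
    print("Estimated %d ridge points" % len(estimated_ridges))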
|
the-stack_0_16290
|
import os
from io import BytesIO
import mimetypes
from django.db.models.fields.files import ImageField
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db.models import signals
from PIL import Image
# todo: Add 'delete_with_model' option that will delete thumbnail and image when model is deleted.
def _get_thumbnail_filename(filename, append_text="-thumbnail"):
"""
Returns a thumbnail version of the file name.
"""
name, ext = os.path.splitext(filename)
return ''.join([name, append_text, ext])
class ThumbnailField(object):
"""
Instances of this class will be used to access data of the
generated thumbnails. A thumbnail is created when the image is saved
initially, but there's nothing persisted that references the thumbnail.
When the `SizedImageField` is instantiated, it gets this thumbnail
field attached to it where the thumbnail becomes accessible.
for example: `image.thumbnail.url`
"""
def __init__(self, name, storage):
"""
Uses same storage as the parent field
"""
self.name = name
self.storage = storage
@property
def path(self):
return self.storage.path(self.name)
@property
def url(self):
return self.storage.url(self.name)
@property
def size(self):
return self.storage.size(self.name)
class SizedImageField(ImageField):
"""
    An Image field that allows auto resizing and auto creation of thumbnails.
"""
def __init__(self,
verbose_name=None,
name=None,
width_field=None,
height_field=None,
size=None,
thumbnail_size=None,
**kwargs):
"""
Added fields:
        - size: a tuple containing the width and height to resize the image
          to, plus an optional boolean indicating whether that size should
          be forced (None for no resizing).
        - thumbnail_size: a tuple with the same values as `size' (None for
          not creating a thumbnail).
Example: (640, 480, True) -> Will resize image to a width of 640px and
a height of 480px. File will be cut if necessary for forcing
the image to have the desired size
"""
self.size = self._get_resize_options(size)
self.thumbnail_size = self._get_resize_options(thumbnail_size)
super(SizedImageField, self).__init__(verbose_name, name, width_field,
height_field, **kwargs)
def _get_resize_options(self, dimensions):
"""
:param dimensions:
A tuple of (width, height, force_size).
'force_size' can be left off and will default to False.
"""
if dimensions and isinstance(dimensions, (tuple, list)):
if len(dimensions) < 3:
dimensions = tuple(dimensions) + (False, )
return dimensions
def contribute_to_class(self, cls, name):
"""
Makes sure thumbnail gets set when image field initialized.
"""
super(SizedImageField, self).contribute_to_class(cls, name)
signals.post_init.connect(self._set_thumbnail, sender=cls)
def pre_save(self, model_instance, add):
"""
Resizes, commits image to storage, and returns field's value just before saving.
"""
file = getattr(model_instance, self.attname)
if file and not file._committed:
file.name = self._clean_file_name(model_instance, file.name)
file.file = self._resize_image(model_instance, file)
file.save(file.name, file, save=False)
return file
def _clean_file_name(self, model_instance, filename):
"""
We need to make sure we know the full file name before we save the thumbnail so
we can be sure the name doesn't change on save.
This method gets the available filename and returns just the file part.
"""
available_name = self.storage.get_available_name(
self.generate_filename(model_instance, filename))
return os.path.basename(available_name)
def _create_thumbnail(self, model_instance, thumbnail, image_name):
"""
Resizes and saves the thumbnail image
"""
thumbnail = self._do_resize(thumbnail, self.thumbnail_size)
full_image_name = self.generate_filename(model_instance, image_name)
thumbnail_filename = _get_thumbnail_filename(full_image_name)
thumb = self._get_simple_uploaded_file(thumbnail, thumbnail_filename)
self.storage.save(thumbnail_filename, thumb)
def _resize_image(self, model_instance, image_field):
""""""
image_name = image_field.name
image = Image.open(image_field.file)
if image.mode not in ('L', 'RGB'):
image = image.convert('RGB')
if self.size:
image = self._do_resize(image, self.size)
if self.thumbnail_size:
self._create_thumbnail(model_instance, image.copy(), image_name)
return self._get_simple_uploaded_file(image, image_name)
def _do_resize(self, img, dimensions):
width, height, force_size = dimensions
if force_size:
            img = img.resize((width, height), Image.ANTIALIAS)
else:
img.thumbnail((width, height), Image.ANTIALIAS)
return img
def _set_thumbnail(self, instance=None, **kwargs):
"""
Sets a `thumbnail` attribute on the image field class.
On thumbnail you can access name, url, path attributes
"""
image_field = getattr(instance, self.name)
if image_field:
thumbnail_filename = _get_thumbnail_filename(image_field.name)
thumbnail_field = ThumbnailField(thumbnail_filename, self.storage)
setattr(image_field, 'thumbnail', thumbnail_field)
def _get_simple_uploaded_file(self, image, file_name):
"""
:param image:
a python PIL ``Image`` instance.
:param file_name:
The file name of the image.
:returns:
A django ``SimpleUploadedFile`` instance ready to be saved.
"""
extension = os.path.splitext(file_name)[1]
mimetype, encoding = mimetypes.guess_type(file_name)
content_type = mimetype or 'image/png'
temp_handle = BytesIO()
image.save(temp_handle, self._get_pil_format(extension))
temp_handle.seek(0) # rewind the file
suf = SimpleUploadedFile(
file_name,
temp_handle.read(),
content_type=content_type,
)
return suf
def _get_pil_format(self, extension):
"""
:param extension:
The file name extension (.png, .jpg, etc...)
:returns:
The file format PIL needs from the file extension.
Eg. PNG or JPEG
"""
return Image.EXTENSION[extension.lower()]
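# --- Usage sketch (added illustration, not part of the original field) ---
# A minimal sketch of how the field might be declared on a model, assuming a
# configured Django project; the `Photo` model and the size tuples below are
# hypothetical and only show the (width, height[, force_size]) convention
# documented in __init__.
#
#     from django.db import models
#
#     class Photo(models.Model):
#         image = SizedImageField(
#             upload_to='photos',
#             size=(640, 480, True),      # resize and crop to exactly 640x480
#             thumbnail_size=(128, 128),  # force_size defaults to False
#         )
#
# After loading or saving an instance, the post_init signal wired up in
# contribute_to_class exposes `photo.image.thumbnail` with name/url/path
# attributes.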
|
the-stack_0_16292
|
class ContentFilteringRules(object):
def __init__(self, session):
super(ContentFilteringRules, self).__init__()
self._session = session
def getNetworkContentFiltering(self, networkId: str):
"""
**Return the content filtering settings for an MX network**
https://api.meraki.com/api_docs#return-the-content-filtering-settings-for-an-mx-network
- networkId (string)
"""
metadata = {
'tags': ['Content filtering rules'],
'operation': 'getNetworkContentFiltering',
}
resource = f'/networks/{networkId}/contentFiltering'
return self._session.get(metadata, resource)
def updateNetworkContentFiltering(self, networkId: str, **kwargs):
"""
**Update the content filtering settings for an MX network**
https://api.meraki.com/api_docs#update-the-content-filtering-settings-for-an-mx-network
- networkId (string)
- allowedUrlPatterns (array): A whitelist of URL patterns to allow
- blockedUrlPatterns (array): A blacklist of URL patterns to block
- blockedUrlCategories (array): A list of URL categories to block
- urlCategoryListSize (string): URL category list size which is either 'topSites' or 'fullList'
"""
kwargs.update(locals())
if 'urlCategoryListSize' in kwargs:
options = ['topSites', 'fullList']
assert kwargs['urlCategoryListSize'] in options, f'''"urlCategoryListSize" cannot be "{kwargs['urlCategoryListSize']}", & must be set to one of: {options}'''
metadata = {
'tags': ['Content filtering rules'],
'operation': 'updateNetworkContentFiltering',
}
resource = f'/networks/{networkId}/contentFiltering'
body_params = ['allowedUrlPatterns', 'blockedUrlPatterns', 'blockedUrlCategories', 'urlCategoryListSize']
payload = {k: v for (k, v) in kwargs.items() if k in body_params}
return self._session.put(metadata, resource, payload)
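# --- Usage sketch (added illustration, not part of the original module) ---
# A minimal, runnable sketch of the call pattern, assuming any object that
# exposes get/put the way the real Meraki API session does; DummySession and
# the network id 'N_123' below are hypothetical stand-ins.
if __name__ == '__main__':
    class DummySession(object):
        def get(self, metadata, resource):
            # The real session issues an HTTP GET against the Dashboard API.
            print('GET', resource)
        def put(self, metadata, resource, payload):
            # The real session issues an HTTP PUT with the JSON payload.
            print('PUT', resource, payload)
    rules = ContentFilteringRules(DummySession())
    rules.getNetworkContentFiltering('N_123')
    rules.updateNetworkContentFiltering('N_123', urlCategoryListSize='topSites')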
|
the-stack_0_16293
|
import os
from pikka_bird_collector.collectors.mysql import Mysql
from pikka_bird_collector.collectors.base_port_command import BasePortCommand
class TestMysql:
@staticmethod
def fixture_path(filename):
return os.path.join(os.path.dirname(__file__), '../fixtures', filename)
@staticmethod
def read_fixture(filename):
with open(filename, 'r') as f_h:
d = f_h.read()
return d
def mock_cmd_show_status(self):
f = TestMysql.fixture_path('mysql/show_status.txt')
return TestMysql.read_fixture(f)
def mock_cmd_show_master_status(self):
f = TestMysql.fixture_path('mysql/show_master_status.txt')
return TestMysql.read_fixture(f)
def mock_cmd_show_slave_status(self):
f = TestMysql.fixture_path('mysql/show_slave_status.txt')
return TestMysql.read_fixture(f)
def mock_cmd_show_slave_hosts(self):
f = TestMysql.fixture_path('mysql/show_slave_hosts.txt')
return TestMysql.read_fixture(f)
def mock_cmd_show_variables(self):
f = TestMysql.fixture_path('mysql/show_variables.txt')
return TestMysql.read_fixture(f)
def mock_exec_command(self, command_f):
if Mysql.CMD_SHOW_STATUS in command_f:
return self.mock_cmd_show_status()
elif Mysql.CMD_SHOW_MASTER_STATUS in command_f:
return self.mock_cmd_show_master_status()
elif Mysql.CMD_SHOW_SLAVE_STATUS in command_f:
return self.mock_cmd_show_slave_status()
elif Mysql.CMD_SHOW_SLAVE_HOSTS in command_f:
return self.mock_cmd_show_slave_hosts()
elif Mysql.CMD_SHOW_VARIABLES in command_f:
return self.mock_cmd_show_variables()
def mock_collect_status(self):
return {
'aborted_clients': 0,
'aborted_connects': 13,
'binlog_cache_disk_use': 0,
'binlog_cache_use': 0,
'binlog_stmt_cache_disk_use': 0,
'binlog_stmt_cache_use': 0,
'bytes_received': 224,
'bytes_sent': 168,
'com_admin_commands': 0,
'com_alter_db': 0,
'com_alter_db_upgrade': 0,
'com_alter_event': 0,
'com_alter_function': 0,
'com_alter_procedure': 0,
'com_alter_server': 0,
'com_alter_table': 0,
'com_alter_tablespace': 0,
'com_alter_user': 0,
'com_analyze': 0,
'com_assign_to_keycache': 0,
'com_begin': 0,
'com_binlog': 0,
'com_call_procedure': 0,
'com_change_db': 0,
'com_change_master': 0,
'com_check': 0,
'com_checksum': 0,
'com_commit': 0,
'com_create_db': 0,
'com_create_event': 0,
'com_create_function': 0,
'com_create_index': 0,
'com_create_procedure': 0,
'com_create_server': 0,
'com_create_table': 0,
'com_create_trigger': 0,
'com_create_udf': 0,
'com_create_user': 0,
'com_create_view': 0,
'com_dealloc_sql': 0,
'com_delete': 0,
'com_delete_multi': 0,
'com_do': 0,
'com_drop_db': 0,
'com_drop_event': 0,
'com_drop_function': 0,
'com_drop_index': 0,
'com_drop_procedure': 0,
'com_drop_server': 0,
'com_drop_table': 0,
'com_drop_trigger': 0,
'com_drop_user': 0,
'com_drop_view': 0,
'com_empty_query': 0,
'com_execute_sql': 0,
'com_flush': 0,
'com_get_diagnostics': 0,
'com_grant': 0,
'com_ha_close': 0,
'com_ha_open': 0,
'com_ha_read': 0,
'com_help': 0,
'com_insert': 0,
'com_insert_select': 0,
'com_install_plugin': 0,
'com_kill': 0,
'com_load': 0,
'com_lock_tables': 0,
'com_optimize': 0,
'com_preload_keys': 0,
'com_prepare_sql': 0,
'com_purge': 0,
'com_purge_before_date': 0,
'com_release_savepoint': 0,
'com_rename_table': 0,
'com_rename_user': 0,
'com_repair': 0,
'com_replace': 0,
'com_replace_select': 0,
'com_reset': 0,
'com_resignal': 0,
'com_revoke': 0,
'com_revoke_all': 0,
'com_rollback': 0,
'com_rollback_to_savepoint': 0,
'com_savepoint': 0,
'com_select': 1,
'com_set_option': 0,
'com_show_binlog_events': 0,
'com_show_binlogs': 0,
'com_show_charsets': 0,
'com_show_collations': 0,
'com_show_create_db': 0,
'com_show_create_event': 0,
'com_show_create_func': 0,
'com_show_create_proc': 0,
'com_show_create_table': 0,
'com_show_create_trigger': 0,
'com_show_databases': 0,
'com_show_engine_logs': 0,
'com_show_engine_mutex': 0,
'com_show_engine_status': 0,
'com_show_errors': 0,
'com_show_events': 0,
'com_show_fields': 0,
'com_show_function_code': 0,
'com_show_function_status': 0,
'com_show_grants': 0,
'com_show_keys': 0,
'com_show_master_status': 0,
'com_show_open_tables': 0,
'com_show_plugins': 0,
'com_show_privileges': 0,
'com_show_procedure_code': 0,
'com_show_procedure_status': 0,
'com_show_processlist': 0,
'com_show_profile': 0,
'com_show_profiles': 0,
'com_show_relaylog_events': 0,
'com_show_slave_hosts': 0,
'com_show_slave_status': 0,
'com_show_status': 1,
'com_show_storage_engines': 0,
'com_show_table_status': 0,
'com_show_tables': 0,
'com_show_triggers': 0,
'com_show_variables': 0,
'com_show_warnings': 0,
'com_signal': 0,
'com_slave_start': 0,
'com_slave_stop': 0,
'com_stmt_close': 0,
'com_stmt_execute': 0,
'com_stmt_fetch': 0,
'com_stmt_prepare': 0,
'com_stmt_reprepare': 0,
'com_stmt_reset': 0,
'com_stmt_send_long_data': 0,
'com_truncate': 0,
'com_uninstall_plugin': 0,
'com_unlock_tables': 0,
'com_update': 0,
'com_update_multi': 0,
'com_xa_commit': 0,
'com_xa_end': 0,
'com_xa_prepare': 0,
'com_xa_recover': 0,
'com_xa_rollback': 0,
'com_xa_start': 0,
'compression': False,
'connection_errors_accept': 0,
'connection_errors_internal': 0,
'connection_errors_max_connections': 0,
'connection_errors_peer_address': 0,
'connection_errors_select': 0,
'connection_errors_tcpwrap': 0,
'connections': 148,
'created_tmp_disk_tables': 0,
'created_tmp_files': 5,
'created_tmp_tables': 0,
'delayed_errors': 0,
'delayed_insert_threads': 0,
'delayed_writes': 0,
'flush_commands': 1,
'handler_commit': 0,
'handler_delete': 0,
'handler_discover': 0,
'handler_external_lock': 0,
'handler_mrr_init': 0,
'handler_prepare': 0,
'handler_read_first': 0,
'handler_read_key': 0,
'handler_read_last': 0,
'handler_read_next': 0,
'handler_read_prev': 0,
'handler_read_rnd': 0,
'handler_read_rnd_next': 0,
'handler_rollback': 0,
'handler_savepoint': 0,
'handler_savepoint_rollback': 0,
'handler_update': 0,
'handler_write': 0,
'innodb_available_undo_logs': 128,
'innodb_buffer_pool_bytes_data': 7454720,
'innodb_buffer_pool_bytes_dirty': 0,
'innodb_buffer_pool_dump_status': 'not started',
'innodb_buffer_pool_load_status': 'not started',
'innodb_buffer_pool_pages_data': 455,
'innodb_buffer_pool_pages_dirty': 0,
'innodb_buffer_pool_pages_flushed': 1,
'innodb_buffer_pool_pages_free': 7736,
'innodb_buffer_pool_pages_misc': 0,
'innodb_buffer_pool_pages_total': 8191,
'innodb_buffer_pool_read_ahead': 0,
'innodb_buffer_pool_read_ahead_evicted': 0,
'innodb_buffer_pool_read_ahead_rnd': 0,
'innodb_buffer_pool_read_requests': 8252,
'innodb_buffer_pool_reads': 456,
'innodb_buffer_pool_wait_free': 0,
'innodb_buffer_pool_write_requests': 1,
'innodb_data_fsyncs': 5,
'innodb_data_pending_fsyncs': 0,
'innodb_data_pending_reads': 0,
'innodb_data_pending_writes': 0,
'innodb_data_read': 7540736,
'innodb_data_reads': 477,
'innodb_data_writes': 5,
'innodb_data_written': 34304,
'innodb_dblwr_pages_written': 1,
'innodb_dblwr_writes': 1,
'innodb_have_atomic_builtins': True,
'innodb_log_waits': 0,
'innodb_log_write_requests': 0,
'innodb_log_writes': 1,
'innodb_num_open_files': 14,
'innodb_os_log_fsyncs': 3,
'innodb_os_log_pending_fsyncs': 0,
'innodb_os_log_pending_writes': 0,
'innodb_os_log_written': 512,
'innodb_page_size': 16384,
'innodb_pages_created': 0,
'innodb_pages_read': 455,
'innodb_pages_written': 1,
'innodb_row_lock_current_waits': 0,
'innodb_row_lock_time': 0,
'innodb_row_lock_time_avg': 0,
'innodb_row_lock_time_max': 0,
'innodb_row_lock_waits': 0,
'innodb_rows_deleted': 0,
'innodb_rows_inserted': 0,
'innodb_rows_read': 0,
'innodb_rows_updated': 0,
'innodb_truncated_status_writes': 0,
'key_blocks_not_flushed': 0,
'key_blocks_unused': 6698,
'key_blocks_used': 0,
'key_read_requests': 0,
'key_reads': 0,
'key_write_requests': 0,
'key_writes': 0,
'last_query_cost': 0.0,
'last_query_partial_plans': 0,
'max_used_connections': 2,
'not_flushed_delayed_rows': 0,
'open_files': 18,
'open_streams': 0,
'open_table_definitions': 68,
'open_tables': 61,
'opened_files': 118,
'opened_table_definitions': 0,
'opened_tables': 0,
'performance_schema_accounts_lost': 0,
'performance_schema_cond_classes_lost': 0,
'performance_schema_cond_instances_lost': 0,
'performance_schema_digest_lost': 0,
'performance_schema_file_classes_lost': 0,
'performance_schema_file_handles_lost': 0,
'performance_schema_file_instances_lost': 0,
'performance_schema_hosts_lost': 0,
'performance_schema_locker_lost': 0,
'performance_schema_mutex_classes_lost': 0,
'performance_schema_mutex_instances_lost': 0,
'performance_schema_rwlock_classes_lost': 0,
'performance_schema_rwlock_instances_lost': 0,
'performance_schema_session_connect_attrs_lost': 0,
'performance_schema_socket_classes_lost': 0,
'performance_schema_socket_instances_lost': 0,
'performance_schema_stage_classes_lost': 0,
'performance_schema_statement_classes_lost': 0,
'performance_schema_table_handles_lost': 0,
'performance_schema_table_instances_lost': 0,
'performance_schema_thread_classes_lost': 0,
'performance_schema_thread_instances_lost': 0,
'performance_schema_users_lost': 0,
'prepared_stmt_count': 0,
'qcache_free_blocks': 1,
'qcache_free_memory': 1031336,
'qcache_hits': 0,
'qcache_inserts': 0,
'qcache_lowmem_prunes': 0,
'qcache_not_cached': 136,
'qcache_queries_in_cache': 0,
'qcache_total_blocks': 1,
'queries': 410,
'questions': 2,
'rsa_public_key': None,
'select_full_join': 0,
'select_full_range_join': 0,
'select_range': 0,
'select_range_check': 0,
'select_scan': 0,
'slave_heartbeat_period': None,
'slave_last_heartbeat': None,
'slave_open_temp_tables': 0,
'slave_received_heartbeats': None,
'slave_retried_transactions': None,
'slave_running': False,
'slow_launch_threads': 0,
'slow_queries': 0,
'sort_merge_passes': 0,
'sort_range': 0,
'sort_rows': 0,
'sort_scan': 0,
'ssl_accept_renegotiates': 0,
'ssl_accepts': 0,
'ssl_callback_cache_hits': 0,
'ssl_cipher': None,
'ssl_cipher_list': None,
'ssl_client_connects': 0,
'ssl_connect_renegotiates': 0,
'ssl_ctx_verify_depth': 0,
'ssl_ctx_verify_mode': 0,
'ssl_default_timeout': 0,
'ssl_finished_accepts': 0,
'ssl_finished_connects': 0,
'ssl_server_not_after': None,
'ssl_server_not_before': None,
'ssl_session_cache_hits': 0,
'ssl_session_cache_misses': 0,
'ssl_session_cache_mode': 'NONE',
'ssl_session_cache_overflows': 0,
'ssl_session_cache_size': 0,
'ssl_session_cache_timeouts': 0,
'ssl_sessions_reused': 0,
'ssl_used_session_cache_entries': 0,
'ssl_verify_depth': 0,
'ssl_verify_mode': 0,
'ssl_version': None,
'table_locks_immediate': 74,
'table_locks_waited': 0,
'table_open_cache_hits': 0,
'table_open_cache_misses': 0,
'table_open_cache_overflows': 0,
'tc_log_max_pages_used': 0,
'tc_log_page_size': 0,
'tc_log_page_waits': 0,
'threads_cached': 0,
'threads_connected': 2,
'threads_created': 2,
'threads_running': 1,
'uptime': 2616535,
'uptime_since_flush_status': 2616535}
def mock_collect_master_status(self):
return {
'mysql-bin.000024': {
'binlog_do_db': None,
'binlog_ignore_db': None,
'file': 'mysql-bin.000024',
'position': 64795006}}
def mock_collect_slave_status(self):
return {
'connect_retry': 60,
'exec_master_log_pos': 64707836,
'last_errno': 0,
'last_error': None,
'last_io_errno': 0,
'last_io_error': None,
'last_sql_errno': 0,
'last_sql_error': None,
'master_host': 'i-00000000.example.com',
'master_log_file': 'mysql-bin.000024',
'master_port': 3306,
'master_server_id': 1,
'master_ssl_allowed': False,
'master_ssl_ca_file': None,
'master_ssl_ca_path': None,
'master_ssl_cert': None,
'master_ssl_cipher': None,
'master_ssl_key': None,
'master_ssl_verify_server_cert': False,
'master_user': 'repl',
'read_master_log_pos': 64707836,
'relay_log_file': 'mysqld-relay-bin.000064',
'relay_log_pos': 64659963,
'relay_log_space': 64660762,
'relay_master_log_file': 'mysql-bin.000024',
'replicate_do_db': None,
'replicate_do_table': None,
'replicate_ignore_db': None,
'replicate_ignore_server_ids': None,
'replicate_ignore_table': None,
'replicate_wild_do_table': None,
'replicate_wild_ignore_table': None,
'seconds_behind_master': 0,
'skip_counter': 0,
'slave_io_running': True,
'slave_io_state': 'Waiting for master to send event',
'slave_sql_running': True,
'until_condition': 'None',
'until_log_file': None,
'until_log_pos': 0}
def mock_collect_slave_hosts(self):
return {
'2': {
'host': 'i-00000000',
'master_id': 1,
'port': 3306,
'server_id': 2}}
def mock_collect_variables(self):
return {
'auto_increment_increment': 1,
'auto_increment_offset': 1,
'autocommit': True,
'automatic_sp_privileges': True,
'back_log': 80,
'basedir': '/usr/local/Cellar/mysql/5.6.23',
'big_tables': False,
'bind_address': '127.0.0.1',
'binlog_cache_size': 32768,
'binlog_checksum': 'CRC32',
'binlog_direct_non_transactional_updates': False,
'binlog_error_action': 'IGNORE_ERROR',
'binlog_format': 'STATEMENT',
'binlog_gtid_simple_recovery': False,
'binlog_max_flush_queue_time': 0,
'binlog_order_commits': True,
'binlog_row_image': 'FULL',
'binlog_rows_query_log_events': False,
'binlog_stmt_cache_size': 32768,
'binlogging_impossible_mode': 'IGNORE_ERROR',
'block_encryption_mode': 'aes-128-ecb',
'bulk_insert_buffer_size': 8388608,
'character_set_client': 'utf8',
'character_set_connection': 'utf8',
'character_set_database': 'utf8',
'character_set_filesystem': 'binary',
'character_set_results': 'utf8',
'character_set_server': 'utf8',
'character_set_system': 'utf8',
'character_sets_dir': '/usr/local/Cellar/mysql/5.6.23/share/mysql/charsets/',
'collation_connection': 'utf8_general_ci',
'collation_database': 'utf8_general_ci',
'collation_server': 'utf8_general_ci',
'completion_type': 'NO_CHAIN',
'concurrent_insert': 'AUTO',
'connect_timeout': 10,
'core_file': False,
'datadir': '/usr/local/var/mysql/',
'date_format': '%Y-%m-%d',
'datetime_format': '%Y-%m-%d %H:%i:%s',
'default_storage_engine': 'InnoDB',
'default_tmp_storage_engine': 'InnoDB',
'default_week_format': 0,
'delay_key_write': True,
'delayed_insert_limit': 100,
'delayed_insert_timeout': 300,
'delayed_queue_size': 1000,
'disconnect_on_expired_password': True,
'div_precision_increment': 4,
'end_markers_in_json': False,
'enforce_gtid_consistency': False,
'eq_range_index_dive_limit': 10,
'error_count': 0,
'event_scheduler': False,
'expire_logs_days': 0,
'explicit_defaults_for_timestamp': False,
'external_user': None,
'flush': False,
'flush_time': 0,
'foreign_key_checks': True,
'ft_boolean_syntax': '+ -><()~*:""&|',
'ft_max_word_len': 84,
'ft_min_word_len': 4,
'ft_query_expansion_limit': 20,
'ft_stopword_file': '(built-in)',
'general_log': False,
'general_log_file': '/usr/local/var/mysql/tiredpixel.log',
'group_concat_max_len': 1024,
'gtid_executed': None,
'gtid_mode': False,
'gtid_next': 'AUTOMATIC',
'gtid_owned': None,
'gtid_purged': None,
'have_compress': True,
'have_crypt': True,
'have_dynamic_loading': True,
'have_geometry': True,
'have_openssl': 'DISABLED',
'have_profiling': True,
'have_query_cache': True,
'have_rtree_keys': True,
'have_ssl': 'DISABLED',
'have_symlink': True,
'host_cache_size': 279,
'hostname': 'tiredpixel.home',
'identity': 0,
'ignore_builtin_innodb': False,
'ignore_db_dirs': None,
'init_connect': None,
'init_file': None,
'init_slave': None,
'innodb_adaptive_flushing': True,
'innodb_adaptive_flushing_lwm': 10,
'innodb_adaptive_hash_index': True,
'innodb_adaptive_max_sleep_delay': 150000,
'innodb_additional_mem_pool_size': 8388608,
'innodb_api_bk_commit_interval': 5,
'innodb_api_disable_rowlock': False,
'innodb_api_enable_binlog': False,
'innodb_api_enable_mdl': False,
'innodb_api_trx_level': 0,
'innodb_autoextend_increment': 64,
'innodb_autoinc_lock_mode': 1,
'innodb_buffer_pool_dump_at_shutdown': False,
'innodb_buffer_pool_dump_now': False,
'innodb_buffer_pool_filename': 'ib_buffer_pool',
'innodb_buffer_pool_instances': 8,
'innodb_buffer_pool_load_abort': False,
'innodb_buffer_pool_load_at_startup': False,
'innodb_buffer_pool_load_now': False,
'innodb_buffer_pool_size': 134217728,
'innodb_change_buffer_max_size': 25,
'innodb_change_buffering': 'all',
'innodb_checksum_algorithm': 'innodb',
'innodb_checksums': True,
'innodb_cmp_per_index_enabled': False,
'innodb_commit_concurrency': 0,
'innodb_compression_failure_threshold_pct': 5,
'innodb_compression_level': 6,
'innodb_compression_pad_pct_max': 50,
'innodb_concurrency_tickets': 5000,
'innodb_data_file_path': 'ibdata1:12M:autoextend',
'innodb_data_home_dir': None,
'innodb_disable_sort_file_cache': False,
'innodb_doublewrite': True,
'innodb_fast_shutdown': 1,
'innodb_file_format': 'Antelope',
'innodb_file_format_check': True,
'innodb_file_format_max': 'Antelope',
'innodb_file_per_table': True,
'innodb_flush_log_at_timeout': 1,
'innodb_flush_log_at_trx_commit': 1,
'innodb_flush_method': None,
'innodb_flush_neighbors': 1,
'innodb_flushing_avg_loops': 30,
'innodb_force_load_corrupted': False,
'innodb_force_recovery': 0,
'innodb_ft_aux_table': None,
'innodb_ft_cache_size': 8000000,
'innodb_ft_enable_diag_print': False,
'innodb_ft_enable_stopword': True,
'innodb_ft_max_token_size': 84,
'innodb_ft_min_token_size': 3,
'innodb_ft_num_word_optimize': 2000,
'innodb_ft_result_cache_limit': 2000000000,
'innodb_ft_server_stopword_table': None,
'innodb_ft_sort_pll_degree': 2,
'innodb_ft_total_cache_size': 640000000,
'innodb_ft_user_stopword_table': None,
'innodb_io_capacity': 200,
'innodb_io_capacity_max': 2000,
'innodb_large_prefix': False,
'innodb_lock_wait_timeout': 50,
'innodb_locks_unsafe_for_binlog': False,
'innodb_log_buffer_size': 8388608,
'innodb_log_compressed_pages': True,
'innodb_log_file_size': 50331648,
'innodb_log_files_in_group': 2,
'innodb_log_group_home_dir': './',
'innodb_lru_scan_depth': 1024,
'innodb_max_dirty_pages_pct': 75,
'innodb_max_dirty_pages_pct_lwm': 0,
'innodb_max_purge_lag': 0,
'innodb_max_purge_lag_delay': 0,
'innodb_mirrored_log_groups': 1,
'innodb_monitor_disable': None,
'innodb_monitor_enable': None,
'innodb_monitor_reset': None,
'innodb_monitor_reset_all': None,
'innodb_old_blocks_pct': 37,
'innodb_old_blocks_time': 1000,
'innodb_online_alter_log_max_size': 134217728,
'innodb_open_files': 2000,
'innodb_optimize_fulltext_only': False,
'innodb_page_size': 16384,
'innodb_print_all_deadlocks': False,
'innodb_purge_batch_size': 300,
'innodb_purge_threads': 1,
'innodb_random_read_ahead': False,
'innodb_read_ahead_threshold': 56,
'innodb_read_io_threads': 4,
'innodb_read_only': False,
'innodb_replication_delay': 0,
'innodb_rollback_on_timeout': False,
'innodb_rollback_segments': 128,
'innodb_sort_buffer_size': 1048576,
'innodb_spin_wait_delay': 6,
'innodb_stats_auto_recalc': True,
'innodb_stats_method': 'nulls_equal',
'innodb_stats_on_metadata': False,
'innodb_stats_persistent': True,
'innodb_stats_persistent_sample_pages': 20,
'innodb_stats_sample_pages': 8,
'innodb_stats_transient_sample_pages': 8,
'innodb_status_output': False,
'innodb_status_output_locks': False,
'innodb_strict_mode': False,
'innodb_support_xa': True,
'innodb_sync_array_size': 1,
'innodb_sync_spin_loops': 30,
'innodb_table_locks': True,
'innodb_thread_concurrency': 0,
'innodb_thread_sleep_delay': 10000,
'innodb_undo_directory': '.',
'innodb_undo_logs': 128,
'innodb_undo_tablespaces': 0,
'innodb_use_native_aio': False,
'innodb_use_sys_malloc': True,
'innodb_version': '5.6.23',
'innodb_write_io_threads': 4,
'insert_id': 0,
'interactive_timeout': 28800,
'join_buffer_size': 262144,
'keep_files_on_create': False,
'key_buffer_size': 8388608,
'key_cache_age_threshold': 300,
'key_cache_block_size': 1024,
'key_cache_division_limit': 100,
'large_files_support': True,
'large_page_size': 0,
'large_pages': False,
'last_insert_id': 0,
'lc_messages': 'en_US',
'lc_messages_dir': '/usr/local/Cellar/mysql/5.6.23/share/mysql/',
'lc_time_names': 'en_US',
'license': 'GPL',
'local_infile': True,
'lock_wait_timeout': 31536000,
'locked_in_memory': False,
'log_bin': False,
'log_bin_basename': None,
'log_bin_index': None,
'log_bin_trust_function_creators': False,
'log_bin_use_v1_row_events': False,
'log_error': '/usr/local/var/mysql/tiredpixel.home.err',
'log_output': 'FILE',
'log_queries_not_using_indexes': False,
'log_slave_updates': False,
'log_slow_admin_statements': False,
'log_slow_slave_statements': False,
'log_throttle_queries_not_using_indexes': 0,
'log_warnings': 1,
'long_query_time': 10.000000,
'low_priority_updates': False,
'lower_case_file_system': True,
'lower_case_table_names': 2,
'master_info_repository': 'FILE',
'master_verify_checksum': False,
'max_allowed_packet': 4194304,
'max_binlog_cache_size': 18446744073709547520,
'max_binlog_size': 1073741824,
'max_binlog_stmt_cache_size': 18446744073709547520,
'max_connect_errors': 100,
'max_connections': 151,
'max_delayed_threads': 20,
'max_error_count': 64,
'max_heap_table_size': 16777216,
'max_insert_delayed_threads': 20,
'max_join_size': 18446744073709551615,
'max_length_for_sort_data': 1024,
'max_prepared_stmt_count': 16382,
'max_relay_log_size': 0,
'max_seeks_for_key': 18446744073709551615,
'max_sort_length': 1024,
'max_sp_recursion_depth': 0,
'max_tmp_tables': 32,
'max_user_connections': 0,
'max_write_lock_count': 18446744073709551615,
'metadata_locks_cache_size': 1024,
'metadata_locks_hash_instances': 8,
'min_examined_row_limit': 0,
'multi_range_count': 256,
'myisam_data_pointer_size': 6,
'myisam_max_sort_file_size': 9223372036853727232,
'myisam_mmap_size': 18446744073709551615,
'myisam_recover_options': False,
'myisam_repair_threads': 1,
'myisam_sort_buffer_size': 8388608,
'myisam_stats_method': 'nulls_unequal',
'myisam_use_mmap': False,
'net_buffer_length': 16384,
'net_read_timeout': 30,
'net_retry_count': 10,
'net_write_timeout': 60,
'new': False,
'old': False,
'old_alter_table': False,
'old_passwords': 0,
'open_files_limit': 5000,
'optimizer_prune_level': 1,
'optimizer_search_depth': 62,
'optimizer_switch': 'index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,engine_condition_pushdown=on,index_condition_pushdown=on,mrr=on,mrr_cost_based=on,block_nested_loop=on,batched_key_access=off,materialization=on,semijoin=on,loosescan=on,firstmatch=on,subquery_materialization_cost_based=on,use_index_extensions=on',
'optimizer_trace': 'enabled=off,one_line=off',
'optimizer_trace_features': 'greedy_search=on,range_optimizer=on,dynamic_range=on,repeated_subselect=on',
'optimizer_trace_limit': 1,
'optimizer_trace_max_mem_size': 16384,
'optimizer_trace_offset': -1,
'performance_schema': True,
'performance_schema_accounts_size': 100,
'performance_schema_digests_size': 10000,
'performance_schema_events_stages_history_long_size': 10000,
'performance_schema_events_stages_history_size': 10,
'performance_schema_events_statements_history_long_size': 10000,
'performance_schema_events_statements_history_size': 10,
'performance_schema_events_waits_history_long_size': 10000,
'performance_schema_events_waits_history_size': 10,
'performance_schema_hosts_size': 100,
'performance_schema_max_cond_classes': 80,
'performance_schema_max_cond_instances': 3504,
'performance_schema_max_file_classes': 50,
'performance_schema_max_file_handles': 32768,
'performance_schema_max_file_instances': 7693,
'performance_schema_max_mutex_classes': 200,
'performance_schema_max_mutex_instances': 15906,
'performance_schema_max_rwlock_classes': 40,
'performance_schema_max_rwlock_instances': 9102,
'performance_schema_max_socket_classes': 10,
'performance_schema_max_socket_instances': 322,
'performance_schema_max_stage_classes': 150,
'performance_schema_max_statement_classes': 168,
'performance_schema_max_table_handles': 4000,
'performance_schema_max_table_instances': 12500,
'performance_schema_max_thread_classes': 50,
'performance_schema_max_thread_instances': 402,
'performance_schema_session_connect_attrs_size': 512,
'performance_schema_setup_actors_size': 100,
'performance_schema_setup_objects_size': 100,
'performance_schema_users_size': 100,
'pid_file': '/usr/local/var/mysql/tiredpixel.home.pid',
'plugin_dir': '/usr/local/Cellar/mysql/5.6.23/lib/plugin/',
'port': 3306,
'preload_buffer_size': 32768,
'profiling': False,
'profiling_history_size': 15,
'protocol_version': 10,
'proxy_user': None,
'pseudo_slave_mode': False,
'pseudo_thread_id': 80,
'query_alloc_block_size': 8192,
'query_cache_limit': 1048576,
'query_cache_min_res_unit': 4096,
'query_cache_size': 1048576,
'query_cache_type': False,
'query_cache_wlock_invalidate': False,
'query_prealloc_size': 8192,
'rand_seed1': 0,
'rand_seed2': 0,
'range_alloc_block_size': 4096,
'read_buffer_size': 131072,
'read_only': False,
'read_rnd_buffer_size': 262144,
'relay_log': None,
'relay_log_basename': None,
'relay_log_index': None,
'relay_log_info_file': 'relay-log.info',
'relay_log_info_repository': 'FILE',
'relay_log_purge': True,
'relay_log_recovery': False,
'relay_log_space_limit': 0,
'report_host': None,
'report_password': None,
'report_port': 3306,
'report_user': None,
'rpl_stop_slave_timeout': 31536000,
'secure_auth': True,
'secure_file_priv': None,
'server_id': 0,
'server_id_bits': 32,
'server_uuid': '5d2f94a0-4658-11e4-92e7-0a41270292d6',
'sha256_password_private_key_path': 'private_key.pem',
'sha256_password_public_key_path': 'public_key.pem',
'simplified_binlog_gtid_recovery': False,
'skip_external_locking': True,
'skip_name_resolve': False,
'skip_networking': False,
'skip_show_database': False,
'slave_allow_batching': False,
'slave_checkpoint_group': 512,
'slave_checkpoint_period': 300,
'slave_compressed_protocol': False,
'slave_exec_mode': 'STRICT',
'slave_load_tmpdir': '/var/folders/wl/9pmj8jnd33d8pgrn9gd1gl5r0000gn/T/',
'slave_max_allowed_packet': 1073741824,
'slave_net_timeout': 3600,
'slave_parallel_workers': 0,
'slave_pending_jobs_size_max': 16777216,
'slave_rows_search_algorithms': 'TABLE_SCAN,INDEX_SCAN',
'slave_skip_errors': False,
'slave_sql_verify_checksum': True,
'slave_transaction_retries': 10,
'slave_type_conversions': None,
'slow_launch_time': 2,
'slow_query_log': False,
'slow_query_log_file': '/usr/local/var/mysql/tiredpixel-slow.log',
'socket': '/tmp/mysql.sock',
'sort_buffer_size': 262144,
'sql_auto_is_null': False,
'sql_big_selects': True,
'sql_buffer_result': False,
'sql_log_bin': True,
'sql_log_off': False,
'sql_mode': 'NO_ENGINE_SUBSTITUTION',
'sql_notes': True,
'sql_quote_show_create': True,
'sql_safe_updates': False,
'sql_select_limit': 18446744073709551615,
'sql_slave_skip_counter': 0,
'sql_warnings': False,
'ssl_ca': None,
'ssl_capath': None,
'ssl_cert': None,
'ssl_cipher': None,
'ssl_crl': None,
'ssl_crlpath': None,
'ssl_key': None,
'storage_engine': 'InnoDB',
'stored_program_cache': 256,
'sync_binlog': 0,
'sync_frm': True,
'sync_master_info': 10000,
'sync_relay_log': 10000,
'sync_relay_log_info': 10000,
'system_time_zone': 'BST',
'table_definition_cache': 1400,
'table_open_cache': 2000,
'table_open_cache_instances': 1,
'thread_cache_size': 9,
'thread_concurrency': 10,
'thread_handling': 'one-thread-per-connection',
'thread_stack': 262144,
'time_format': '%H:%i:%s',
'time_zone': 'SYSTEM',
'timed_mutexes': False,
'timestamp': 1430653686.849428,
'tmp_table_size': 16777216,
'tmpdir': '/var/folders/wl/9pmj8jnd33d8pgrn9gd1gl5r0000gn/T/',
'transaction_alloc_block_size': 8192,
'transaction_allow_batching': False,
'transaction_prealloc_size': 4096,
'tx_isolation': 'REPEATABLE-READ',
'tx_read_only': False,
'unique_checks': True,
'updatable_views_with_limit': True,
'version': '5.6.23',
'version_comment': 'Homebrew',
'version_compile_machine': 'x86_64',
'version_compile_os': 'osx10.10',
'wait_timeout': 28800,
'warning_count': 0}
def test_command_tool(self):
assert (Mysql.command_tool(3306, {}, 'SHOW VARIABLES') ==
['mysql', '--host', '127.0.0.1', '--port', 3306,
'--execute', 'SHOW VARIABLES',
'--batch', '--raw', '--column-names'])
def test_command_tool_user(self):
assert (Mysql.command_tool(3306, { 'user': "USER" }, 'SHOW VARIABLES') ==
['mysql', '--host', '127.0.0.1', '--port', 3306,
'--execute', 'SHOW VARIABLES',
'--batch', '--raw', '--column-names',
'--user=USER'])
def test_command_tool_password(self):
assert (Mysql.command_tool(3306, { 'password': 'PASS"WORD' }, 'SHOW VARIABLES') ==
['mysql', '--host', '127.0.0.1', '--port', 3306,
'--execute', 'SHOW VARIABLES',
'--batch', '--raw', '--column-names',
'--password=PASS"WORD'])
def test_enabled(self):
mysql = Mysql({}, { 3306: {} })
assert mysql.enabled() == True
def test_enabled_no_ports(self):
mysql = Mysql({}, {})
assert mysql.enabled() == False
def test_collect(self, monkeypatch):
monkeypatch.setattr(BasePortCommand, 'exec_command',
self.mock_exec_command)
mysql = Mysql({}, { 3306: {} })
metrics = mysql.collect()
metrics_t0 = {
'status': self.mock_collect_status(),
'master_status': self.mock_collect_master_status(),
'slave_status': self.mock_collect_slave_status(),
'slave_hosts': self.mock_collect_slave_hosts(),
'variables': self.mock_collect_variables()}
metrics_t = {
'status': self.mock_collect_status()}
for setting, v in Mysql.COLLECT_SETTING_DEFAULTS.items():
if v:
metrics_t[setting] = metrics_t0[setting]
assert metrics[3306] == metrics_t
for setting, v in Mysql.COLLECT_SETTING_DEFAULTS.items():
mysql2 = Mysql({}, { 3306: { 'collect': { setting: v } } })
metrics2 = mysql2.collect()
metrics_t2 = metrics_t.copy()
if v:
metrics_t2[setting] = metrics_t0[setting]
assert metrics2[3306] == metrics_t2
|
the-stack_0_16294
|
# Copyright 2013-2015 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module that implements an event loop based on twisted
( https://twistedmatrix.com ).
"""
from twisted.internet import reactor, protocol
from threading import Event, Thread, Lock
from functools import partial
import logging
import weakref
import atexit
from cassandra import OperationTimedOut
from cassandra.connection import Connection, ConnectionShutdown
from cassandra.protocol import RegisterMessage
log = logging.getLogger(__name__)
def _cleanup(cleanup_weakref):
try:
cleanup_weakref()._cleanup()
except ReferenceError:
return
class TwistedConnectionProtocol(protocol.Protocol):
"""
Twisted Protocol class for handling data received and connection
made events.
"""
def dataReceived(self, data):
"""
Callback function that is called when data has been received
on the connection.
Reaches back to the Connection object and queues the data for
processing.
"""
self.transport.connector.factory.conn._iobuf.write(data)
self.transport.connector.factory.conn.handle_read()
def connectionMade(self):
"""
Callback function that is called when a connection has succeeded.
Reaches back to the Connection object and confirms that the connection
is ready.
"""
self.transport.connector.factory.conn.client_connection_made()
def connectionLost(self, reason):
# reason is a Failure instance
self.transport.connector.factory.conn.defunct(reason.value)
class TwistedConnectionClientFactory(protocol.ClientFactory):
def __init__(self, connection):
# ClientFactory does not define __init__() in parent classes
# and does not inherit from object.
self.conn = connection
def buildProtocol(self, addr):
"""
Twisted function that defines which kind of protocol to use
in the ClientFactory.
"""
return TwistedConnectionProtocol()
def clientConnectionFailed(self, connector, reason):
"""
Overridden twisted callback which is called when the
connection attempt fails.
"""
log.debug("Connect failed: %s", reason)
self.conn.defunct(reason.value)
def clientConnectionLost(self, connector, reason):
"""
Overridden twisted callback which is called when the
connection goes away (cleanly or otherwise).
It should be safe to call defunct() here instead of just close, because
we can assume that if the connection was closed cleanly, there are no
callbacks to error out. If this assumption turns out to be false, we
can call close() instead of defunct() when "reason" is an appropriate
type.
"""
log.debug("Connect lost: %s", reason)
self.conn.defunct(reason.value)
class TwistedLoop(object):
_lock = None
_thread = None
def __init__(self):
self._lock = Lock()
def maybe_start(self):
with self._lock:
if not reactor.running:
self._thread = Thread(target=reactor.run,
name="cassandra_driver_event_loop",
kwargs={'installSignalHandlers': False})
self._thread.daemon = True
self._thread.start()
atexit.register(partial(_cleanup, weakref.ref(self)))
def _cleanup(self):
if self._thread:
reactor.callFromThread(reactor.stop)
self._thread.join(timeout=1.0)
if self._thread.is_alive():
log.warning("Event loop thread could not be joined, so "
"shutdown may not be clean. Please call "
"Cluster.shutdown() to avoid this.")
log.debug("Event loop thread was joined")
class TwistedConnection(Connection):
"""
An implementation of :class:`.Connection` that utilizes the
Twisted event loop.
"""
_loop = None
_total_reqd_bytes = 0
@classmethod
def initialize_reactor(cls):
if not cls._loop:
cls._loop = TwistedLoop()
@classmethod
def factory(cls, *args, **kwargs):
"""
A factory function which returns connections which have
succeeded in connecting and are ready for service (or
raises an exception otherwise).
"""
timeout = kwargs.pop('timeout', 5.0)
conn = cls(*args, **kwargs)
conn.connected_event.wait(timeout)
if conn.last_error:
raise conn.last_error
elif not conn.connected_event.is_set():
conn.close()
raise OperationTimedOut("Timed out creating connection")
else:
return conn
def __init__(self, *args, **kwargs):
"""
Initialization method.
Note that we can't call reactor methods directly here because
it's not thread-safe, so we schedule the reactor/connection
stuff to be run from the event loop thread when it gets the
chance.
"""
Connection.__init__(self, *args, **kwargs)
self.connected_event = Event()
self.is_closed = True
self.connector = None
self._callbacks = {}
reactor.callFromThread(self.add_connection)
self._loop.maybe_start()
def add_connection(self):
"""
Convenience function to connect and store the resulting
connector.
"""
self.connector = reactor.connectTCP(
host=self.host, port=self.port,
factory=TwistedConnectionClientFactory(self))
def client_connection_made(self):
"""
Called by twisted protocol when a connection attempt has
succeeded.
"""
with self.lock:
self.is_closed = False
self._send_options_message()
def close(self):
"""
Disconnect and error-out all callbacks.
"""
with self.lock:
if self.is_closed:
return
self.is_closed = True
log.debug("Closing connection (%s) to %s", id(self), self.host)
self.connector.disconnect()
log.debug("Closed socket to %s", self.host)
if not self.is_defunct:
self.error_all_callbacks(
ConnectionShutdown("Connection to %s was closed" % self.host))
# don't leave in-progress operations hanging
self.connected_event.set()
def handle_read(self):
"""
Process the incoming data buffer.
"""
self.process_io_buffer()
def push(self, data):
"""
This function is called when outgoing data should be queued
for sending.
Note that we can't call transport.write() directly because
it is not thread-safe, so we schedule it to run from within
the event loop when it gets the chance.
"""
reactor.callFromThread(self.connector.transport.write, data)
def register_watcher(self, event_type, callback, register_timeout=None):
"""
Register a callback for a given event type.
"""
self._push_watchers[event_type].add(callback)
self.wait_for_response(
RegisterMessage(event_list=[event_type]),
timeout=register_timeout)
def register_watchers(self, type_callback_dict, register_timeout=None):
"""
Register multiple callback/event type pairs, expressed as a dict.
"""
for event_type, callback in type_callback_dict.items():
self._push_watchers[event_type].add(callback)
self.wait_for_response(
RegisterMessage(event_list=type_callback_dict.keys()),
timeout=register_timeout)
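# --- Illustration (added, not part of the driver) ---
# The __init__ and push() docstrings above lean on one rule: the Twisted
# reactor and its transports are not thread-safe, so work originating on
# another thread must be handed to the reactor thread via
# reactor.callFromThread. A minimal generic sketch of that pattern,
# independent of Cassandra (the write_later helper below is hypothetical):
#
#     from twisted.internet import reactor
#
#     def write_later(transport, data):
#         # Safe from any thread: the actual write runs on the reactor thread.
#         reactor.callFromThread(transport.write, data)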
|
the-stack_0_16297
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import datetime
import logging
from flexget import plugin
from flexget.config_schema import one_or_more
from flexget.event import event
from flexget.plugin import PluginWarning
from flexget.utils.requests import Session as RequestSession, TimedLimiter
from requests.exceptions import RequestException
plugin_name = 'pushover'
log = logging.getLogger(plugin_name)
PUSHOVER_URL = 'https://api.pushover.net/1/messages.json'
requests = RequestSession(max_retries=3)
requests.add_domain_limiter(TimedLimiter('pushover.net', '5 seconds'))
class PushoverNotifier(object):
"""
Example::
notify:
entries:
via:
- pushover:
user_key: <USER_KEY> (can also be a list of userkeys)
token: <TOKEN>
[device: <DEVICE_STRING>]
[priority: <PRIORITY>]
[url: <URL>]
[url_title: <URL_TITLE>]
[sound: <SOUND>]
                [retry: <RETRY>]
                [expire: <EXPIRE>]
                [callback: <CALLBACK>]
                [html: <HTML>]
"""
schema = {
'type': 'object',
'properties': {
'user_key': one_or_more({'type': 'string'}),
'api_key': {'type': 'string', 'default': 'aPwSHwkLcNaavShxktBpgJH4bRWc3m'},
'device': one_or_more({'type': 'string'}),
'priority': {'oneOf': [
{'type': 'number', 'minimum': -2, 'maximum': 2},
{'type': 'string'}]},
'url': {'type': 'string'},
'url_title': {'type': 'string'},
'sound': {'type': 'string'},
'retry': {'type': 'integer', 'minimum': 30},
'expire': {'type': 'integer', 'maximum': 86400},
'callback': {'type': 'string'},
'html': {'type': 'boolean'}
},
'required': ['user_key'],
'additionalProperties': False
}
def notify(self, title, message, config):
"""
Sends a Pushover notification
:param str title: the message's title
:param str message: the message to send
:param dict config: The pushover config
"""
notification = {'token': config.get('api_key'), 'message': message, 'title': title,
'device': config.get('device'), 'priority': config.get('priority'), 'url': config.get('url'),
'url_title': config.get('url_title'), 'sound': config.get('sound'),
'retry': config.get('retry'), 'expire': config.get('expire'),
'callback': config.get('callback')}
# HTML parsing mode
if config.get('html'):
notification['html'] = 1
# Support multiple devices
if isinstance(notification['device'], list):
notification['device'] = ','.join(notification['device'])
# Special case, verify certain fields exists if priority is 2
priority = config.get('priority')
expire = config.get('expire')
retry = config.get('retry')
if priority == 2 and not all([expire, retry]):
            log.warning('Priority set to 2 but fields "expire" and "retry" are not both present. Lowering priority to 1')
notification['priority'] = 1
if not isinstance(config['user_key'], list):
config['user_key'] = [config['user_key']]
for user in config['user_key']:
notification['user'] = user
try:
response = requests.post(PUSHOVER_URL, data=notification)
except RequestException as e:
if e.response is not None:
if e.response.status_code == 429:
reset_time = datetime.datetime.fromtimestamp(
int(e.response.headers['X-Limit-App-Reset'])).strftime('%Y-%m-%d %H:%M:%S')
error_message = 'Monthly pushover message limit reached. Next reset: %s' % reset_time
else:
error_message = e.response.json()['errors'][0]
else:
error_message = str(e)
raise PluginWarning(error_message)
reset_time = datetime.datetime.fromtimestamp(
int(response.headers['X-Limit-App-Reset'])).strftime('%Y-%m-%d %H:%M:%S')
remaining = response.headers['X-Limit-App-Remaining']
log.debug('Pushover notification sent. Notifications remaining until next reset: %s. '
'Next reset at: %s', remaining, reset_time)
@event('plugin.register')
def register_plugin():
plugin.register(PushoverNotifier, plugin_name, api_ver=2, interfaces=['notifiers'])
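# --- Usage sketch (added illustration, not part of the plugin) ---
# Outside of FlexGet's notify framework the notifier can be driven directly;
# a minimal sketch, assuming a valid user key, API token, and network access
# (the placeholder values below are hypothetical, and the call raises
# PluginWarning on API errors):
#
#     notifier = PushoverNotifier()
#     notifier.notify(
#         title='Download finished',
#         message='Entry accepted',
#         config={'user_key': '<USER_KEY>', 'api_key': '<API_TOKEN>', 'priority': 0},
#     )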
|
the-stack_0_16298
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class AndroidMAMPolicy(Resource):
"""Android Policy entity for Intune MAM.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id
:vartype id: str
:ivar name: Resource name
:vartype name: str
:ivar type: Resource type
:vartype type: str
:param tags: Resource Tags
:type tags: dict
:param location: Resource Location
:type location: str
:param friendly_name:
:type friendly_name: str
:param description:
:type description: str
:param app_sharing_from_level: Possible values include: 'none',
'policyManagedApps', 'allApps'. Default value: "none" .
:type app_sharing_from_level: str
:param app_sharing_to_level: Possible values include: 'none',
'policyManagedApps', 'allApps'. Default value: "none" .
:type app_sharing_to_level: str
:param authentication: Possible values include: 'required',
'notRequired'. Default value: "required" .
:type authentication: str
:param clipboard_sharing_level: Possible values include: 'blocked',
'policyManagedApps', 'policyManagedAppsWithPasteIn', 'allApps'. Default
value: "blocked" .
:type clipboard_sharing_level: str
:param data_backup: Possible values include: 'allow', 'block'. Default
value: "allow" .
:type data_backup: str
:param file_sharing_save_as: Possible values include: 'allow', 'block'.
Default value: "allow" .
:type file_sharing_save_as: str
:param pin: Possible values include: 'required', 'notRequired'. Default
value: "required" .
:type pin: str
:param pin_num_retry:
:type pin_num_retry: int
:param device_compliance: Possible values include: 'enable', 'disable'.
Default value: "enable" .
:type device_compliance: str
:param managed_browser: Possible values include: 'required',
'notRequired'. Default value: "required" .
:type managed_browser: str
:param access_recheck_offline_timeout:
:type access_recheck_offline_timeout: timedelta
:param access_recheck_online_timeout:
:type access_recheck_online_timeout: timedelta
:param offline_wipe_timeout:
:type offline_wipe_timeout: timedelta
:ivar num_of_apps:
:vartype num_of_apps: int
:ivar group_status: Possible values include: 'notTargeted', 'targeted'.
Default value: "notTargeted" .
:vartype group_status: str
:ivar last_modified_time:
:vartype last_modified_time: datetime
:param screen_capture: Possible values include: 'allow', 'block'. Default
value: "allow" .
:type screen_capture: str
:param file_encryption: Possible values include: 'required',
'notRequired'. Default value: "required" .
:type file_encryption: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'friendly_name': {'required': True},
'num_of_apps': {'readonly': True},
'group_status': {'readonly': True},
'last_modified_time': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
'friendly_name': {'key': 'properties.friendlyName', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'app_sharing_from_level': {'key': 'properties.appSharingFromLevel', 'type': 'str'},
'app_sharing_to_level': {'key': 'properties.appSharingToLevel', 'type': 'str'},
'authentication': {'key': 'properties.authentication', 'type': 'str'},
'clipboard_sharing_level': {'key': 'properties.clipboardSharingLevel', 'type': 'str'},
'data_backup': {'key': 'properties.dataBackup', 'type': 'str'},
'file_sharing_save_as': {'key': 'properties.fileSharingSaveAs', 'type': 'str'},
'pin': {'key': 'properties.pin', 'type': 'str'},
'pin_num_retry': {'key': 'properties.pinNumRetry', 'type': 'int'},
'device_compliance': {'key': 'properties.deviceCompliance', 'type': 'str'},
'managed_browser': {'key': 'properties.managedBrowser', 'type': 'str'},
'access_recheck_offline_timeout': {'key': 'properties.accessRecheckOfflineTimeout', 'type': 'duration'},
'access_recheck_online_timeout': {'key': 'properties.accessRecheckOnlineTimeout', 'type': 'duration'},
'offline_wipe_timeout': {'key': 'properties.offlineWipeTimeout', 'type': 'duration'},
'num_of_apps': {'key': 'properties.numOfApps', 'type': 'int'},
'group_status': {'key': 'properties.groupStatus', 'type': 'str'},
'last_modified_time': {'key': 'properties.lastModifiedTime', 'type': 'iso-8601'},
'screen_capture': {'key': 'properties.screenCapture', 'type': 'str'},
'file_encryption': {'key': 'properties.fileEncryption', 'type': 'str'},
}
def __init__(self, friendly_name, tags=None, location=None, description=None, app_sharing_from_level="none", app_sharing_to_level="none", authentication="required", clipboard_sharing_level="blocked", data_backup="allow", file_sharing_save_as="allow", pin="required", pin_num_retry=None, device_compliance="enable", managed_browser="required", access_recheck_offline_timeout=None, access_recheck_online_timeout=None, offline_wipe_timeout=None, screen_capture="allow", file_encryption="required"):
super(AndroidMAMPolicy, self).__init__(tags=tags, location=location)
self.friendly_name = friendly_name
self.description = description
self.app_sharing_from_level = app_sharing_from_level
self.app_sharing_to_level = app_sharing_to_level
self.authentication = authentication
self.clipboard_sharing_level = clipboard_sharing_level
self.data_backup = data_backup
self.file_sharing_save_as = file_sharing_save_as
self.pin = pin
self.pin_num_retry = pin_num_retry
self.device_compliance = device_compliance
self.managed_browser = managed_browser
self.access_recheck_offline_timeout = access_recheck_offline_timeout
self.access_recheck_online_timeout = access_recheck_online_timeout
self.offline_wipe_timeout = offline_wipe_timeout
self.num_of_apps = None
self.group_status = None
self.last_modified_time = None
self.screen_capture = screen_capture
self.file_encryption = file_encryption
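# --- Usage sketch (added illustration, not part of the generated model) ---
# A minimal sketch of constructing the model before handing it to an Intune
# MAM client for create/update; only friendly_name is required, every other
# property falls back to the defaults listed in the docstring (the values
# below are hypothetical):
#
#     policy = AndroidMAMPolicy(
#         friendly_name='Android baseline',
#         pin='notRequired',
#         screen_capture='block',
#         data_backup='block',
#     )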
|
the-stack_0_16302
|
from os import path
from setuptools import setup, find_packages
import sys
import versioneer
# NOTE: This file must remain Python 2 compatible for the foreseeable future,
# to ensure that we error out properly for people with outdated setuptools
# and/or pip.
if sys.version_info < (3, 6):
error = """
    niio does not support Python {0}.{1}.
Python 3.6 and above is required. Check your Python version like so:
python3 --version
This may be due to an out-of-date pip. Make sure you have pip >= 9.0.1.
Upgrade pip like so:
pip install --upgrade pip
""".format(3, 6)
sys.exit(error)
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as readme_file:
readme = readme_file.read()
with open(path.join(here, 'requirements.txt')) as requirements_file:
# Parse requirements.txt, ignoring any commented-out lines.
requirements = [line for line in requirements_file.read().splitlines()
if not line.startswith('#')]
setup(
name='niio',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description="Package for quickly loading common neuroimaging data.",
long_description=readme,
author="Kristian Eschenburg",
author_email='[email protected]',
url='https://github.com/kristianeschenburg/niio',
packages=find_packages(exclude=['docs', 'tests']),
entry_points={
'console_scripts': [
# 'some.module:some_function',
],
},
include_package_data=True,
package_data={
'niio': [
# When adding files here, remember to update MANIFEST.in as well,
# or else they will not be included in the distribution on PyPI!
# 'path/to/data_file',
]
},
install_requires=requirements,
license="BSD (3-clause)",
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Natural Language :: English',
'Programming Language :: Python :: 3',
],
)
|
the-stack_0_16304
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import time
import boto3
from redis.sentinel import Sentinel
ALERT_EMAILS = ['[email protected]']
REDIS_SENTINEL_LIST = [("47.94.197.140",6379)]
#[("192.168.0.62", 26379), ("192.168.0.63", 26379), ("192.168.0.64", 26379)]
def send_email(to_address, subject, content):
ses = boto3.client('ses')
r = ses.send_email(Source = '"viabtc-exchange-alert" <[email protected]>',
Destination = {'ToAddresses': [to_address]},
Message = {'Subject': { 'Data': subject, 'Charset': 'utf-8'},
'Body': {'Text': {'Data': content, 'Charset': 'utf-8'}}})
def main():
last_send = None
sentinel = Sentinel(REDIS_SENTINEL_LIST)
redis_master = sentinel.master_for("mymaster", socket_timeout=120)
while True:
r = redis_master.blpop('alert:message', 60)
if not r:
continue
current_timestamp = int(time.time())
if last_send and current_timestamp - last_send < 60:
continue
last_send = current_timestamp
message = r[1]
for email in ALERT_EMAILS:
send_email(email, "viabtc server error", message)
if __name__ == '__main__':
main()
|
the-stack_0_16305
|
from django.conf import settings
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.contrib.auth.decorators import login_required
from django.urls import include, path
from django.views.generic import TemplateView
urlpatterns = [
path(
"",
login_required(TemplateView.as_view(template_name="homepage.html")),
name="homepage",
),
path(
"accounts/login/",
auth_views.LoginView.as_view(template_name="login.html"),
name="login",
),
path(
"accounts/logout/",
auth_views.LogoutView.as_view(next_page=settings.LOGIN_URL),
name="logout",
),
path("admin/", admin.site.urls),
path("problems/", include("problems.urls")),
path("quizzes/", include("quizzes.urls")),
path("students/", include("students.urls")),
]
|
the-stack_0_16306
|
# -*- coding: utf-8 -*-
from .socketservice import get_instance
from .alarm_service import AlarmService
import binascii
import time
from app.models.device import Device as DeviceModel
from app.models.line import Line
from app.models.monitor import Monitor
from datetime import datetime
from app.libs.utils import dynamic_decimal
from app.libs import command
from app.libs.utils import command_encode
from app.libs.error_code import DeviceException
class Device:
    energy_sign = ["0B04", "0C04", "0D04", "0E04"]
energy_line = {
"0B04": 0,
"0C04": 1,
"0D04": 2,
"0E04": 3
}
upload_sign = ["CD11", "CD12", "CD21", "CD22", "CD31", "CD32", "CD41", "CD42"]
upload_type = {
"1": "ua",
"2": "energy"
}
upload_line = {
"1": 0,
"2": 1,
"3": 2,
"4": 3
}
di = {
"1": "off",
"0": "on"
}
do = {
"1": "on",
"0": "off"
}
def __init__(self):
self.device_id = None
self.cloud_id = None
self.version = None
self.sign = None
def parse_data(self, data, socket):
print(datetime.now().strftime('%H:%M:%S'), data)
if str.startswith(data, "AA55"):
self.parse_device_data(data, socket)
elif not self.check_register(socket):
Device.get_basic_info(socket)
elif str.startswith(data, "6403") or str.startswith(data, "DD016403"):
self.parse_modbus_data(data, socket)
elif self.check_energy_data(data):
self.parse_energy_data(data, socket)
elif self.check_upload_data(data):
self.parse_upload_data(data, socket)
else:
print("其他数据", data)
@staticmethod
def get_basic_info(socket):
"""获取基础信息,初始化"""
print("获取cloud_id")
socket.request.sendall(binascii.a2b_hex("AA550006E00000910177"))
time.sleep(0.5)
print("获取信号强度")
socket.request.sendall(binascii.a2b_hex("AA550004E0230107"))
time.sleep(0.5)
print("获取线路状态")
socket.request.sendall(binascii.a2b_hex("6403001000084C3C"))
@staticmethod
def get_cloud_id(socket):
"""获取云ID"""
socket.request.sendall(command_encode(command.BASIC["cloud_id"]))
@staticmethod
def get_version(socket):
"""获取版本"""
socket.request.sendall(command_encode(command.BASIC["version"]))
@staticmethod
def get_sign_strength(socket):
"""获取信号强度"""
socket.request.sendall(command_encode(command.BASIC["sign"]))
@staticmethod
def operate(line, operate, socket):
"""操作电路通断"""
operate = operate.upper()
if line in getattr(command, operate).keys():
socket.request.sendall(command_encode(getattr(command, operate)[line]))
@staticmethod
def get_box_detail(socket):
for e in command.ENERGY.values():
socket.request.sendall(command_encode(e))
time.sleep(0.5)
for u in command.UA.values():
socket.request.sendall(command_encode(u))
time.sleep(0.5)
@staticmethod
def send_command(socket, command):
socket.request.sendall(command_encode(command))
def parse_device_data(self, data, socket):
if not self.check_devicec_data(data):
return
if str.startswith(data, "aa550010e000000a0091".upper()):
cloud_id = self.parse_cloud_id(data)
self.register(cloud_id, socket)
if "EE01" in data:
version = self.parse_version(data)
self.version = version
print("version", version)
if "E023" in data:
sign = self.parse_sign(data)
socket.sign = sign
self.sign = sign
print("sign", sign)
def parse_modbus_data(self, data, socket):
if str.startswith(data, "DD01"):
data = data[4:]
if not self.check_modbus_data(data):
return
data = data[6:-4]
status_map = {
1: data[3:4],
2: data[7:8],
3: data[11:12],
            4: data[15:16]
}
do_map = {
1: data[19:20],
2: data[23:24],
3: data[27:28],
4: data[31:32]
}
lines = Line.objects.filter(device_id=socket.device_id).all()
for line in lines:
if line.line in status_map.keys():
status = self.di[status_map[line.line]]
do = self.do[do_map[line.line]]
value = {
"device_id": socket.device_id,
"line_id": line.id,
"line": line.line,
"type": "status",
"value": {
"status": status,
"do": do
}
}
Device.switch_alarm(line, status)
Monitor.save_data(value)
def parse_cloud_id(self, data):
cloud_id = data[20:-4]
return cloud_id
def parse_version(self, data):
sign_index = data.index("EE01")
version = data[sign_index + 4:-4]
return version
def parse_sign(self, data):
sign_index = data.index("E023")
sign = data[sign_index + 4:-4]
return int(sign, 16)
def parse_line_status(self, data):
pass
def parse_energy_data(self, data, socket):
sign = data[0:4]
line = Line.objects.filter(line=self.energy_line[sign], device_id=socket.device_id).first()
value = {
"device_id": socket.device_id,
"line": line.line,
"line_id": line.id
}
if len(data) < 20:
data = data[6:-4]
energy = int(data, 16) // 100
value["type"] = "energy"
value["value"] = energy
else:
data = data[6:-4]
voltage_a = int(data[0:4], 16) // 10
voltage_b = int(data[4:8], 16) // 10
voltage_c = int(data[8:12], 16) // 10
value["type"] = "voltage"
value["value"] = {
"a": voltage_a,
"b": voltage_b,
"c": voltage_c
}
Monitor.save_data(value)
electricity_a = dynamic_decimal((int(data[12:16], 16) / 100))
electricity_b = dynamic_decimal((int(data[16:20], 16) / 100))
electricity_c = dynamic_decimal((int(data[20:24], 16) / 100))
value["type"] = "electricity"
value["value"] = {
"a": electricity_a,
"b": electricity_b,
"c": electricity_c
}
Monitor.save_data(value)
print("这是解析电能数据", socket.line_status)
def parse_upload_data(self, data, socket):
print("开始解析upload")
print(socket.device_id)
line = Line.objects.filter(line=self.upload_line[data[2]], device_id=socket.device_id).first()
type = self.upload_type[data[3]]
value = {
"device_id": socket.device_id,
"line": line.line,
"line_id": line.id
}
if type == "energy":
energy = (int(data[10:18], 16) // 100)
value["type"] = "energy"
value["value"] = energy
Monitor.save_data(value)
if type == "ua":
electricity_a = dynamic_decimal((int(data[22:26], 16)) / 100)
electricity_b = dynamic_decimal((int(data[26:30], 16)) / 100)
electricity_c = dynamic_decimal((int(data[30:34], 16)) / 100)
value["type"] = "electricity"
value["value"] = {
"a": electricity_a,
"b": electricity_b,
"c": electricity_c
}
Monitor.save_data(value)
voltage_a = int(data[10:14], 16) // 10
voltage_b = int(data[14:18], 16) // 10
voltage_c = int(data[18:22], 16) // 10
value["type"] = "voltage"
value["value"] = {
"a": voltage_a,
"b": voltage_b,
"c": voltage_c
}
Monitor.save_data(value)
Device.current_alarm(line, electricity_a, electricity_b, electricity_c)
socket.timestamp = int(round(time.time() * 1000))
@staticmethod
def status(socket):
"""获取线路状态"""
print("获取线路状态")
socket.request.sendall(command_encode(command.BASIC["line_status"]))
    def check_device_data(self, data):
if len(data) < 8:
return False
length = int(data[4:8], 16) * 2
if len(data[8:]) == length:
return True
else:
return False
def check_modbus_data(self, data):
length = int(data[4:6], 16) * 2
print("length", length)
print("data", len(data[6:-4]))
if len(data[6:-4]) == length:
return True
else:
return False
def check_upload_data(self, data):
if data[0:4] in self.upload_sign:
return True
else:
return False
def check_register(self, socket):
if socket.cloud_id is None:
return False
else:
return True
def check_energy_data(self, data):
if data[0:4] in self.energy_sign:
if self.check_modbus_data(data):
return True
else:
return False
else:
return False
def register(self, cloud_id, socket):
socket.cloud_id = cloud_id
self.cloud_id = cloud_id
device = DeviceModel.objects.filter(cloud_id=cloud_id).first()
if device:
socket.device_id = device["device_id"]
get_instance().add_client(cloud_id, socket)
@staticmethod
def update_device(socket):
for i in range(0, 3):
Device.status(socket)
time.sleep(0.5)
@staticmethod
def search_device_socket(device_id):
device = DeviceModel.objects.filter(device_id=device_id).first_or_404()
cloud_id = device["cloud_id"]
clients = get_instance().clients
if cloud_id in clients.keys():
return clients[cloud_id]
else:
raise DeviceException()
@staticmethod
def operate_device_plan(device_id, line_id, operate):
device = DeviceModel.objects.filter(device_id=device_id).first()
if device:
cloud_id = device["cloud_id"]
clients = get_instance().clients
if cloud_id in clients:
line = Line.objects.filter(id=line_id).first()
socket = clients[cloud_id]
socket.request.sendall(command_encode(getattr(command, operate)[line.line]))
@staticmethod
def current_alarm(line, la, lb, lc):
limit = line.limit
total = la + lb + lc
if (limit * (line.standard / 100)) < total < (limit * 1.1):
alarm = AlarmService(device_id=line.device_id, alarm_type="high_current", line_id=line.id, a=la, b=lb, c=lc,
limit=line.limit)
alarm.gen_alarm()
if total > (limit * 1.1):
alarm = AlarmService(device_id=line.device_id, alarm_type="overload", line_id=line.id, a=la, b=lb, c=lc)
alarm.gen_alarm()
@staticmethod
def offline_alarm(socket):
device_id = socket.device_id
alarm = AlarmService(device_id=device_id, alarm_type="offline", line_id=None)
alarm.gen_alarm()
@staticmethod
def trip_alarm(line):
alarm = AlarmService(device_id=line.device_id, alarm_type="trip", line_id=line.id)
alarm.gen_alarm()
@staticmethod
def switch_alarm(line, status):
monitor = Monitor.objects.filter(device_id=line.device_id, line_id=line.id, type="status").first()
if monitor:
if monitor["value"]["status"] != status:
if status == "on":
alarm = AlarmService(device_id=line.device_id, alarm_type="switch", line_id=line.id, type="on")
alarm.gen_alarm()
else:
alarm = AlarmService(device_id=line.device_id, alarm_type="switch", line_id=line.id, type="off")
alarm.gen_alarm()
@staticmethod
def operate_job(line, operate, *args):
device_id = line.device_id
device = DeviceModel.objects.filter(device_id=device_id).first()
cloud_id = device.cloud_id
clients = get_instance().clients
if cloud_id in clients.keys():
socket = clients[cloud_id]
for i in range(0, 3):
Device.operate(line.line, operate, socket)
time.sleep(0.5)
|
the-stack_0_16307
|
""" module to methods to main """
import sys
import logging
from .migrate_isis import migrate_isis_parser
from .migrate_articlemeta import migrate_articlemeta_parser
from .tools import tools_parser
logger = logging.getLogger(__name__)
def main_migrate_articlemeta():
""" method main to script setup.py """
try:
sys.exit(migrate_articlemeta_parser(sys.argv[1:]))
except KeyboardInterrupt:
        # By shell convention, a program terminated by signal N should
        # return exit code N + 128.
sys.exit(130)
except Exception as exc:
        logger.exception(
            "error while executing the "
            "'migrate_articlemeta_parser' function with args %s",
            sys.argv[1:],
        )
        sys.exit("An unexpected error occurred: %s" % exc)
def main_migrate_isis():
sys.exit(migrate_isis_parser(sys.argv[1:]))
def tools():
sys.exit(tools_parser(sys.argv[1:]))
if __name__ == "__main__":
sys.exit(migrate_articlemeta_parser(sys.argv[1:]))
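# A minimal sketch of how these entry points might be declared as console
# scripts in setup.py (the package path "mypackage.main" and the script names
# are assumptions, not taken from the project):
#
# setup(
#     ...,
#     entry_points={
#         "console_scripts": [
#             "migrate_articlemeta=mypackage.main:main_migrate_articlemeta",
#             "migrate_isis=mypackage.main:main_migrate_isis",
#             "migrate_tools=mypackage.main:tools",
#         ]
#     },
# )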
|
the-stack_0_16309
|
from modelvshuman import Plot, Evaluate
from modelvshuman import constants as c
from plotting_definition import plotting_definition_template
def run_evaluation():
models = ["resnet50", "bagnet33", "simclr_resnet50x1"]
datasets = c.DEFAULT_DATASETS # or e.g. ["cue-conflict", "uniform-noise"]
params = {"batch_size": 64, "print_predictions": True, "num_workers": 20}
Evaluate()(models, datasets, **params)
def run_plotting():
plot_types = c.DEFAULT_PLOT_TYPES # or e.g. ["accuracy", "shape-bias"]
plotting_def = plotting_definition_template
figure_dirname = "example-figures/"
Plot(plot_types = plot_types, plotting_definition = plotting_def,
figure_directory_name = figure_dirname)
# In examples/plotting_definition.py, you can edit
# plotting_definition_template as desired: this will let
# the toolbox know which models to plot, and which colours to use etc.
if __name__ == "__main__":
# 1. evaluate models on out-of-distribution datasets
run_evaluation()
# 2. plot the evaluation results
run_plotting()
|
the-stack_0_16311
|
"""
=====================================================================
Compute Power Spectral Density of inverse solution from single epochs
=====================================================================
Compute PSD of dSPM inverse solution on single trial epochs restricted
to a brain label. The PSD is computed using a multi-taper method with
Discrete Prolate Spheroidal Sequence (DPSS) windows.
"""
# Author: Martin Luessi <[email protected]>
#
# License: BSD-3-Clause
# %%
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator, compute_source_psd_epochs
print(__doc__)
data_path = sample.data_path()
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_raw = data_path + '/MEG/sample/sample_audvis_raw.fif'
fname_event = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
label_name = 'Aud-lh'
fname_label = data_path + '/MEG/sample/labels/%s.label' % label_name
subjects_dir = data_path + '/subjects'
event_id, tmin, tmax = 1, -0.2, 0.5
snr = 1.0 # use smaller SNR for raw data
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE or sLORETA)
# Load data
inverse_operator = read_inverse_operator(fname_inv)
label = mne.read_label(fname_label)
raw = mne.io.read_raw_fif(fname_raw)
events = mne.read_events(fname_event)
# Set up pick list
include = []
raw.info['bads'] += ['EEG 053'] # bads + 1 more
# pick MEG channels
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
include=include, exclude='bads')
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=dict(mag=4e-12, grad=4000e-13,
eog=150e-6))
# define frequencies of interest
fmin, fmax = 0., 70.
bandwidth = 4. # bandwidth of the windows in Hz
# %%
# Compute source space PSD in label
# ---------------------------------
#
# .. note:: By using "return_generator=True" stcs will be a generator object
# instead of a list. This allows us to iterate without having to
# keep everything in memory.
n_epochs_use = 10
stcs = compute_source_psd_epochs(epochs[:n_epochs_use], inverse_operator,
lambda2=lambda2,
method=method, fmin=fmin, fmax=fmax,
bandwidth=bandwidth, label=label,
return_generator=True, verbose=True)
# compute average PSD over the first 10 epochs
psd_avg = 0.
for i, stc in enumerate(stcs):
psd_avg += stc.data
psd_avg /= n_epochs_use
freqs = stc.times # the frequencies are stored here
stc.data = psd_avg # overwrite the last epoch's data with the average
# %%
# Visualize the 10 Hz PSD:
brain = stc.plot(initial_time=10., hemi='lh', views='lat', # 10 HZ
clim=dict(kind='value', lims=(20, 40, 60)),
smoothing_steps=3, subjects_dir=subjects_dir)
brain.add_label(label, borders=True, color='k')
# %%
# Visualize the entire spectrum:
fig, ax = plt.subplots()
ax.plot(freqs, psd_avg.mean(axis=0))
ax.set_xlabel('Freq (Hz)')
ax.set_xlim(stc.times[[0, -1]])
ax.set_ylabel('Power Spectral Density')
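# %%
# Optionally persist the outputs (a minimal sketch; file names are illustrative):
# brain.save_image('psd_10hz_lh.png')    # screenshot of the 10 Hz source map
# fig.savefig('label_psd_spectrum.png')  # average spectrum within the label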
|
the-stack_0_16312
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: game.py
# -------------------
# Divine Oasis
# Text Based RPG Game
# By wsngamerz
# -------------------
import divineoasis
import logging
import logging.config
import os
import platform
import pyglet
import sys
from divineoasis.assets import Assets, Directories
from divineoasis.config import Config
from divineoasis.colours import Colours
from divineoasis.scene_manager import SceneManager
from pyglet.window import Window
class DivineOasis:
def __init__(self, debug: bool = False):
self.debug = debug
if self.debug:
if platform.system() == "Windows":
# Set larger console
os.system("mode con: cols=200 lines=9999")
if platform.system() != "Linux":
import pyglet_ffmpeg
pyglet_ffmpeg.load_ffmpeg()
# Enable Colours using black magic
os.system("")
# Setup Logging
self.game_logger = self.setup_logging(debug)
# Get basic system information
self.system_data = {}
self.system_info()
# Basic classes
self.game_config = Config()
self.game_config.load()
self.game_assets = Assets(self.game_config.get("language.lang"))
# setup Pyglet
pyglet.options['audio'] = ('openal', 'pulse', 'directsound', 'silent')
vsync_enabled = self.game_config.get("graphics.vsync")
self.window = Window(1280, 720)
self.window.set_vsync(vsync_enabled)
# TODO: Fix fullscreen mode
# self.window.set_fullscreen(self.game_config.get("fullscreen"))
self.window.set_caption(self.game_assets.get("lang.title.main_title"))
fps_limit = self.game_config.get("graphics.fps")
self.scene_manager = SceneManager(self.game_assets, self.window)
if vsync_enabled:
pyglet.clock.schedule(self.scene_manager.update)
else:
pyglet.clock.schedule_interval(self.scene_manager.update, 1.0 / fps_limit)
def start(self):
self.game_logger.info(f"Starting Divine Oasis { divineoasis.__version__ }")
# Start Pyglet loop
pyglet.app.run()
@staticmethod
def setup_logging(debug: bool):
if debug:
level = "DEBUG"
else:
level = "INFO"
logging.config.dictConfig({
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"standard": {
"format": "%(asctime)s [%(levelname)s] %(name)s: %(message)s"
}
},
"handlers": {
"default": {
"class": "logging.StreamHandler",
"formatter": "standard"
}
},
"loggers": {
"": {
"handlers": ["default"],
"propagate": True,
"level": level
}
}
})
logging.addLevelName(logging.DEBUG, Colours.BOLD + Colours.BRIGHT_CYAN + "DEBUG" + Colours.RESET)
logging.addLevelName(logging.INFO, Colours.BOLD + Colours.BRIGHT_BLUE + "INFO" + Colours.RESET)
logging.addLevelName(logging.WARNING, Colours.BOLD + Colours.BRIGHT_YELLOW + "WARNING" + Colours.RESET)
logging.addLevelName(logging.ERROR, Colours.BOLD + Colours.BRIGHT_RED + "ERROR" + Colours.RESET)
logging.addLevelName(logging.CRITICAL, Colours.BOLD + Colours.BRIGHT_RED + Colours.BLINK + "CRITICAL" + Colours.RESET)
return logging.getLogger(__name__)
def system_info(self):
self.system_data = {
"arguments": sys.argv,
"python_version": sys.version,
"os": platform.system(),
"os_release": platform.release(),
"os_version": platform.version(),
"os_arch": platform.machine(),
"os_platform": platform.platform()
}
self.game_logger.debug("=*=*=*=*=*=*=*=*=*=*=*= Debug Information =*=*=*=*=*=*=*=*=*=*=*=")
self.game_logger.debug(f" Arguments: { self.system_data['arguments'] }")
self.game_logger.debug(f" Python Version: { self.system_data['python_version'] }")
self.game_logger.debug(f" OS: { self.system_data['os'] }")
self.game_logger.debug(f" OS Version: { self.system_data['os_version'] }")
self.game_logger.debug(f" OS Release: { self.system_data['os_release'] }")
self.game_logger.debug(f" OS Architecture: { self.system_data['os_arch'] }")
self.game_logger.debug(f" OS Platform: { self.system_data['os_platform'] }")
self.game_logger.debug("=*=*=*=*=*=*=*=*=*=*=*=*=* Directories *=*=*=*=*=*=*=*=*=*=*=*=*=")
self.game_logger.debug(f" Application Root: { Directories().application_root }")
self.game_logger.debug(f" Assets Directory: { Directories().assets_directory }")
self.game_logger.debug(f" Data Directory: { Directories().data_directory }")
self.game_logger.debug(f" Config Location: { Directories().config_location }")
self.game_logger.debug("=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=")
|
the-stack_0_16313
|
"""
Programmer : EOF
File : config.py
Date : 2016.01.06
E-mail : [email protected]
License : MIT License
Description :
    This is a configuration file for this project.
"""
DEBUG_MODEL = True
USING_CASCADE = False
# training set directory for face and non-face images
TRAINING_FACE = "E:/JasonLeasterGraduationProject/FaceDetection/TrainingImages/FACES/"
TRAINING_NONFACE = "E:/JasonLeasterGraduationProject/FaceDetection/TrainingImages/NFACES/"
# test set directory for face and non-face images
TEST_FACE = "./TrainingImages/FACES/"
TEST_NONFACE = "./TrainingImages/NFACES/"
# single image for testing
TEST_IMG = "./Test/soccer.gif"
FEATURE_FILE_TRAINING = "./features/features_train.cache"
FEATURE_FILE_TESTING = "./features/features_test.cache"
FEATURE_FILE_SUBSET = "./features/features_train_subset"
FEATURE_FILE_SUBSET_0 = "./features/features_train_subset0.cache"
FEATURE_FILE_SUBSET_1 = "./features/features_train_subset1.cache"
# For parallel
PROCESS_NUM = 2
ADABOOST_CACHE_FILE = "./model/adaboost_classifier.cache"
ROC_FILE = "./model/roc.cache"
FIGURES = "./figure/"
# image size in the training set 19 * 19
TRAINING_IMG_HEIGHT = 19
TRAINING_IMG_WIDTH = 19
# How many different types of Haar-feature
FEATURE_TYPE_NUM = 5
# How many number of features that a single training image have
FEATURE_NUM = 37862
#FEATURE_NUM = 16373
#FEATURE_NUM = 49608
# number of positive and negative sample will be used in the training process
POSITIVE_SAMPLE = 4800
NEGATIVE_SAMPLE = 9000
SAMPLE_NUM = POSITIVE_SAMPLE + NEGATIVE_SAMPLE
TESTING_POSITIVE_SAMPLE = 20
TESTING_NEGATIVE_SAMPLE = 20
TESTING_SAMPLE_NUM = TESTING_NEGATIVE_SAMPLE + TESTING_POSITIVE_SAMPLE
LABEL_POSITIVE = +1
LABEL_NEGATIVE = -1
WHITE = 255
BLACK = 0
EXPECTED_TPR = 0.999
EXPECTED_FPR = 0.0005
# for CASCADE
EXPECTED_FPR_PRE_LAYYER = 0.1
EXPECTED_TPR_PRE_LAYYER = 0.999
# the threshold range of adaboost. (from -inf to +inf)
AB_TH_MIN = -15
AB_TH_MAX = +15
HAAR_FEATURE_TYPE_I = "I"
HAAR_FEATURE_TYPE_II = "II"
HAAR_FEATURE_TYPE_III = "III"
HAAR_FEATURE_TYPE_IV = "IV"
HAAR_FEATURE_TYPE_V = "V"
AB_TH = -3.
OVER_LAP_TH = 0.1
MAX_WEAK_NUM = 12
CASACADE_LIMIT = 3
ADABOOST_LIMIT = 150
SEARCH_WIN_STEP = 4
DETECT_START = 1.
DETECT_END = 2.
DETECT_STEP = 0.2
|
the-stack_0_16316
|
import os
import torch
import torch.nn as nn
from torch.autograd import Variable
import dataset84
import model
from unet import UNet,CNNEncoder
def main():
# init conv net
print("init net")
unet = UNet(3,1)
if os.path.exists("./unet.pkl"):
unet.load_state_dict(torch.load("./unet.pkl"))
print("load unet")
unet.cuda()
cnn = CNNEncoder()
if os.path.exists("./cnn.pkl"):
cnn.load_state_dict(torch.load("./cnn.pkl"))
print("load cnn")
cnn.cuda()
# init dataset
print("init dataset")
data_loader = dataset84.jump_data_loader()
# init optimizer
unet_optimizer = torch.optim.Adam(unet.parameters(),lr=0.001)
cnn_optimizer = torch.optim.Adam(cnn.parameters(),lr = 0.001)
criterion = nn.MSELoss()
# train
print("training...")
for epoch in range(1000):
for i, (images, press_times) in enumerate(data_loader):
images = Variable(images).cuda()
press_times = Variable(press_times.float()).cuda()
masks = unet(images)
segmentations = images * masks
predict_press_times = cnn(segmentations)
loss = criterion(predict_press_times,press_times)
unet_optimizer.zero_grad()
cnn_optimizer.zero_grad()
loss.backward()
unet_optimizer.step()
cnn_optimizer.step()
if (i+1) % 10 == 0:
print("epoch:",epoch,"step:",i,"loss:",loss.data[0])
if (epoch+1) % 5 == 0 and i == 0:
torch.save(unet.state_dict(),"./unet.pkl")
torch.save(cnn.state_dict(),"./cnn.pkl")
print("save model")
if __name__ == '__main__':
main()
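# A minimal inference sketch (assumption: `image` is a 1x3xHxW float tensor on
# the GPU, preprocessed the same way dataset84 prepares training images):
#
# def predict_press_time(image):
#     unet = UNet(3, 1)
#     unet.load_state_dict(torch.load("./unet.pkl"))
#     cnn = CNNEncoder()
#     cnn.load_state_dict(torch.load("./cnn.pkl"))
#     unet.eval()
#     cnn.eval()
#     mask = unet(image)        # predicted segmentation mask
#     return cnn(image * mask)  # predicted press time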
|
the-stack_0_16317
|
from django.template.response import TemplateResponse
from rest_framework.settings import api_settings
from django.core.paginator import Paginator
from rest_framework import viewsets, permissions
from . import models
from . import serializers
class ProductViewset(viewsets.ModelViewSet):
permission_classes = [permissions.DjangoModelPermissions]
serializer_class = serializers.ProductSerializer
filterset_fields = {
'id': ['exact'],
'name': ['exact' , 'contains'],
'description': ['exact', 'contains'],
'team': ['exact'],
'creator': ['exact'],
'imagesets': ['exact'],
'annotationtype': ['exact'],
}
def get_queryset(self):
user = self.request.user
return models.Product.objects.filter(team__in=user.team_set.all()).select_related('creator', 'team').order_by('id')
def create(self, request):
user = self.request.user
if "creator" not in request.data:
request.data["creator"] = user.id
response = super().create(request)
return response
def list(self, request, *args, **kwargs):
if "api" in request.META['PATH_INFO']:
return super(ProductViewset, self).list(request, *args, **kwargs)
else:
products = self.filter_queryset(self.get_queryset()).order_by('team', 'id')
current_query = request.META['QUERY_STRING']
if "page" not in request.query_params:
current_query += "&page=1"
page_id = 1
else:
page_id = int(request.query_params.get('page', 1))
limit = int(request.query_params.get('limit', api_settings.PAGE_SIZE))
paginator = Paginator(products, limit)
page = paginator.get_page(page_id)
previous_query = first_query = current_query.replace("&page="+str(page_id), "&page=1")
if page.has_previous():
previous_query = current_query.replace("&page="+str(page_id), "&page={}".format(page.previous_page_number()))
next_query = last_query = current_query.replace("&page="+str(page_id), "&page={}".format(paginator.num_pages))
if page.has_next():
next_query = current_query.replace("&page="+str(page_id), "&page={}".format(page.next_page_number()))
return TemplateResponse(request, 'base/explore.html', {
'mode': 'products',
'products': page, # to separate what kind of stuff is displayed in the view
'paginator': page, # for page stuff
'first_query': first_query,
'previous_query': previous_query,
'next_query': next_query,
'last_query': last_query,
#'filter': self.filterset_class
})
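# A minimal sketch of how this viewset might be exposed in urls.py (the route
# prefix and basename are assumptions):
#
# from rest_framework.routers import DefaultRouter
#
# router = DefaultRouter()
# router.register(r'products', ProductViewset, basename='product')
# urlpatterns = router.urls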
|
the-stack_0_16319
|
"""
MNIST example with training and validation monitoring using TensorboardX and Tensorboard.
Requirements:
TensorboardX (https://github.com/lanpa/tensorboard-pytorch): `pip install tensorboardX`
Tensorboard: `pip install tensorflow` (or just install tensorboard without the rest of tensorflow)
Usage:
Start tensorboard:
```bash
tensorboard --logdir=/tmp/tensorboard_logs/
```
Run the example:
```bash
python mnist_with_tensorboardx.py --log_dir=/tmp/tensorboard_logs
```
"""
from __future__ import print_function
from argparse import ArgumentParser
import torch
from torch.utils.data import DataLoader
from torch import nn
import torch.nn.functional as F
from torch.optim import SGD
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, ToTensor, Normalize
try:
from tensorboardX import SummaryWriter
except ImportError:
raise RuntimeError("No tensorboardX package is found. Please install with the command: \npip install tensorboardX")
from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator
from ignite.metrics import Accuracy, Loss
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=-1)
def get_data_loaders(train_batch_size, val_batch_size):
data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])
train_loader = DataLoader(MNIST(download=True, root=".", transform=data_transform, train=True),
batch_size=train_batch_size, shuffle=True)
val_loader = DataLoader(MNIST(download=False, root=".", transform=data_transform, train=False),
batch_size=val_batch_size, shuffle=False)
return train_loader, val_loader
def create_summary_writer(model, data_loader, log_dir):
writer = SummaryWriter(logdir=log_dir)
data_loader_iter = iter(data_loader)
x, y = next(data_loader_iter)
try:
writer.add_graph(model, x)
except Exception as e:
print("Failed to save model graph: {}".format(e))
return writer
def run(train_batch_size, val_batch_size, epochs, lr, momentum, log_interval, log_dir):
train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size)
model = Net()
writer = create_summary_writer(model, train_loader, log_dir)
device = 'cpu'
if torch.cuda.is_available():
device = 'cuda'
optimizer = SGD(model.parameters(), lr=lr, momentum=momentum)
trainer = create_supervised_trainer(model, optimizer, F.nll_loss, device=device)
evaluator = create_supervised_evaluator(model,
metrics={'accuracy': Accuracy(),
'nll': Loss(F.nll_loss)},
device=device)
@trainer.on(Events.ITERATION_COMPLETED(every=log_interval))
def log_training_loss(engine):
print("Epoch[{}] Iteration[{}/{}] Loss: {:.2f}"
"".format(engine.state.epoch, iter, len(train_loader), engine.state.output))
writer.add_scalar("training/loss", engine.state.output, engine.state.iteration)
@trainer.on(Events.EPOCH_COMPLETED)
def log_training_results(engine):
evaluator.run(train_loader)
metrics = evaluator.state.metrics
avg_accuracy = metrics['accuracy']
avg_nll = metrics['nll']
print("Training Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}"
.format(engine.state.epoch, avg_accuracy, avg_nll))
writer.add_scalar("training/avg_loss", avg_nll, engine.state.epoch)
writer.add_scalar("training/avg_accuracy", avg_accuracy, engine.state.epoch)
@trainer.on(Events.EPOCH_COMPLETED)
def log_validation_results(engine):
evaluator.run(val_loader)
metrics = evaluator.state.metrics
avg_accuracy = metrics['accuracy']
avg_nll = metrics['nll']
print("Validation Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}"
.format(engine.state.epoch, avg_accuracy, avg_nll))
writer.add_scalar("valdation/avg_loss", avg_nll, engine.state.epoch)
writer.add_scalar("valdation/avg_accuracy", avg_accuracy, engine.state.epoch)
# kick everything off
trainer.run(train_loader, max_epochs=epochs)
writer.close()
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument('--batch_size', type=int, default=64,
help='input batch size for training (default: 64)')
parser.add_argument('--val_batch_size', type=int, default=1000,
help='input batch size for validation (default: 1000)')
parser.add_argument('--epochs', type=int, default=10,
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.01,
help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.5,
help='SGD momentum (default: 0.5)')
parser.add_argument('--log_interval', type=int, default=10,
help='how many batches to wait before logging training status')
parser.add_argument("--log_dir", type=str, default="tensorboard_logs",
help="log directory for Tensorboard log output")
args = parser.parse_args()
run(args.batch_size, args.val_batch_size, args.epochs, args.lr, args.momentum,
args.log_interval, args.log_dir)
|
the-stack_0_16320
|
import csv
import email.message
import json
import logging
import pathlib
import re
import zipfile
from typing import (
IO,
TYPE_CHECKING,
Collection,
Container,
Iterable,
Iterator,
List,
Optional,
Tuple,
Union,
)
from pip._vendor.packaging.requirements import Requirement
from pip._vendor.packaging.specifiers import InvalidSpecifier, SpecifierSet
from pip._vendor.packaging.utils import NormalizedName
from pip._vendor.packaging.version import LegacyVersion, Version
from pip._internal.exceptions import NoneMetadataError
from pip._internal.locations import site_packages, user_site
from pip._internal.models.direct_url import (
DIRECT_URL_METADATA_NAME,
DirectUrl,
DirectUrlValidationError,
)
from pip._internal.utils.compat import stdlib_pkgs # TODO: Move definition here.
from pip._internal.utils.egg_link import (
egg_link_path_from_location,
egg_link_path_from_sys_path,
)
from pip._internal.utils.misc import is_local, normalize_path
from pip._internal.utils.urls import url_to_path
if TYPE_CHECKING:
from typing import Protocol
else:
Protocol = object
DistributionVersion = Union[LegacyVersion, Version]
InfoPath = Union[str, pathlib.PurePosixPath]
logger = logging.getLogger(__name__)
class BaseEntryPoint(Protocol):
@property
def name(self) -> str:
raise NotImplementedError()
@property
def value(self) -> str:
raise NotImplementedError()
@property
def group(self) -> str:
raise NotImplementedError()
def _convert_installed_files_path(
entry: Tuple[str, ...],
info: Tuple[str, ...],
) -> str:
"""Convert a legacy installed-files.txt path into modern RECORD path.
The legacy format stores paths relative to the info directory, while the
modern format stores paths relative to the package root, e.g. the
site-packages directory.
:param entry: Path parts of the installed-files.txt entry.
:param info: Path parts of the egg-info directory relative to package root.
:returns: The converted entry.
For best compatibility with symlinks, this does not use ``abspath()`` or
``Path.resolve()``, but tries to work with path parts:
1. While ``entry`` starts with ``..``, remove the equal amounts of parts
from ``info``; if ``info`` is empty, start appending ``..`` instead.
2. Join the two directly.
"""
while entry and entry[0] == "..":
if not info or info[-1] == "..":
info += ("..",)
else:
info = info[:-1]
entry = entry[1:]
return str(pathlib.Path(*info, *entry))
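# A worked example of the conversion above (paths are illustrative only): with
# entry=("..", "pkg", "mod.py") and info=("pkg-1.0.egg-info",), the single ".."
# consumes the trailing info part, leaving "pkg/mod.py" (on POSIX), i.e. the
# path relative to the package root, as RECORD expects.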
class BaseDistribution(Protocol):
def __repr__(self) -> str:
return f"{self.raw_name} {self.version} ({self.location})"
def __str__(self) -> str:
return f"{self.raw_name} {self.version}"
@property
def location(self) -> Optional[str]:
"""Where the distribution is loaded from.
A string value is not necessarily a filesystem path, since distributions
can be loaded from other sources, e.g. arbitrary zip archives. ``None``
means the distribution is created in-memory.
Do not canonicalize this value with e.g. ``pathlib.Path.resolve()``. If
this is a symbolic link, we want to preserve the relative path between
it and files in the distribution.
"""
raise NotImplementedError()
@property
def editable_project_location(self) -> Optional[str]:
"""The project location for editable distributions.
This is the directory where pyproject.toml or setup.py is located.
None if the distribution is not installed in editable mode.
"""
# TODO: this property is relatively costly to compute, memoize it ?
direct_url = self.direct_url
if direct_url:
if direct_url.is_local_editable():
return url_to_path(direct_url.url)
else:
# Search for an .egg-link file by walking sys.path, as it was
# done before by dist_is_editable().
egg_link_path = egg_link_path_from_sys_path(self.raw_name)
if egg_link_path:
# TODO: get project location from second line of egg_link file
# (https://github.com/pypa/pip/issues/10243)
return self.location
return None
@property
def installed_location(self) -> Optional[str]:
"""The distribution's "installed" location.
This should generally be a ``site-packages`` directory. This is
usually ``dist.location``, except for legacy develop-installed packages,
where ``dist.location`` is the source code location, and this is where
the ``.egg-link`` file is.
The returned location is normalized (in particular, with symlinks removed).
"""
egg_link = egg_link_path_from_location(self.raw_name)
if egg_link:
location = egg_link
elif self.location:
location = self.location
else:
return None
return normalize_path(location)
@property
def info_location(self) -> Optional[str]:
"""Location of the .[egg|dist]-info directory or file.
Similarly to ``location``, a string value is not necessarily a
filesystem path. ``None`` means the distribution is created in-memory.
For a modern .dist-info installation on disk, this should be something
like ``{location}/{raw_name}-{version}.dist-info``.
Do not canonicalize this value with e.g. ``pathlib.Path.resolve()``. If
this is a symbolic link, we want to preserve the relative path between
it and other files in the distribution.
"""
raise NotImplementedError()
@property
def installed_by_distutils(self) -> bool:
"""Whether this distribution is installed with legacy distutils format.
A distribution installed with "raw" distutils not patched by setuptools
uses one single file at ``info_location`` to store metadata. We need to
treat this specially on uninstallation.
"""
info_location = self.info_location
if not info_location:
return False
return pathlib.Path(info_location).is_file()
@property
def installed_as_egg(self) -> bool:
"""Whether this distribution is installed as an egg.
This usually indicates the distribution was installed by (older versions
of) easy_install.
"""
location = self.location
if not location:
return False
return location.endswith(".egg")
@property
def installed_with_setuptools_egg_info(self) -> bool:
"""Whether this distribution is installed with the ``.egg-info`` format.
This usually indicates the distribution was installed with setuptools
with an old pip version or with ``single-version-externally-managed``.
        Note that this ensures the metadata store is a directory. distutils can
        also install an ``.egg-info``, but as a file, not a directory. This
property is *False* for that case. Also see ``installed_by_distutils``.
"""
info_location = self.info_location
if not info_location:
return False
if not info_location.endswith(".egg-info"):
return False
return pathlib.Path(info_location).is_dir()
@property
def installed_with_dist_info(self) -> bool:
"""Whether this distribution is installed with the "modern format".
This indicates a "modern" installation, e.g. storing metadata in the
``.dist-info`` directory. This applies to installations made by
setuptools (but through pip, not directly), or anything using the
standardized build backend interface (PEP 517).
"""
info_location = self.info_location
if not info_location:
return False
if not info_location.endswith(".dist-info"):
return False
return pathlib.Path(info_location).is_dir()
@property
def canonical_name(self) -> NormalizedName:
raise NotImplementedError()
@property
def version(self) -> DistributionVersion:
raise NotImplementedError()
@property
def setuptools_filename(self) -> str:
"""Convert a project name to its setuptools-compatible filename.
This is a copy of ``pkg_resources.to_filename()`` for compatibility.
"""
return self.raw_name.replace("-", "_")
@property
def direct_url(self) -> Optional[DirectUrl]:
"""Obtain a DirectUrl from this distribution.
Returns None if the distribution has no `direct_url.json` metadata,
or if `direct_url.json` is invalid.
"""
try:
content = self.read_text(DIRECT_URL_METADATA_NAME)
except FileNotFoundError:
return None
try:
return DirectUrl.from_json(content)
except (
UnicodeDecodeError,
json.JSONDecodeError,
DirectUrlValidationError,
) as e:
logger.warning(
"Error parsing %s for %s: %s",
DIRECT_URL_METADATA_NAME,
self.canonical_name,
e,
)
return None
@property
def installer(self) -> str:
try:
installer_text = self.read_text("INSTALLER")
except (OSError, ValueError, NoneMetadataError):
return "" # Fail silently if the installer file cannot be read.
for line in installer_text.splitlines():
cleaned_line = line.strip()
if cleaned_line:
return cleaned_line
return ""
@property
def editable(self) -> bool:
return bool(self.editable_project_location)
@property
def local(self) -> bool:
"""If distribution is installed in the current virtual environment.
Always True if we're not in a virtualenv.
"""
if self.installed_location is None:
return False
return is_local(self.installed_location)
@property
def in_usersite(self) -> bool:
if self.installed_location is None or user_site is None:
return False
return self.installed_location.startswith(normalize_path(user_site))
@property
def in_site_packages(self) -> bool:
if self.installed_location is None or site_packages is None:
return False
return self.installed_location.startswith(normalize_path(site_packages))
def is_file(self, path: InfoPath) -> bool:
"""Check whether an entry in the info directory is a file."""
raise NotImplementedError()
def iterdir(self, path: InfoPath) -> Iterator[pathlib.PurePosixPath]:
"""Iterate through a directory in the info directory.
Each item yielded would be a path relative to the info directory.
:raise FileNotFoundError: If ``name`` does not exist in the directory.
:raise NotADirectoryError: If ``name`` does not point to a directory.
"""
raise NotImplementedError()
def read_text(self, path: InfoPath) -> str:
"""Read a file in the info directory.
:raise FileNotFoundError: If ``name`` does not exist in the directory.
:raise NoneMetadataError: If ``name`` exists in the info directory, but
cannot be read.
"""
raise NotImplementedError()
def iter_entry_points(self) -> Iterable[BaseEntryPoint]:
raise NotImplementedError()
@property
def metadata(self) -> email.message.Message:
"""Metadata of distribution parsed from e.g. METADATA or PKG-INFO.
This should return an empty message if the metadata file is unavailable.
:raises NoneMetadataError: If the metadata file is available, but does
not contain valid metadata.
"""
raise NotImplementedError()
@property
def metadata_version(self) -> Optional[str]:
"""Value of "Metadata-Version:" in distribution metadata, if available."""
return self.metadata.get("Metadata-Version")
@property
def raw_name(self) -> str:
"""Value of "Name:" in distribution metadata."""
# The metadata should NEVER be missing the Name: key, but if it somehow
# does, fall back to the known canonical name.
return self.metadata.get("Name", self.canonical_name)
@property
def requires_python(self) -> SpecifierSet:
"""Value of "Requires-Python:" in distribution metadata.
If the key does not exist or contains an invalid value, an empty
SpecifierSet should be returned.
"""
value = self.metadata.get("Requires-Python")
if value is None:
return SpecifierSet()
try:
# Convert to str to satisfy the type checker; this can be a Header object.
spec = SpecifierSet(str(value))
except InvalidSpecifier as e:
message = "Package %r has an invalid Requires-Python: %s"
logger.warning(message, self.raw_name, e)
return SpecifierSet()
return spec
def iter_dependencies(self, extras: Collection[str] = ()) -> Iterable[Requirement]:
"""Dependencies of this distribution.
For modern .dist-info distributions, this is the collection of
"Requires-Dist:" entries in distribution metadata.
"""
raise NotImplementedError()
def iter_provided_extras(self) -> Iterable[str]:
"""Extras provided by this distribution.
For modern .dist-info distributions, this is the collection of
"Provides-Extra:" entries in distribution metadata.
"""
raise NotImplementedError()
def _iter_declared_entries_from_record(self) -> Optional[Iterator[str]]:
try:
text = self.read_text("RECORD")
except FileNotFoundError:
return None
# This extra Path-str cast normalizes entries.
return (str(pathlib.Path(row[0])) for row in csv.reader(text.splitlines()))
def _iter_declared_entries_from_legacy(self) -> Optional[Iterator[str]]:
try:
text = self.read_text("installed-files.txt")
except FileNotFoundError:
return None
paths = (p for p in text.splitlines(keepends=False) if p)
root = self.location
info = self.info_location
if root is None or info is None:
return paths
try:
info_rel = pathlib.Path(info).relative_to(root)
except ValueError: # info is not relative to root.
return paths
if not info_rel.parts: # info *is* root.
return paths
return (
_convert_installed_files_path(pathlib.Path(p).parts, info_rel.parts)
for p in paths
)
def iter_declared_entries(self) -> Optional[Iterator[str]]:
"""Iterate through file entires declared in this distribution.
For modern .dist-info distributions, this is the files listed in the
``RECORD`` metadata file. For legacy setuptools distributions, this
comes from ``installed-files.txt``, with entries normalized to be
compatible with the format used by ``RECORD``.
:return: An iterator for listed entries, or None if the distribution
contains neither ``RECORD`` nor ``installed-files.txt``.
"""
return (
self._iter_declared_entries_from_record()
or self._iter_declared_entries_from_legacy()
)
class BaseEnvironment:
"""An environment containing distributions to introspect."""
@classmethod
def default(cls) -> "BaseEnvironment":
raise NotImplementedError()
@classmethod
def from_paths(cls, paths: Optional[List[str]]) -> "BaseEnvironment":
raise NotImplementedError()
def get_distribution(self, name: str) -> Optional["BaseDistribution"]:
"""Given a requirement name, return the installed distributions.
The name may not be normalized. The implementation must canonicalize
it for lookup.
"""
raise NotImplementedError()
def _iter_distributions(self) -> Iterator["BaseDistribution"]:
"""Iterate through installed distributions.
This function should be implemented by subclass, but never called
directly. Use the public ``iter_distribution()`` instead, which
implements additional logic to make sure the distributions are valid.
"""
raise NotImplementedError()
def iter_distributions(self) -> Iterator["BaseDistribution"]:
"""Iterate through installed distributions."""
for dist in self._iter_distributions():
# Make sure the distribution actually comes from a valid Python
# packaging distribution. Pip's AdjacentTempDirectory leaves folders
# e.g. ``~atplotlib.dist-info`` if cleanup was interrupted. The
# valid project name pattern is taken from PEP 508.
project_name_valid = re.match(
r"^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$",
dist.canonical_name,
flags=re.IGNORECASE,
)
if not project_name_valid:
logger.warning(
"Ignoring invalid distribution %s (%s)",
dist.canonical_name,
dist.location,
)
continue
yield dist
def iter_installed_distributions(
self,
local_only: bool = True,
skip: Container[str] = stdlib_pkgs,
include_editables: bool = True,
editables_only: bool = False,
user_only: bool = False,
) -> Iterator[BaseDistribution]:
"""Return a list of installed distributions.
:param local_only: If True (default), only return installations
local to the current virtualenv, if in a virtualenv.
:param skip: An iterable of canonicalized project names to ignore;
defaults to ``stdlib_pkgs``.
:param include_editables: If False, don't report editables.
:param editables_only: If True, only report editables.
:param user_only: If True, only report installations in the user
site directory.
"""
it = self.iter_distributions()
if local_only:
it = (d for d in it if d.local)
if not include_editables:
it = (d for d in it if not d.editable)
if editables_only:
it = (d for d in it if d.editable)
if user_only:
it = (d for d in it if d.in_usersite)
return (d for d in it if d.canonical_name not in skip)
class Wheel(Protocol):
location: str
def as_zipfile(self) -> zipfile.ZipFile:
raise NotImplementedError()
class FilesystemWheel(Wheel):
def __init__(self, location: str) -> None:
self.location = location
def as_zipfile(self) -> zipfile.ZipFile:
return zipfile.ZipFile(self.location, allowZip64=True)
class MemoryWheel(Wheel):
def __init__(self, location: str, stream: IO[bytes]) -> None:
self.location = location
self.stream = stream
def as_zipfile(self) -> zipfile.ZipFile:
return zipfile.ZipFile(self.stream, allowZip64=True)
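# A minimal usage sketch for the wheel wrappers above (the path is illustrative):
#
# wheel = FilesystemWheel("/tmp/example-1.0-py3-none-any.whl")
# with wheel.as_zipfile() as zf:
#     print(zf.namelist())  # e.g. the METADATA and RECORD entries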
|
the-stack_0_16321
|
# -*- coding: utf-8 -*-
# @Author : ydf
# @Time : 2021/4/3 0008 13:32
from function_scheduling_distributed_framework.publishers.base_publisher import AbstractPublisher
from function_scheduling_distributed_framework.utils import RedisMixin
class RedisStreamPublisher(AbstractPublisher, RedisMixin):
"""
    Implemented on top of the redis stream structure as the broker. Requires redis 5.0+;
    the redis stream structure is redis's message queue and is far more capable than the list structure.
"""
_has__check_redis_version = False
def _check_redis_version(self):
redis_server_info_dict = self.redis_db_frame_version3.info()
if float(redis_server_info_dict['redis_version'][0]) < 5:
            raise EnvironmentError('the redis server must be version 5.0 or newer to support the stream '
                                   'data structure; please upgrade the server, or fall back to the '
                                   'REDIS_ACK_ABLE mode, which uses the redis list structure')
if self.redis_db_frame_version3.type(self._queue_name) == 'list':
            raise EnvironmentError(f'the key {self._queue_name} already exists with type list; '
                                   f'use a different queue name or delete that list key. '
                                   f'RedisStreamConsumer uses the stream data structure')
self._has__check_redis_version = True
def concrete_realization_of_publish(self, msg):
        # The redis server must be 5.0+, and the key must be of type stream, not list.
if not self._has__check_redis_version:
self._check_redis_version()
self.redis_db_frame_version3.xadd(self._queue_name, {"": msg})
def clear(self):
self.redis_db_frame.delete(self._queue_name)
        self.logger.warning(f'successfully cleared the messages in queue {self._queue_name}')
def get_message_count(self):
# nb_print(self.redis_db7,self._queue_name)
return self.redis_db_frame_version3.xlen(self._queue_name)
def close(self):
# self.redis_db7.connection_pool.disconnect()
pass
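# A minimal consumer-side sketch using the plain redis-py stream API (queue name
# and connection settings are assumptions; the framework's own consumer class
# would normally be used instead):
#
# import redis
# r = redis.Redis(decode_responses=True)
# for stream, entries in r.xread({"my_queue": "0-0"}, count=10, block=1000):
#     for entry_id, fields in entries:
#         print(entry_id, fields)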
|
the-stack_0_16324
|
from abstract.instruccion import *
from tools.console_text import *
from tools.tabla_tipos import *
from instruccion.create_column import *
from storage import jsonMode as funciones
from error.errores import *
from tools.tabla_simbolos import *
class create_table(instruccion):
def __init__(self, id_table, columnas, inherits_s, line, column, num_nodo):
super().__init__(line, column)
self.id_table = id_table
self.columnas = columnas
self.inherits_s = inherits_s
        # AST node for CREATE TABLE
self.nodo = nodo_AST('CREATE TABLE', num_nodo)
self.nodo.hijos.append(nodo_AST('CREATE TABLE', num_nodo+1))
self.nodo.hijos.append(nodo_AST(id_table, num_nodo+2))
self.nodo.hijos.append(nodo_AST('(', num_nodo+3))
for columna in columnas:
self.nodo.hijos.append(columna.nodo)
self.nodo.hijos.append(nodo_AST(')', num_nodo+4))
if(inherits_s != None):
self.nodo.hijos.append(inherits_s.nodo)
        # Grammar
self.grammar_ = '<TR><TD> INSTRUCCION ::= CREATE TABLE ' + id_table + ' ( COLUMNAS ) INHERITS </TD><TD> new create_table(' + id_table + ', COLUMNAS, INHERITS); </TD></TR>\n'
self.grammar_ += '<TR><TD> COLUMNAS ::= COLUMNA </TD><TD> COLUMNAS = []; </TD></TR>\n'
for columna in columnas:
self.grammar_ += columna.grammar_
if inherits_s != None:
self.grammar_ += inherits_s.grammar_
else:
self.grammar_ += '<TR><TD> INHERITS ::= EPSILON </TD><TD> INHERITS = None; </TD></TR>\n'
def ejecutar(self):
use_actual_db = get_actual_use()
        # Get the number of columns of the table
count_rows = 0
for row in self.columnas:
if isinstance(row, create_column):
count_rows += 1
        # Create the table
        new_table = funciones.createTable(use_actual_db, self.id_table, count_rows)
        # Return value: 0 success, 1 operation error, 2 database does not exist, 3 table already exists.
if new_table == 0:
            # Create the symbol for the table
new_tb = symbol_tb(self.id_table)
ts.add_tb(use_actual_db, new_tb)
            # Create the columns
for row in self.columnas:
if isinstance(row, create_column):
row.ejecutar(self.id_table)
add_text("Tabla creada con exito - " + self.id_table + ' - en base de datos: ' + use_actual_db + '\n')
elif new_table == 1:
errores.append(nodo_error(self.line, self.column, 'Tabla no puedo ser creada con exito - ' + self.id_table + ' -', 'Semántico'))
add_text('Tabla no puedo ser creada con exito - ' + self.id_table + ' -\n')
elif new_table == 2:
errores.append(nodo_error(self.line, self.column, 'No existe la base de datos - ' + use_actual_db + ' - ', 'Semántico'))
add_text('No existe la base de datos - ' + use_actual_db + ' - \n')
elif new_table == 3:
errores.append(nodo_error(self.line, self.column, 'Ya existe una tabla con el nombre - ' + self.id_table + ' -', 'Semántico'))
add_text('Ya existe una tabla con el nombre - ' + self.id_table + ' - \n')
|
the-stack_0_16328
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2019, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('chart_crossing02.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'column'})
chart.axis_ids = [43812352, 43814272]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
chart.set_x_axis({'crossing': 3})
chart.set_y_axis({'crossing': 8})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
|
the-stack_0_16329
|
import urllib.request
from bs4 import BeautifulSoup
from assets import data
from assets import functions
from models.Fish import Fish
page = functions.scrape_file("fish.html")
table = page.find('table', {"class": "wikitable"})
tableRows = table.find_all('tr')
rowCount = 0
for row in tableRows:
rowCount = rowCount + 1
    if rowCount != 1:
rowData = row.find_all('td')
dataCount = 0
for dataCol in rowData:
dataCount = dataCount + 1
## NAME
            if dataCount == 1:
fishName = dataCol.text
print(fishName)
## LOCATION
            if dataCount == 2:
fishLocation = dataCol.text
## BUY
            if dataCount == 3:
fishBuy = dataCol.text
## SELL
            if dataCount == 4:
fishSell = dataCol.text
## RARE SELL
            if dataCount == 5:
fishRareSell = dataCol.text
print( '-------' )
fish = Fish(fishName.rstrip(), fishLocation.rstrip(), '', '', fishBuy.rstrip(), fishSell.rstrip(), fishRareSell.rstrip())
functions.add_object_json_to_file(fish, "fish_1.json")
|
the-stack_0_16331
|
r"""
Orthogonal arrays (OA)
This module gathers some constructions related to orthogonal arrays (or
transversal designs). One can build an `OA(k,n)` (or check that it can be built)
from the Sage console with ``designs.orthogonal_arrays.build``::
sage: OA = designs.orthogonal_arrays.build(4,8)
See also the modules :mod:`~sage.combinat.designs.orthogonal_arrays_build_recursive` or
:mod:`~sage.combinat.designs.orthogonal_arrays_find_recursive` for recursive
constructions.
This module defines the following functions:
.. csv-table::
:class: contentstable
:widths: 30, 70
:delim: |
:meth:`orthogonal_array` | Return an orthogonal array of parameters `k,n,t`.
:meth:`transversal_design` | Return a transversal design of parameters `k,n`.
:meth:`incomplete_orthogonal_array` | Return an `OA(k,n)-\sum_{1\leq i\leq x} OA(k,s_i)`.
.. csv-table::
:class: contentstable
:widths: 30, 70
:delim: |
:meth:`is_transversal_design` | Check that a given set of blocks ``B`` is a transversal design.
:meth:`~sage.combinat.designs.designs_pyx.is_orthogonal_array` | Check that the integer matrix `OA` is an `OA(k,n,t)`.
:meth:`wilson_construction` | Return a `OA(k,rm+u)` from a truncated `OA(k+s,r)` by Wilson's construction.
:meth:`TD_product` | Return the product of two transversal designs.
:meth:`OA_find_disjoint_blocks` | Return `x` disjoint blocks contained in a given `OA(k,n)`.
:meth:`OA_relabel` | Return a relabelled version of the OA.
:meth:`OA_from_quasi_difference_matrix` | Return an Orthogonal Array from a Quasi-Difference matrix
:meth:`OA_from_Vmt` | Return an Orthogonal Array from a `V(m,t)`
:meth:`OA_from_PBD` | Return an `OA(k,n)` from a PBD
:meth:`OA_n_times_2_pow_c_from_matrix` | Return an `OA(k, \vert G\vert \cdot 2^c)` from a constrained `(G,k-1,2)`-difference matrix.
:meth:`OA_from_wider_OA` | Return the first `k` columns of `OA`.
:meth:`QDM_from_Vmt` | Return a QDM a `V(m,t)`
REFERENCES:
.. [CD96] Making the MOLS table
Charles Colbourn and Jeffrey Dinitz
Computational and constructive design theory
vol 368,pages 67-134
1996
Functions
---------
"""
from __future__ import print_function
from __future__ import absolute_import
from sage.misc.cachefunc import cached_function
from sage.categories.sets_cat import EmptySetError
from sage.misc.unknown import Unknown
from .designs_pyx import is_orthogonal_array
from .group_divisible_designs import GroupDivisibleDesign
from .designs_pyx import _OA_cache_set, _OA_cache_get, _OA_cache_construction_available
def transversal_design(k,n,resolvable=False,check=True,existence=False):
r"""
Return a transversal design of parameters `k,n`.
A transversal design of parameters `k, n` is a collection `\mathcal{S}` of
subsets of `V = V_1 \cup \cdots \cup V_k` (where the *groups* `V_i` are
disjoint and have cardinality `n`) such that:
* Any `S \in \mathcal{S}` has cardinality `k` and intersects each group on
exactly one element.
    * Any two elements from distinct groups are contained in exactly one
element of `\mathcal{S}`.
More general definitions sometimes involve a `\lambda` parameter, and we
assume here that `\lambda=1`.
For more information on transversal designs, see
`<http://mathworld.wolfram.com/TransversalDesign.html>`_.
INPUT:
- `n,k` -- integers. If ``k is None`` it is set to the largest value
available.
- ``resolvable`` (boolean) -- set to ``True`` if you want the design to be
resolvable (see
:meth:`sage.combinat.designs.incidence_structures.IncidenceStructure.is_resolvable`). The
`n` classes of the resolvable design are obtained as the first `n` blocks,
then the next `n` blocks, etc ... Set to ``False`` by default.
- ``check`` -- (boolean) Whether to check that output is correct before
returning it. As this is expected to be useless (but we are cautious
guys), you may want to disable it whenever you want speed. Set to
``True`` by default.
- ``existence`` (boolean) -- instead of building the design, return:
- ``True`` -- meaning that Sage knows how to build the design
- ``Unknown`` -- meaning that Sage does not know how to build the
design, but that the design may exist (see :mod:`sage.misc.unknown`).
- ``False`` -- meaning that the design does not exist.
.. NOTE::
When ``k=None`` and ``existence=True`` the function returns an
integer, i.e. the largest `k` such that we can build a `TD(k,n)`.
OUTPUT:
The kind of output depends on the input:
- if ``existence=False`` (the default) then the output is a list of lists
that represent a `TD(k,n)` with
`V_1=\{0,\dots,n-1\},\dots,V_k=\{(k-1)n,\dots,kn-1\}`
- if ``existence=True`` and ``k`` is an integer, then the function returns a
troolean: either ``True``, ``Unknown`` or ``False``
- if ``existence=True`` and ``k=None`` then the output is the largest value
of ``k`` for which Sage knows how to compute a `TD(k,n)`.
.. SEEALSO::
        :func:`orthogonal_array` -- a transversal design `TD(k,n)` is equivalent to an
orthogonal array `OA(k,n,2)`.
EXAMPLES::
sage: TD = designs.transversal_design(5,5); TD
Transversal Design TD(5,5)
sage: TD.blocks()
[[0, 5, 10, 15, 20], [0, 6, 12, 18, 24], [0, 7, 14, 16, 23],
[0, 8, 11, 19, 22], [0, 9, 13, 17, 21], [1, 5, 14, 18, 22],
[1, 6, 11, 16, 21], [1, 7, 13, 19, 20], [1, 8, 10, 17, 24],
[1, 9, 12, 15, 23], [2, 5, 13, 16, 24], [2, 6, 10, 19, 23],
[2, 7, 12, 17, 22], [2, 8, 14, 15, 21], [2, 9, 11, 18, 20],
[3, 5, 12, 19, 21], [3, 6, 14, 17, 20], [3, 7, 11, 15, 24],
[3, 8, 13, 18, 23], [3, 9, 10, 16, 22], [4, 5, 11, 17, 23],
[4, 6, 13, 15, 22], [4, 7, 10, 18, 21], [4, 8, 12, 16, 20],
[4, 9, 14, 19, 24]]
Some examples of the maximal number of transversal Sage is able to build::
sage: TD_4_10 = designs.transversal_design(4,10)
sage: designs.transversal_design(5,10,existence=True)
Unknown
For prime powers, there is an explicit construction which gives a
`TD(n+1,n)`::
sage: designs.transversal_design(4, 3, existence=True)
True
sage: designs.transversal_design(674, 673, existence=True)
True
For other values of ``n`` it depends::
sage: designs.transversal_design(7, 6, existence=True)
False
sage: designs.transversal_design(4, 6, existence=True)
Unknown
sage: designs.transversal_design(3, 6, existence=True)
True
sage: designs.transversal_design(11, 10, existence=True)
False
sage: designs.transversal_design(4, 10, existence=True)
True
sage: designs.transversal_design(5, 10, existence=True)
Unknown
sage: designs.transversal_design(7, 20, existence=True)
Unknown
sage: designs.transversal_design(6, 12, existence=True)
True
sage: designs.transversal_design(7, 12, existence=True)
True
sage: designs.transversal_design(8, 12, existence=True)
Unknown
sage: designs.transversal_design(6, 20, existence = True)
True
sage: designs.transversal_design(7, 20, existence = True)
Unknown
If you ask for a transversal design that Sage is not able to build then an
``EmptySetError`` or a ``NotImplementedError`` is raised::
sage: designs.transversal_design(47, 100)
Traceback (most recent call last):
...
NotImplementedError: I don't know how to build a TD(47,100)!
sage: designs.transversal_design(55, 54)
Traceback (most recent call last):
...
EmptySetError: There exists no TD(55,54)!
    Those two errors correspond respectively to the cases where Sage answers
``Unknown`` or ``False`` when the parameter ``existence`` is set to
``True``::
sage: designs.transversal_design(47, 100, existence=True)
Unknown
sage: designs.transversal_design(55, 54, existence=True)
False
If for a given `n` you want to know the largest `k` for which Sage is able
to build a `TD(k,n)` just call the function with `k` set to ``None`` and
``existence`` set to ``True`` as follows::
sage: designs.transversal_design(None, 6, existence=True)
3
sage: designs.transversal_design(None, 20, existence=True)
6
sage: designs.transversal_design(None, 30, existence=True)
6
sage: designs.transversal_design(None, 120, existence=True)
9
TESTS:
The case when `n=1`::
sage: designs.transversal_design(5,1).blocks()
[[0, 1, 2, 3, 4]]
Obtained through Wilson's decomposition::
sage: _ = designs.transversal_design(4,38)
Obtained through product decomposition::
sage: _ = designs.transversal_design(6,60)
sage: _ = designs.transversal_design(5,60) # checks some tricky divisibility error
For small values of the parameter ``n`` we check the coherence of the
function :func:`transversal_design`::
sage: for n in xrange(2,25): # long time -- 15 secs
....: i = 2
....: while designs.transversal_design(i, n, existence=True) is True:
....: i += 1
....: _ = designs.transversal_design(i-1, n)
....: assert designs.transversal_design(None, n, existence=True) == i - 1
....: j = i
....: while designs.transversal_design(j, n, existence=True) is Unknown:
....: try:
....: _ = designs.transversal_design(j, n)
....: raise AssertionError("no NotImplementedError")
....: except NotImplementedError:
....: pass
....: j += 1
....: k = j
....: while k < n+4:
....: assert designs.transversal_design(k, n, existence=True) is False
....: try:
....: _ = designs.transversal_design(k, n)
....: raise AssertionError("no EmptySetError")
....: except EmptySetError:
....: pass
....: k += 1
....: print("%2d: (%2d, %2d)"%(n,i,j))
2: ( 4, 4)
3: ( 5, 5)
4: ( 6, 6)
5: ( 7, 7)
6: ( 4, 7)
7: ( 9, 9)
8: (10, 10)
9: (11, 11)
10: ( 5, 11)
11: (13, 13)
12: ( 8, 14)
13: (15, 15)
14: ( 7, 15)
15: ( 7, 17)
16: (18, 18)
17: (19, 19)
18: ( 8, 20)
19: (21, 21)
20: ( 7, 22)
21: ( 8, 22)
22: ( 6, 23)
23: (25, 25)
24: (10, 26)
The special case `n=1`::
sage: designs.transversal_design(3, 1).blocks()
[[0, 1, 2]]
sage: designs.transversal_design(None, 1, existence=True)
+Infinity
sage: designs.transversal_design(None, 1)
Traceback (most recent call last):
...
ValueError: there is no upper bound on k when 0<=n<=1
Resolvable TD::
sage: k,n = 5,15
sage: TD = designs.transversal_design(k,n,resolvable=True)
sage: TD.is_resolvable()
True
sage: r = designs.transversal_design(None,n,resolvable=True,existence=True)
sage: non_r = designs.transversal_design(None,n,existence=True)
sage: r + 1 == non_r
True
"""
if resolvable:
if existence:
return orthogonal_array(k,n,resolvable=True,existence=True)
else:
OA = orthogonal_array(k,n,resolvable=True,check=False)
# the call to TransversalDesign will sort the blocks, so we cannot
# rely on the order *after* the call
blocks = [[i*n+c for i,c in enumerate(B)] for B in OA]
classes = [blocks[i:i+n] for i in range(0,n*n,n)]
TD = TransversalDesign(blocks,k,n,check=check,copy=False)
TD._classes = classes
return TD
# If k is None we find the largest available
if k is None:
if n == 0 or n == 1:
if existence:
from sage.rings.infinity import Infinity
return Infinity
raise ValueError("there is no upper bound on k when 0<=n<=1")
k = orthogonal_array(None,n,existence=True)
if existence:
return k
if existence and _OA_cache_get(k,n) is not None:
return _OA_cache_get(k,n)
may_be_available = _OA_cache_construction_available(k,n) is not False
if n == 1:
if existence:
return True
TD = [range(k)]
elif k >= n+2:
if existence:
return False
raise EmptySetError("No Transversal Design exists when k>=n+2 if n>=2")
# Section 6.6 of [Stinson2004]
elif orthogonal_array(k, n, existence=True) is not Unknown:
# Forwarding non-existence results
if orthogonal_array(k, n, existence=True):
if existence:
return True
else:
if existence:
return False
raise EmptySetError("There exists no TD({},{})!".format(k,n))
OA = orthogonal_array(k,n, check = False)
TD = [[i*n+c for i,c in enumerate(l)] for l in OA]
else:
if existence:
return Unknown
raise NotImplementedError("I don't know how to build a TD({},{})!".format(k,n))
return TransversalDesign(TD,k,n,check=check)
class TransversalDesign(GroupDivisibleDesign):
r"""
Class for Transversal Designs
INPUT:
- ``blocks`` -- collection of blocks
- ``k,n`` (integers) -- parameters of the transversal design. They can be
set to ``None`` (default) in which case their value is determined by the
blocks.
- ``check`` (boolean) -- whether to check that the design is indeed a
transversal design with the right parameters. Set to ``True`` by default.
EXAMPLES::
sage: designs.transversal_design(None,5)
Transversal Design TD(6,5)
sage: designs.transversal_design(None,30)
Transversal Design TD(6,30)
sage: designs.transversal_design(None,36)
Transversal Design TD(10,36)
"""
def __init__(self, blocks, k=None,n=None,check=True,**kwds):
r"""
Constructor of the class
EXAMPLES::
sage: designs.transversal_design(None,5)
Transversal Design TD(6,5)
"""
from math import sqrt
if k is None:
if blocks:
k=len(blocks[0])
else:
k=0
if n is None:
n = round(sqrt(len(blocks)))
self._n = n
self._k = k
if check:
assert is_transversal_design(blocks,k,n)
GroupDivisibleDesign.__init__(self,
k*n,
[range(i*n,(i+1)*n) for i in range(k)],
blocks,
check=False,
**kwds)
def __repr__(self):
r"""
Returns a string describing the transversal design.
EXAMPLES::
sage: designs.transversal_design(None,5)
Transversal Design TD(6,5)
sage: designs.transversal_design(None,30)
Transversal Design TD(6,30)
sage: designs.transversal_design(None,36)
Transversal Design TD(10,36)
"""
return "Transversal Design TD({},{})".format(self._k,self._n)
def is_transversal_design(B,k,n, verbose=False):
r"""
Check that a given set of blocks ``B`` is a transversal design.
See :func:`~sage.combinat.designs.orthogonal_arrays.transversal_design`
for a definition.
INPUT:
- ``B`` -- the list of blocks
- ``k, n`` -- integers
- ``verbose`` (boolean) -- whether to display information about what is
going wrong.
.. NOTE::
The transversal design must have `\{0, \ldots, kn-1\}` as a ground set,
partitioned as `k` sets of size `n`: `\{0, \ldots, n-1\} \sqcup
\{n, \ldots, 2n-1\} \sqcup \cdots \sqcup \{(k-1)n, \ldots, kn-1\}`.
EXAMPLES::
sage: TD = designs.transversal_design(5, 5, check=True) # indirect doctest
sage: from sage.combinat.designs.orthogonal_arrays import is_transversal_design
sage: is_transversal_design(TD, 5, 5)
True
sage: is_transversal_design(TD, 4, 4)
False
"""
return is_orthogonal_array([[x%n for x in R] for R in B],k,n,verbose=verbose)
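# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original Sage module).  The check above
# reduces a TD on {0,...,kn-1} with groups {i*n,...,(i+1)*n-1} to an orthogonal
# array check by taking every point modulo n.  The hypothetical helper below
# spells the t=2 property out in plain Python for TDs built with this module's
# labelling (entry i of every block lies in group i): any two coordinates must
# be joined by each of the n^2 symbol pairs exactly once.
def _demo_td_pairwise_check(blocks, k, n):
    from itertools import combinations
    if len(blocks) != n * n:
        return False
    for i, j in combinations(range(k), 2):
        pairs = set((B[i] % n, B[j] % n) for B in blocks)
        if len(pairs) != n * n:
            return False
    return True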
def wilson_construction(OA,k,r,m,u,check=True,explain_construction=False):
r"""
Return an `OA(k,rm+\sum_i u_i)` from a truncated `OA(k+s,r)` by Wilson's
construction.
**Simple form:**
Let `OA` be a truncated `OA(k+s,r)` with `s` truncated columns of sizes
`u_1,...,u_s`, whose blocks have sizes in `\{k+b_1,...,k+b_t\}`. If there
exist:
- An `OA(k,m+b_i) - b_i.OA(k,1)` for every `1\leq i\leq t`
- An `OA(k,u_i)` for every `1\leq i\leq s`
Then there exists an `OA(k,rm+\sum u_i)`. The construction is a
generalization of Lemma 3.16 in [HananiBIBD]_.
**Brouwer-Van Rees form:**
Let `OA` be a truncated `OA(k+s,r)` with `s` truncated columns of sizes
`u_1,...,u_s`. Let the set `H_i` of the `u_i` points of column `k+i` be
partitioned into `\sum_j H_{ij}`. Let `m_{ij}` be integers
such that:
- For `0\leq i <s` there exists an `OA(k,\sum_j m_{ij}|H_{ij}|)`
- For any block `B\in OA` intersecting the sets `H_{ij(i)}` there exists an
`OA(k,m+\sum_i m_{ij(i)})-\sum_i OA(k,m_{ij(i)})`.
Then there exists an `OA(k,rm+\sum_{i,j}m_{ij})`. This construction appears
in [BvR82]_.
INPUT:
- ``OA`` -- an incomplete orthogonal array with `k+s` columns. The elements
of a column of size `c` must belong to `\{0,...,c-1\}`. The missing entries
of a block are represented by ``None`` values. If ``OA=None``, it is
defined as a truncated orthogonal array with `k+s` columns.
- ``k,r,m`` (integers)
- ``u`` (list) -- two cases depending on the form to use:
- Simple form: a list of length `s` such that column ``k+i`` has size
``u[i]``. The untruncated points of column ``k+i`` are assumed to be
``[0,...,u[i]-1]``.
- Brouwer-Van Rees form: a list of length `s` such that ``u[i]`` is the
list of pairs `(m_{i0},|H_{i0}|),...,(m_{ip_i},|H_{ip_i}|)`. The
untruncated points of column ``k+i`` are assumed to be `[0,...,u_i-1]`
where `u_i=\sum_j |H_{ij}|`. Besides, the first `|H_{i0}|` points
represent `H_{i0}`, the next `|H_{i1}|` points represent `H_{i1}`,
etc...
- ``explain_construction`` (boolean) -- return a string describing
the construction.
- ``check`` (boolean) -- whether to check that output is correct before
returning it. As this is expected to be useless (but we are cautious
guys), you may want to disable it whenever you want speed. Set to ``True``
by default.
REFERENCE:
.. [HananiBIBD] Balanced incomplete block designs and related designs,
Haim Hanani,
Discrete Mathematics 11.3 (1975) pages 255-369.
EXAMPLES::
sage: from sage.combinat.designs.orthogonal_arrays import wilson_construction
sage: from sage.combinat.designs.orthogonal_arrays import OA_relabel
sage: from sage.combinat.designs.orthogonal_arrays_find_recursive import find_wilson_decomposition_with_one_truncated_group
sage: total = 0
sage: for k in range(3,8):
....: for n in range(1,30):
....: if find_wilson_decomposition_with_one_truncated_group(k,n):
....: total += 1
....: f, args = find_wilson_decomposition_with_one_truncated_group(k,n)
....: _ = f(*args)
sage: total
41
sage: print(designs.orthogonal_arrays.explain_construction(7,58))
Wilson's construction n=8.7+1+1 with master design OA(7+2,8)
sage: print(designs.orthogonal_arrays.explain_construction(9,115))
Wilson's construction n=13.8+11 with master design OA(9+1,13)
sage: print(wilson_construction(None,5,11,21,[[(5,5)]],explain_construction=True))
Brouwer-van Rees construction n=11.21+(5.5) with master design OA(5+1,11)
sage: print(wilson_construction(None,71,17,21,[[(4,9),(1,1)],[(9,9),(1,1)]],explain_construction=True))
Brouwer-van Rees construction n=17.21+(9.4+1.1)+(9.9+1.1) with master design OA(71+2,17)
An example using the Brouwer-van Rees generalization::
sage: from sage.combinat.designs.orthogonal_arrays import is_orthogonal_array
sage: from sage.combinat.designs.orthogonal_arrays import wilson_construction
sage: OA = designs.orthogonal_arrays.build(6,11)
sage: OA = [[x if (i<5 or x<5) else None for i,x in enumerate(R)] for R in OA]
sage: OAb = wilson_construction(OA,5,11,21,[[(5,5)]])
sage: is_orthogonal_array(OAb,5,256)
True
"""
# Converting the input to Brouwer-Van Rees form
try:
if u:
int(u[0])
except TypeError:
pass
else:
u = [[(1,uu)] for uu in u]
n_trunc = len(u)
if explain_construction:
if not u:
return ("Product of orthogonal arrays n={}.{}").format(r,m)
elif all(len(uu) == 1 and uu[0][0] == 1 for uu in u):
return ("Wilson's construction n={}.{}+{} with master design OA({}+{},{})"
.format(r, m, "+".join(str(x) for ((_,x),) in u), k, n_trunc, r))
else:
return ("Brouwer-van Rees construction n={}.{}+{} with master design OA({}+{},{})"
.format(r, m,
"+".join("(" + "+".join(str(x)+"."+str(mul) for mul,x in uu) + ")"
for uu in u),
k, n_trunc, r))
if OA is None:
master_design = orthogonal_array(k+n_trunc,r,check=False)
matrix = [range(r)]*k
for uu in u:
uu = sum(x[1] for x in uu)
matrix.append(range(uu)+[None]*(r-uu))
master_design = OA_relabel(master_design, k+n_trunc, r, matrix=matrix)
else:
master_design = OA
for c in u:
assert all(m_ij>=0 and h_size>=0 for m_ij,h_size in c)
assert sum(h_size for m_ij,h_size in c) <= r
# Associates a point ij from a truncated column k+i to
#
# - its corresponding multiplier
# - its corresponding set of points in the final design.
point_to_mij = []
point_to_point_set = []
n=r*m
for i,partition in enumerate(u):
column_i_point_to_mij = []
column_i_point_to_point_set = []
for mij,h_size in partition:
for _ in range(h_size):
column_i_point_to_mij.append(mij)
column_i_point_to_point_set.append(range(n,n+mij))
n+=mij
point_to_mij.append(column_i_point_to_mij)
point_to_point_set.append(column_i_point_to_point_set)
# the set of ij associated with each block
block_to_ij = lambda B: ((i,j) for i,j in enumerate(B[k:]) if j is not None)
# The different profiles (set of mij associated with each block)
block_profiles = set(tuple(point_to_mij[i][j] for i,j in block_to_ij(B)) for B in master_design)
# For each block meeting multipliers m_ij(0),...,m_ij(s) we need a
# OA(k,m+\sum m_{ij(i)})-\sum OA(k,\sum m_{ij(i)})
OA_incomplete = {profile: incomplete_orthogonal_array(k, m+sum(profile),
profile) for profile in block_profiles}
# For each truncated column k+i partitioned into H_{i0},...,H_{ip_i} we
# need an OA(k,\sum_j m_{ij} * |H_{ij}|)
OA_k_u = {sum(c): orthogonal_array(k, sum(c)) for c in point_to_mij}
# Building the actual design !
OA = []
for B in master_design:
# The missing entries belong to the last n_trunc columns
assert all(x is not None for x in B[:k])
# We replace the block of profile m_{ij(0)},...,m_{ij(s)} with a
# OA(k,m+\sum_i m_ij(i)) properly relabelled
matrix = [range(i*m,(i+1)*m) for i in B[:k]]
profile = []
for i,j in block_to_ij(B):
profile.append(point_to_mij[i][j])
for C in matrix:
C.extend(point_to_point_set[i][j])
OA.extend(OA_relabel(OA_incomplete[tuple(profile)],k,m+sum(profile),matrix=matrix))
# The missing OA(k,uu)
for i in range(n_trunc):
length = sum(point_to_mij[i])
OA.extend(OA_relabel(OA_k_u[length],
k,
length,
matrix=[sum(point_to_point_set[i],[])]*k))
if check:
from .designs_pyx import is_orthogonal_array
assert is_orthogonal_array(OA,k,n,2)
return OA
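# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original Sage module).  The parameter
# bookkeeping of Wilson's construction is easy to get wrong, so the
# hypothetical helper below just recomputes the order n = rm + (sum of the
# truncated-column contributions), in both the simple and the Brouwer-van Rees
# form of ``u`` described in the docstring above.
def _demo_wilson_parameter_count(r, m, u):
    total = 0
    for uu in u:
        if isinstance(uu, (list, tuple)):
            # Brouwer-van Rees form: a list of (multiplier, size) pairs
            total += sum(mult * size for mult, size in uu)
        else:
            # simple form: an integer u_i
            total += uu
    return r * m + total
# e.g. _demo_wilson_parameter_count(8, 7, [1, 1]) == 58, matching the
# "Wilson's construction n=8.7+1+1" doctest above, and
# _demo_wilson_parameter_count(11, 21, [[(5, 5)]]) == 256, matching the
# OA(5,256) built in the Brouwer-van Rees example.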
def TD_product(k,TD1,n1,TD2,n2, check=True):
r"""
Return the product of two transversal designs.
From a transversal design `TD_1` of parameters `k,n_1` and a transversal
design `TD_2` of parameters `k,n_2`, this function returns a transversal
design of parameters `k,n` where `n=n_1\times n_2`.
Formally, if the groups of `TD_1` are `V^1_1,\dots,V^1_k` and the groups of
`TD_2` are `V^2_1,\dots,V^2_k`, the groups of the product design are
`V^1_1\times V^2_1,\dots,V^1_k\times V^2_k` and its blocks are the
`\{(x^1_1,x^2_1),\dots,(x^1_k,x^2_k)\}` where `\{x^1_1,\dots,x^1_k\}` is a
block of `TD_1` and `\{x^2_1,\dots,x^2_k\}` is a block of `TD_2`.
INPUT:
- ``TD1, TD2`` -- transversal designs.
- ``k,n1,n2`` (integers) -- see above.
- ``check`` (boolean) -- Whether to check that output is correct before
returning it. As this is expected to be useless (but we are cautious
guys), you may want to disable it whenever you want speed. Set to ``True``
by default.
.. NOTE::
This function uses transversal designs with
`V_1=\{0,\dots,n-1\},\dots,V_k=\{(k-1)n,\dots,kn-1\}` both as input and
output.
EXAMPLES::
sage: from sage.combinat.designs.orthogonal_arrays import TD_product
sage: TD1 = designs.transversal_design(6,7)
sage: TD2 = designs.transversal_design(6,12)
sage: TD6_84 = TD_product(6,TD1,7,TD2,12)
"""
N = n1*n2
TD = []
for X1 in TD1:
for X2 in TD2:
TD.append([x1*n2+(x2%n2) for x1,x2 in zip(X1,X2)])
if check:
assert is_transversal_design(TD,k,N)
return TD
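# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original Sage module).  TD_product only
# needs plain lists, so it can be exercised without Sage on two cyclic
# transversal designs.  The helper below is hypothetical; it uses the same
# labelling convention as this module (group i of a TD(3,n) is
# {i*n,...,(i+1)*n-1}).
def _demo_cyclic_TD3(n):
    # TD(3,n) from the cyclic latin square: cell (i,j) gives block {i, n+j, 2n+(i+j)%n}
    return [[i, n + j, 2 * n + (i + j) % n] for i in range(n) for j in range(n)]
# Example (to run by hand):
#     TD1, TD2 = _demo_cyclic_TD3(2), _demo_cyclic_TD3(3)
#     TD6 = TD_product(3, TD1, 2, TD2, 3, check=False)   # 4*9 = 36 blocks of a TD(3,6)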
def orthogonal_array(k,n,t=2,resolvable=False, check=True,existence=False,explain_construction=False):
r"""
Return an orthogonal array of parameters `k,n,t`.
An orthogonal array of parameters `k,n,t` is a matrix with `k` columns
filled with integers from `[n]` in such a way that for any `t` columns, each
of the `n^t` possible rows occurs exactly once. In
particular, the matrix has `n^t` rows.
More general definitions sometimes involve a `\lambda` parameter, and we
assume here that `\lambda=1`.
An orthogonal array is said to be *resolvable* if it corresponds to a
resolvable transversal design (see
:meth:`sage.combinat.designs.incidence_structures.IncidenceStructure.is_resolvable`).
For more information on orthogonal arrays, see
:wikipedia:`Orthogonal_array`.
INPUT:
- ``k`` -- (integer) number of columns. If ``k=None`` it is set to the
largest value available.
- ``n`` -- (integer) number of symbols
- ``t`` -- (integer; default: 2) -- strength of the array
- ``resolvable`` (boolean) -- set to ``True`` if you want the design to be
resolvable. The `n` classes of the resolvable design are obtained as the
first `n` blocks, then the next `n` blocks, etc ... Set to ``False`` by
default.
- ``check`` -- (boolean) Whether to check that output is correct before
returning it. As this is expected to be useless (but we are cautious
guys), you may want to disable it whenever you want speed. Set to
``True`` by default.
- ``existence`` (boolean) -- instead of building the design, return:
- ``True`` -- meaning that Sage knows how to build the design
- ``Unknown`` -- meaning that Sage does not know how to build the
design, but that the design may exist (see :mod:`sage.misc.unknown`).
- ``False`` -- meaning that the design does not exist.
.. NOTE::
When ``k=None`` and ``existence=True`` the function returns an
integer, i.e. the largest `k` such that we can build a `OA(k,n)`.
- ``explain_construction`` (boolean) -- return a string describing
the construction.
OUTPUT:
The kind of output depends on the input:
- if ``existence=False`` (the default) then the output is a list of lists
that represent an orthogonal array with parameters ``k`` and ``n``
- if ``existence=True`` and ``k`` is an integer, then the function returns a
troolean: either ``True``, ``Unknown`` or ``False``
- if ``existence=True`` and ``k=None`` then the output is the largest value
of ``k`` for which Sage knows how to compute a `TD(k,n)`.
.. NOTE::
This method implements theorems from [Stinson2004]_. See the code's
documentation for details.
.. SEEALSO::
When `t=2` an orthogonal array is also a transversal design (see
:func:`transversal_design`) and a family of mutually orthogonal latin
squares (see
:func:`~sage.combinat.designs.latin_squares.mutually_orthogonal_latin_squares`).
TESTS:
The special cases `n=0,1`::
sage: designs.orthogonal_arrays.build(3,0)
[]
sage: designs.orthogonal_arrays.build(3,1)
[[0, 0, 0]]
sage: designs.orthogonal_arrays.largest_available_k(0)
+Infinity
sage: designs.orthogonal_arrays.largest_available_k(1)
+Infinity
sage: designs.orthogonal_arrays.build(16,0)
[]
sage: designs.orthogonal_arrays.build(16,1)
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
when `t>2` and `k=None`::
sage: t = 3
sage: designs.orthogonal_arrays.largest_available_k(5,t=t) == t
True
sage: _ = designs.orthogonal_arrays.build(t,5,t)
"""
assert n>=0, "n(={}) must be nonnegative".format(n)
# A resolvable OA(k,n) is an OA(k+1,n)
if resolvable:
assert t==2, "resolvable designs are only handled when t=2"
if existence and k is not None:
return orthogonal_array(k+1,n,existence=True)
if k is None:
k = orthogonal_array(None,n,existence=True)-1
if existence:
return k
OA = sorted(orthogonal_array(k+1,n,check=check))
return [B[1:] for B in OA]
# If k is set to None we find the largest value available
if k is None:
if existence:
return largest_available_k(n,t)
elif n == 0 or n == 1:
raise ValueError("there is no upper bound on k when 0<=n<=1")
else:
k = largest_available_k(n,t)
if k < t:
raise ValueError("undefined for k<t")
if existence and _OA_cache_get(k,n) is not None and t == 2:
return _OA_cache_get(k,n)
from .block_design import projective_plane
from .latin_squares import mutually_orthogonal_latin_squares
from .database import OA_constructions, MOLS_constructions, QDM
from .orthogonal_arrays_find_recursive import find_recursive_construction
from .difference_matrices import difference_matrix
may_be_available = _OA_cache_construction_available(k,n) is not False
if n <= 1:
if existence:
return True
if explain_construction:
return "Trivial construction"
OA = [[0]*k]*n
elif k >= n+t:
# When t=2 then k<n+t as it is equivalent to the existence of n-1 MOLS.
# When t>2 the submatrix defined by the rows whose first t-2 elements
# are 0s yields a OA with t=2 and k-(t-2) columns. Thus k-(t-2) < n+2,
# i.e. k<n+t.
if existence:
return False
msg = "There exists no OA({},{}) as k(={})>n+t-1={}".format(k,n,k,n+t-1)
if explain_construction:
return msg
raise EmptySetError(msg)
elif k <= t:
if existence:
return True
if explain_construction:
return "Trivial construction [n]^k"
from itertools import product
return [list(x) for x in product(range(n), repeat=k)]
elif t != 2:
if existence:
return Unknown
msg = "Only trivial orthogonal arrays are implemented for t>=2"
if explain_construction:
return msg
raise NotImplementedError(msg)
elif k <= 3:
if existence:
return True
if explain_construction:
return "Cyclic latin square"
return [[i,j,(i+j)%n] for i in xrange(n) for j in xrange(n)]
# projective spaces are equivalent to OA(n+1,n,2)
elif (projective_plane(n, existence=True) or
(k == n+1 and projective_plane(n, existence=True) is False)):
_OA_cache_set(n+1,n,projective_plane(n, existence=True))
if k == n+1:
if existence:
return projective_plane(n, existence=True)
if explain_construction:
return "From a projective plane of order {}".format(n)
from .block_design import projective_plane_to_OA
p = projective_plane(n, check=False)
OA = projective_plane_to_OA(p, check=False)
else:
if existence:
return True
if explain_construction:
return "From a projective plane of order {}".format(n)
from .block_design import projective_plane_to_OA
p = projective_plane(n, check=False)
OA = [l[:k] for l in projective_plane_to_OA(p, check=False)]
# Constructions from the database (OA)
elif may_be_available and n in OA_constructions and k <= OA_constructions[n][0]:
_OA_cache_set(OA_constructions[n][0],n,True)
if existence:
return True
if explain_construction:
return "the database contains an OA({},{})".format(OA_constructions[n][0],n)
_, construction = OA_constructions[n]
OA = OA_from_wider_OA(construction(),k)
# Constructions from the database II (MOLS: Section 6.5.1 from [Stinson2004])
elif may_be_available and n in MOLS_constructions and k-2 <= MOLS_constructions[n][0]:
_OA_cache_set(MOLS_constructions[n][0]+2,n,True)
if existence:
return True
elif explain_construction:
return "the database contains {} MOLS of order {}".format(MOLS_constructions[n][0],n)
else:
construction = MOLS_constructions[n][1]
mols = construction()
OA = [[i,j]+[m[i,j] for m in mols]
for i in range(n) for j in range(n)]
OA = OA_from_wider_OA(OA,k)
# Constructions from the database III (Quasi-difference matrices)
elif (may_be_available and
(n,1) in QDM and
any(kk>=k and mu<=lmbda and (orthogonal_array(k,u,existence=True) is True) for (_,lmbda,mu,u),(kk,_) in QDM[n,1].items())):
_OA_cache_set(k,n,True)
for (nn,lmbda,mu,u),(kk,f) in QDM[n,1].items():
if (kk>=k and
mu<=lmbda and
(orthogonal_array(k,u,existence=True) is True)):
if existence:
return True
elif explain_construction:
return "the database contains a ({},{};{},{};{})-quasi difference matrix".format(nn,k,lmbda,mu,u)
G,M = f()
M = [R[:k] for R in M]
OA = OA_from_quasi_difference_matrix(M,G,add_col=False)
break
# From Difference Matrices
elif may_be_available and difference_matrix(n,k-1,existence=True):
_OA_cache_set(k,n,True)
if existence:
return True
if explain_construction:
return "from a ({},{})-difference matrix".format(n,k-1)
G,M = difference_matrix(n,k-1)
OA = OA_from_quasi_difference_matrix(M,G,add_col=True)
elif may_be_available and find_recursive_construction(k,n):
_OA_cache_set(k,n,True)
if existence:
return True
f,args = find_recursive_construction(k,n)
if explain_construction:
return f(*args,explain_construction=True)
OA = f(*args)
else:
_OA_cache_set(k,n,Unknown)
if existence:
return Unknown
elif explain_construction:
return "No idea"
raise NotImplementedError("I don't know how to build an OA({},{})!".format(k,n))
if check:
assert is_orthogonal_array(OA,k,n,t,verbose=1), "Sage built an incorrect OA({},{}) O_o".format(k,n)
return OA
def largest_available_k(n,t=2):
r"""
Return the largest `k` such that Sage can build an `OA(k,n)`.
INPUT:
- ``n`` (integer)
- ``t`` -- (integer; default: 2) -- strength of the array
EXAMPLE::
sage: designs.orthogonal_arrays.largest_available_k(0)
+Infinity
sage: designs.orthogonal_arrays.largest_available_k(1)
+Infinity
sage: designs.orthogonal_arrays.largest_available_k(10)
4
sage: designs.orthogonal_arrays.largest_available_k(27)
28
sage: designs.orthogonal_arrays.largest_available_k(100)
10
sage: designs.orthogonal_arrays.largest_available_k(-1)
Traceback (most recent call last):
...
ValueError: n(=-1) was expected to be >=0
"""
from .block_design import projective_plane
if n<0:
raise ValueError("n(={}) was expected to be >=0".format(n))
if t<0:
raise ValueError("t(={}) was expected to be >=0".format(t))
if n == 0 or n == 1:
from sage.rings.infinity import Infinity
return Infinity
elif t == 2:
if projective_plane(n,existence=True):
return n+1
else:
k=1
while _OA_cache_construction_available(k+1,n) is True:
k=k+1
else:
k=t-1
while orthogonal_array(k+1,n,t,existence=True) is True:
k += 1
return k
def incomplete_orthogonal_array(k,n,holes,resolvable=False, existence=False):
r"""
Return an `OA(k,n)-\sum_{1\leq i\leq x} OA(k,s_i)`.
An `OA(k,n)-\sum_{1\leq i\leq x} OA(k,s_i)` is an orthogonal array from
which have been removed disjoint `OA(k,s_1),...,OA(k,s_x)`. If there exist
`OA(k,s_1),...,OA(k,s_x)` they can be used to fill the holes and give rise
to an `OA(k,n)`.
A very useful particular case (see e.g. the Wilson construction in
:func:`wilson_construction`) is when all `s_i=1`. In that case the
incomplete design is an `OA(k,n)-x.OA(k,1)`. Such a design is equivalent to a
transversal design `TD(k,n)` from which `x` disjoint blocks have been
removed.
INPUT:
- ``k,n`` (integers)
- ``holes`` (list of integers) -- respective sizes of the holes to be found.
- ``resolvable`` (boolean) -- set to ``True`` if you want the design to be
resolvable. The classes of the resolvable design are obtained as the first
`n` blocks, then the next `n` blocks, etc ... Set to ``False`` by default.
- ``existence`` (boolean) -- instead of building the design, return:
- ``True`` -- meaning that Sage knows how to build the design
- ``Unknown`` -- meaning that Sage does not know how to build the
design, but that the design may exist (see :mod:`sage.misc.unknown`).
- ``False`` -- meaning that the design does not exist.
.. NOTE::
By convention, the ground set is always `V = \{0, ..., n-1\}`.
If all holes have size 1, in the incomplete orthogonal array returned by
this function the holes are `\{n-1, ..., n-s_1\}^k`,
`\{n-s_1-1,...,n-s_1-s_2\}^k`, etc.
More generally, if ``holes`` is equal to `u_1,...,u_x`, the `i`-th hole is
the set of points `\{n-\sum_{j\geq i}u_j,...,n-\sum_{j\geq i+1}u_j\}^k`.
.. SEEALSO::
:func:`OA_find_disjoint_blocks`
EXAMPLES::
sage: IOA = designs.incomplete_orthogonal_array(3,3,[1,1,1])
sage: IOA
[[0, 1, 2], [0, 2, 1], [1, 0, 2], [1, 2, 0], [2, 0, 1], [2, 1, 0]]
sage: missing_blocks = [[0,0,0],[1,1,1],[2,2,2]]
sage: from sage.combinat.designs.orthogonal_arrays import is_orthogonal_array
sage: is_orthogonal_array(IOA + missing_blocks,3,3,2)
True
TESTS:
Affine planes and projective planes::
sage: for q in xrange(2,100):
....: if is_prime_power(q):
....: assert designs.incomplete_orthogonal_array(q,q,[1]*q,existence=True)
....: assert not designs.incomplete_orthogonal_array(q+1,q,[1]*2,existence=True)
Further tests::
sage: designs.incomplete_orthogonal_array(8,4,[1,1,1],existence=True)
False
sage: designs.incomplete_orthogonal_array(5,10,[1,1,1],existence=True)
Unknown
sage: designs.incomplete_orthogonal_array(5,10,[1,1,1])
Traceback (most recent call last):
...
NotImplementedError: I don't know how to build an OA(5,10)!
sage: designs.incomplete_orthogonal_array(4,3,[1,1])
Traceback (most recent call last):
...
EmptySetError: There is no OA(n+1,n) - 2.OA(n+1,1) as all blocks intersect in a projective plane.
sage: n=10
sage: k=designs.orthogonal_arrays.largest_available_k(n)
sage: designs.incomplete_orthogonal_array(k,n,[1,1,1],existence=True)
True
sage: _ = designs.incomplete_orthogonal_array(k,n,[1,1,1])
sage: _ = designs.incomplete_orthogonal_array(k,n,[1])
A resolvable `OA(k,n)-n.OA(k,1)`. We check that extending each class and
adding the `[i,i,...]` blocks turns it into an `OA(k+1,n)`.::
sage: from sage.combinat.designs.orthogonal_arrays import is_orthogonal_array
sage: k,n=5,7
sage: OA = designs.incomplete_orthogonal_array(k,n,[1]*n,resolvable=True)
sage: classes = [OA[i*n:(i+1)*n] for i in range(n-1)]
sage: for classs in classes: # The design is resolvable !
....: assert all(len(set(col))==n for col in zip(*classs))
sage: OA.extend([[i]*(k) for i in range(n)])
sage: for i,R in enumerate(OA):
....: R.append(i//n)
sage: is_orthogonal_array(OA,k+1,n)
True
Non-existent resolvable incomplete OA::
sage: designs.incomplete_orthogonal_array(9,13,[1]*10,resolvable=True,existence=True)
False
sage: designs.incomplete_orthogonal_array(9,13,[1]*10,resolvable=True)
Traceback (most recent call last):
...
EmptySetError: There is no resolvable incomplete OA(9,13) whose holes' sizes sum to 10<n(=13)
Error message for big holes::
sage: designs.incomplete_orthogonal_array(6,4*9,[9,9,8])
Traceback (most recent call last):
...
NotImplementedError: I was not able to build this OA(6,36)-OA(6,8)-2.OA(6,9)
10 holes of size 9 through the product construction::
sage: iOA = designs.incomplete_orthogonal_array(10,153,[9]*10) # long time
sage: OA9 = designs.orthogonal_arrays.build(10,9) # long time
sage: for i in range(10): # long time
....: iOA.extend([[153-9*(i+1)+x for x in B] for B in OA9]) # long time
sage: is_orthogonal_array(iOA,10,153) # long time
True
An `OA(9,82)-OA(9,9)-OA(9,1)`::
sage: ioa = designs.incomplete_orthogonal_array(9,82,[9,1])
sage: ioa.extend([[x+72 for x in B] for B in designs.orthogonal_arrays.build(9,9)])
sage: ioa.extend([[x+81 for x in B] for B in designs.orthogonal_arrays.build(9,1)])
sage: is_orthogonal_array(ioa,9,82,verbose=1)
True
An `OA(9,82)-OA(9,9)-2.OA(9,1)` in different orders::
sage: ioa = designs.incomplete_orthogonal_array(9,82,[1,9,1])
sage: ioa.extend([[x+71 for x in B] for B in designs.orthogonal_arrays.build(9,1)])
sage: ioa.extend([[x+72 for x in B] for B in designs.orthogonal_arrays.build(9,9)])
sage: ioa.extend([[x+81 for x in B] for B in designs.orthogonal_arrays.build(9,1)])
sage: is_orthogonal_array(ioa,9,82,verbose=1)
True
sage: ioa = designs.incomplete_orthogonal_array(9,82,[9,1,1])
sage: ioa.extend([[x+71 for x in B] for B in designs.orthogonal_arrays.build(9,9)])
sage: ioa.extend([[x+80 for x in B] for B in designs.orthogonal_arrays.build(9,1)])
sage: ioa.extend([[x+81 for x in B] for B in designs.orthogonal_arrays.build(9,1)])
sage: is_orthogonal_array(ioa,9,82,verbose=1)
True
Three holes of size 1::
sage: ioa = designs.incomplete_orthogonal_array(3,6,[1,1,1])
sage: ioa.extend([[i]*3 for i in [3,4,5]])
sage: is_orthogonal_array(ioa,3,6,verbose=1)
True
REFERENCES:
.. [BvR82] More mutually orthogonal Latin squares,
Andries Brouwer and John van Rees
Discrete Mathematics
vol.39, num.3, pages 263-281
1982
http://oai.cwi.nl/oai/asset/304/0304A.pdf
"""
from sage.combinat.designs.database import QDM
for h in holes:
if h<0:
raise ValueError("Holes must have size >=0, but {} was in the list").format(h)
holes = [h for h in holes if h>0]
if not holes:
return orthogonal_array(k,n,existence=existence,resolvable=resolvable)
sum_of_holes = sum(holes)
number_of_holes = len(holes)
max_hole = max(holes)
min_hole = min(holes)
if sum_of_holes > n:
if existence:
return False
raise EmptySetError("The total size of holes must be smaller or equal than the size of the ground set")
if (max_hole == 1 and
resolvable and
sum_of_holes != n):
if existence:
return False
raise EmptySetError("There is no resolvable incomplete OA({},{}) whose holes' sizes sum to {}<n(={})".format(k,n,sum_of_holes,n))
# resolvable OA(k,n)-n.OA(k,1) ==> equivalent to OA(k+1,n)
if max_hole==1 and resolvable:
if existence:
return orthogonal_array(k+1,n,existence=True)
OA = sorted(orthogonal_array(k+1,n))
OA = [B[1:] for B in OA]
# We now relabel the points so that the last n blocks are the [i,i,...]
relabel = [[0]*n for _ in range(k)]
for i,B in enumerate(OA[-n:]):
for ii,xx in enumerate(B):
relabel[ii][xx] = i
OA = [[relabel[i][xx] for i,xx in enumerate(B)] for B in OA]
# Let's drop the last blocks
assert all(OA[-n+i] == [i]*k for i in range(n)), "The last n blocks should be [i,i,...]"
return OA[:-n]
# Easy case
elif max_hole==1 and number_of_holes <= 1:
if existence:
return orthogonal_array(k,n,existence=True)
OA = orthogonal_array(k,n)
independent_set = OA[:number_of_holes]
# This is lemma 2.3 from [BvR82]_
#
# If k>3 and n>(k-1)u and there exists an OA(k,n)-OA(k,u), then there exists
# an OA(k,n)-OA(k,u)-2.OA(k,1)
elif (k >= 3 and
2 <= number_of_holes <= 3 and
n > (k-1)*max_hole and
holes.count(1) == number_of_holes-1 and
incomplete_orthogonal_array(k,n,[max_hole],existence=True)):
if existence:
return True
# The 1<=?<=2 other holes of size 1 can be picked greedily as the
# conflict graph is regular and not complete (see proof of lemma 2.3)
#
# This code is a bit awkward for max_hole may be equal to 1, and the
# holes have to be correctly ordered in the output.
IOA = incomplete_orthogonal_array(k,n,[max_hole])
# place the big hole where it belongs
i = holes.index(max_hole)
holes[i] = [[ii]*k for ii in range(n-max_hole,n)]
# place the first hole of size 1
i = holes.index(1)
for h1 in IOA:
if all(x<n-max_hole for x in h1):
break
holes[i] = [h1]
IOA.remove(h1)
# place the potential second hole of size 1
if number_of_holes == 3:
i = holes.index(1)
for h2 in IOA:
if all(h1[j] != x and x<n-max_hole for j,x in enumerate(h2)):
break
holes[i] = [h2]
IOA.remove(h2)
holes = sum(holes,[])
holes = map(list,zip(*holes))
# Building the relabel matrix
for l in holes:
for i in range(n):
if i not in l:
l.insert(0,i)
for i in range(len(holes)):
holes[i] = {v:i for i,v in enumerate(holes[i])}
IOA = OA_relabel(IOA,k,n,matrix=holes)
return IOA
elif max_hole==1 and number_of_holes >= 2 and k == n+1:
if existence:
return False
raise EmptySetError(("There is no OA(n+1,n) - {}.OA(n+1,1) as all blocks "
"intersect in a projective plane.").format(number_of_holes))
# Holes of size 1 from OA(k+1,n)
elif max_hole==1 and orthogonal_array(k+1,n,existence=True):
if existence:
return True
OA = orthogonal_array(k+1,n)
independent_set = [B[:-1] for B in OA if B[-1] == 0][:number_of_holes]
OA = [B[:-1] for B in OA]
elif max_hole==1 and orthogonal_array(k,n,existence=True):
OA = orthogonal_array(k,n)
try:
independent_set = OA_find_disjoint_blocks(OA,k,n,number_of_holes)
except ValueError:
if existence:
return Unknown
raise NotImplementedError("I was not able to build this OA({},{})-{}.OA({},1)".format(k,n,number_of_holes,k))
if existence:
return True
independent_set = OA_find_disjoint_blocks(OA,k,n,number_of_holes)
elif max_hole==1 and not orthogonal_array(k,n,existence=True):
return orthogonal_array(k,n,existence=existence)
# From a quasi-difference matrix
elif number_of_holes==1 and any(uu==sum_of_holes and mu<=1 and lmbda==1 and k<=kk+1 for (nn,lmbda,mu,uu),(kk,_) in QDM.get((n,1),{}).iteritems()):
for (nn,lmbda,mu,uu),(kk,f) in QDM[n,1].iteritems():
if uu==sum_of_holes and mu<=1 and lmbda==1 and k<=kk+1:
break
G,M = f()
OA = OA_from_quasi_difference_matrix(M,G,fill_hole=False)
return [B[:k] for B in OA]
# Equal holes [h,h,...] with h>1 through OA product construction
#
# (i.e. OA(k,n1)-x.OA(k,1) and OA(k,n2) ==> OA(k,n1.n2)-x.OA(k,n2) )
elif (min_hole > 1 and
max_hole == min_hole and
n%min_hole == 0 and # h divides n
orthogonal_array(k,min_hole,existence=True) and # OA(k,h)
incomplete_orthogonal_array(k,n//min_hole,[1]*number_of_holes,existence=True)): # OA(k,n/h)-x.OA(k,1)
if existence:
return True
h = min_hole
iOA1 = incomplete_orthogonal_array(k,n//holes[0],[1]*number_of_holes)
iOA2 = orthogonal_array(k,h)
return [[B1[i]*h+B2[i] for i in range(k)]
for B1 in iOA1
for B2 in iOA2]
else:
if existence:
return Unknown
# format the list of holes
f = lambda x: "" if x == 1 else "{}.".format(x)
holes_string = "".join("-{}OA({},{})".format(f(holes.count(x)),k,x) for x in sorted(set(holes)))
raise NotImplementedError("I was not able to build this OA({},{}){}".format(k,n,holes_string))
assert number_of_holes == len(independent_set)
for B in independent_set:
OA.remove(B)
OA = OA_relabel(OA,k,n,blocks=independent_set)
return OA
def OA_find_disjoint_blocks(OA,k,n,x):
r"""
Return `x` disjoint blocks contained in a given `OA(k,n)`.
`x` blocks of an `OA` are said to be disjoint if they all have
different values for every given index, i.e. if they correspond to
disjoint blocks in the `TD` associated with the `OA`.
INPUT:
- ``OA`` -- an orthogonal array
- ``k,n,x`` (integers)
.. SEEALSO::
:func:`incomplete_orthogonal_array`
EXAMPLES::
sage: from sage.combinat.designs.orthogonal_arrays import OA_find_disjoint_blocks
sage: k=3;n=4;x=3
sage: Bs = OA_find_disjoint_blocks(designs.orthogonal_arrays.build(k,n),k,n,x)
sage: assert len(Bs) == x
sage: for i in range(k):
....: assert len(set([B[i] for B in Bs])) == x
sage: OA_find_disjoint_blocks(designs.orthogonal_arrays.build(k,n),k,n,5)
Traceback (most recent call last):
...
ValueError: There does not exist 5 disjoint blocks in this OA(3,4)
"""
# Computing an independent set of order x with a Linear Program
from sage.numerical.mip import MixedIntegerLinearProgram, MIPSolverException
p = MixedIntegerLinearProgram()
b = p.new_variable(binary=True)
p.add_constraint(p.sum(b[i] for i in range(len(OA))) == x)
# t[i][j] is the list of blocks of the OA whose i-th coordinate is j
t = [[[] for _ in range(n)] for _ in range(k)]
for c,B in enumerate(OA):
for i,j in enumerate(B):
t[i][j].append(c)
for R in t:
for L in R:
p.add_constraint(p.sum(b[i] for i in L) <= 1)
try:
p.solve()
except MIPSolverException:
raise ValueError("There does not exist {} disjoint blocks in this OA({},{})".format(x,k,n))
b = p.get_values(b)
independent_set = [OA[i] for i,v in b.items() if v]
return independent_set
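# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original Sage module).  The MILP above
# encodes one constraint per (coordinate, value) pair; for very small arrays
# the same notion of "x disjoint blocks" can be found by brute force, which
# makes that constraint concrete.  The helper is hypothetical and is no
# substitute for the solver-based version.
def _demo_find_disjoint_blocks_bruteforce(OA, k, n, x):
    from itertools import combinations
    for candidate in combinations(OA, x):
        # the blocks are disjoint iff, in every coordinate, their x values all differ
        if all(len(set(B[i] for B in candidate)) == x for i in range(k)):
            return [list(B) for B in candidate]
    raise ValueError("no {} pairwise disjoint blocks in this OA({},{})".format(x, k, n))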
def OA_relabel(OA,k,n,blocks=tuple(),matrix=None):
r"""
Return a relabelled version of the OA.
INPUT:
- ``OA`` -- an OA, or rather a list of blocks of length `k`, each
of which contains integers from `0` to `n-1`.
- ``k,n`` (integers)
- ``blocks`` (list of blocks) -- relabels the integers of the OA
from `[0..n-1]` into `[0..n-1]` in such a way that the `i`
blocks from ``blocks`` are respectively relabeled as
``[n-i,...,n-i]``, ..., ``[n-1,...,n-1]``. Thus, the blocks from
this list are expected to have disjoint values for each
coordinate.
If set to the empty list (default) no such relabelling is
performed.
- ``matrix`` -- a matrix of dimensions `k,n` such that if the `i`-th
coordinate of a block is `x`, this `x` will be relabelled with
``matrix[i][x]``. The new value does not have to be an integer between `0`
and `n-1`; in fact it does not have to be an integer at all. This
relabelling is performed *after* the previous one.
If set to ``None`` (default) no such relabelling is performed.
.. NOTE::
A ``None`` coordinate in one block remains a ``None``
coordinate in the final block.
EXAMPLES::
sage: from sage.combinat.designs.orthogonal_arrays import OA_relabel
sage: OA = designs.orthogonal_arrays.build(3,2)
sage: OA_relabel(OA,3,2,matrix=[["A","B"],["C","D"],["E","F"]])
[['A', 'C', 'E'], ['A', 'D', 'F'], ['B', 'C', 'F'], ['B', 'D', 'E']]
sage: TD = OA_relabel(OA,3,2,matrix=[[0,1],[2,3],[4,5]]); TD
[[0, 2, 4], [0, 3, 5], [1, 2, 5], [1, 3, 4]]
sage: from sage.combinat.designs.orthogonal_arrays import is_transversal_design
sage: is_transversal_design(TD,3,2)
True
Making sure that ``[2,2,2,2]`` is a block of `OA(4,3)`. We do this
by relabelling block ``[0,0,0,0]`` which belongs to the design::
sage: designs.orthogonal_arrays.build(4,3)
[[0, 0, 0, 0], [0, 1, 2, 1], [0, 2, 1, 2], [1, 0, 2, 2], [1, 1, 1, 0], [1, 2, 0, 1], [2, 0, 1, 1], [2, 1, 0, 2], [2, 2, 2, 0]]
sage: OA_relabel(designs.orthogonal_arrays.build(4,3),4,3,blocks=[[0,0,0,0]])
[[2, 2, 2, 2], [2, 0, 1, 0], [2, 1, 0, 1], [0, 2, 1, 1], [0, 0, 0, 2], [0, 1, 2, 0], [1, 2, 0, 0], [1, 0, 2, 1], [1, 1, 1, 2]]
TESTS::
sage: OA_relabel(designs.orthogonal_arrays.build(3,2),3,2,blocks=[[0,1],[0,1]])
Traceback (most recent call last):
...
RuntimeError: Two blocks have the same coordinate for one of the k dimensions
"""
if blocks:
l = []
for i,B in enumerate(zip(*blocks)): # the blocks are disjoint
if len(B) != len(set(B)):
raise RuntimeError("Two block have the same coordinate for one of the k dimensions")
l.append(dict(zip([xx for xx in range(n) if xx not in B] + list(B),range(n))))
OA = [[l[i][x] for i,x in enumerate(R)] for R in OA]
if matrix:
OA = [[matrix[i][j] if j is not None else None for i,j in enumerate(R)] for R in OA]
return OA
def OA_n_times_2_pow_c_from_matrix(k,c,G,A,Y,check=True):
r"""
Return an `OA(k, |G| \cdot 2^c)` from a constrained `(G,k-1,2)`-difference
matrix.
This construction appears in [AbelCheng1994]_ and [AbelThesis]_.
Let `G` be an additive Abelian group. We denote by `H` a `GF(2)`-hyperplane
in `GF(2^c)`.
Let `A` be a `(k-1) \times 2|G|` array with entries in `G \times GF(2^c)`
and `Y` be a vector with `k-1` entries in `GF(2^c)`. Let `B` and `C` be
respectively the parts of the array that belong to `G` and to `GF(2^c)`.
The input `A` and `Y` must satisfy the following conditions. For any `i \neq
j` and `g \in G`:
- there are exactly two values of `s` such that `B_{i,s} - B_{j,s} = g`
(i.e. `B` is a `(G,k-1,2)`-difference matrix),
- let `s_1` and `s_2` denote the two values of `s` given above, then exactly
one of `C_{i,s_1} - C_{j,s_1}` and `C_{i,s_2} - C_{j,s_2}` belongs to the
`GF(2)`-hyperplane `(Y_i - Y_j) \cdot H` (we implicitly assumed that `Y_i
\not= Y_j`).
Under these conditions, it is easy to check that the array whose `k-1` rows
of length `|G|\cdot 2^c` indexed by `1 \leq i \leq k-1` given by `A_{i,s} +
(0, Y_i \cdot v)` where `1\leq s \leq 2|G|,v\in H` is a `(G \times
GF(2^c),k-1,1)`-difference matrix.
INPUT:
- ``k,c`` (integers)
- ``G`` -- an additive Abelian group
- ``A`` -- a matrix with entries in `G \times GF(2^c)`
- ``Y`` -- a vector with entries in `GF(2^c)`
- ``check`` -- (boolean) Whether to check that output is correct before
returning it. As this is expected to be useless (but we are cautious
guys), you may want to disable it whenever you want speed. Set to
``True`` by default.
.. NOTE::
By convention, a multiplicative generator `w` of `GF(2^c)^*` is fixed
(inside the function). The hyperplane `H` is the one spanned by `w^0,
w^1, \ldots, w^{c-2}`. The `GF(2^c)` part of the input matrix `A` and
vector `Y` are given in the following form: the integer `i` corresponds
to the element `w^i` and ``None`` corresponds to `0`.
.. SEEALSO::
Several examples use this construction:
- :func:`~sage.combinat.designs.database.OA_9_40`
- :func:`~sage.combinat.designs.database.OA_11_80`
- :func:`~sage.combinat.designs.database.OA_15_112`
- :func:`~sage.combinat.designs.database.OA_11_160`
- :func:`~sage.combinat.designs.database.OA_16_176`
- :func:`~sage.combinat.designs.database.OA_16_208`
- :func:`~sage.combinat.designs.database.OA_15_224`
- :func:`~sage.combinat.designs.database.OA_20_352`
- :func:`~sage.combinat.designs.database.OA_20_416`
- :func:`~sage.combinat.designs.database.OA_20_544`
- :func:`~sage.combinat.designs.database.OA_11_640`
- :func:`~sage.combinat.designs.database.OA_15_896`
EXAMPLE::
sage: from sage.combinat.designs.orthogonal_arrays import OA_n_times_2_pow_c_from_matrix
sage: from sage.combinat.designs.designs_pyx import is_orthogonal_array
sage: A = [
....: [(0,None),(0,None),(0,None),(0,None),(0,None),(0,None),(0,None),(0,None),(0,None),(0,None)],
....: [(0,None),(1,None), (2,2), (3,2), (4,2),(2,None),(3,None),(4,None), (0,2), (1,2)],
....: [(0,None), (2,5), (4,5), (1,2), (3,6), (3,4), (0,0), (2,1), (4,1), (1,6)],
....: [(0,None), (3,4), (1,4), (4,0), (2,5),(3,None), (1,0), (4,1), (2,2), (0,3)],
....: ]
sage: Y = [None, 0, 1, 6]
sage: OA = OA_n_times_2_pow_c_from_matrix(5,3,GF(5),A,Y)
sage: is_orthogonal_array(OA,5,40,2)
True
sage: A[0][0] = (1,None)
sage: OA_n_times_2_pow_c_from_matrix(5,3,GF(5),A,Y)
Traceback (most recent call last):
...
ValueError: the first part of the matrix A must be a
(G,k-1,2)-difference matrix
sage: A[0][0] = (0,0)
sage: OA_n_times_2_pow_c_from_matrix(5,3,GF(5),A,Y)
Traceback (most recent call last):
...
ValueError: B_2,0 - B_0,0 = B_2,6 - B_0,6 but the associated part of the
matrix C does not satisfy the required condition
REFERENCES:
.. [AbelThesis] On the Existence of Balanced Incomplete Block Designs and Transversal Designs,
Julian R. Abel,
PhD Thesis,
University of New South Wales,
1995
.. [AbelCheng1994] \R.J.R. Abel and Y.W. Cheng,
Some new MOLS of order 2np for p a prime power,
The Australasian Journal of Combinatorics, vol 10 (1994)
"""
from sage.rings.finite_rings.finite_field_constructor import FiniteField
from sage.rings.integer import Integer
from itertools import izip,combinations
from .designs_pyx import is_difference_matrix
G_card = G.cardinality()
if len(A) != k-1 or any(len(a) != 2*G_card for a in A):
raise ValueError("A must be a (k-1) x (2|G|) array")
if len(Y) != k-1:
raise ValueError("Y must be a (k-1)-vector")
F = FiniteField(2**c,'w')
GG = G.cartesian_product(F)
# dictionary from integers to elements of GF(2^c): i -> w^i, None -> 0
w = F.multiplicative_generator()
r = {i:w**i for i in xrange(2**c-1)}
r[None] = F.zero()
# check that the first part of the matrix A is a (G,k-1,2)-difference matrix
B = [[G(a) for a,b in R] for R in A]
if check and not is_difference_matrix(zip(*B),G,k-1,2):
raise ValueError("the first part of the matrix A must be a "
"(G,k-1,2)-difference matrix")
# convert:
# the matrix A to a matrix over G \times GF(2^c)
# the vector Y to a vector over GF(2^c)
A = [[GG((G(a),r[b])) for a,b in R] for R in A]
Y = [r[b] for b in Y]
# make the list of the elements of GF(2^c) which belong to the
# GF(2)-subspace <w^0,...,w^(c-2)> (that is the GF(2)-hyperplane orthogonal
# to w^(c-1))
H = [sum((r[i] for i in S), F.zero()) for s in range(c) for S in combinations(range(c-1),s)]
assert len(H) == 2**(c-1)
# check that the second part of the matrix A satisfy the conditions
if check:
G_card = G.cardinality()
for i in range(len(B)):
for j in range(i):
g_to_col_indices = {g: [] for g in G}
Hij = set([(Y[i] - Y[j]) * v for v in H])
for s in range(2 * G_card):
g_to_col_indices[B[i][s] - B[j][s]].append(s)
for s1,s2 in g_to_col_indices.itervalues():
v1 = A[i][s1][1] - A[j][s1][1]
v2 = A[i][s2][1] - A[j][s2][1]
if (v1 in Hij) == (v2 in Hij):
raise ValueError("B_{},{} - B_{},{} = B_{},{} - B_{},{} but"
" the associated part of the matrix C does not satisfies"
" the required condition".format(i,s1,j,s1,i,s2,j,s2))
# build the quasi difference matrix and return the associated OA
Mb = [[e+GG((G.zero(),x*v)) for v in H for e in R] for x,R in izip(Y,A)]
return OA_from_quasi_difference_matrix(zip(*Mb),GG,add_col=True)
def OA_from_quasi_difference_matrix(M,G,add_col=True,fill_hole=True):
r"""
Return an Orthogonal Array from a Quasi-Difference matrix
**Difference Matrices**
Let `G` be a group of order `g`. A *difference matrix* `M` is a `g\times k`
matrix with entries from `G` such that for any `1\leq i < j \leq k` the set
`\{d_{li}-d_{lj}:1\leq l \leq g\}` is equal to `G`.
By concatenating the `g` matrices `M+x` (where `x\in G`), one obtains a
matrix of size `g^2\times k` which is also an `OA(k,g)`.
**Quasi-difference Matrices**
A quasi-difference matrix is a difference matrix with missing entries. The
construction above can be applied again in this case, where the missing
entries in each column of `M` are replaced by unique values on which `G` has
a trivial action.
This produces an incomplete orthogonal array with a "hole" (i.e. missing
rows) of size `u` (i.e. the number of missing values per column of `M`). If
there exists an `OA(k,u)`, then adding the rows of this `OA(k,u)` to the
incomplete orthogonal array should lead to an OA...
**Formal definition** (from the Handbook of Combinatorial Designs [DesignHandbook]_)
Let `G` be an abelian group of order `n`. A
`(n,k;\lambda,\mu;u)`-quasi-difference matrix (QDM) is a matrix `Q=(q_{ij})`
with `\lambda(n-1+2u)+\mu` rows and `k` columns, with each entry either
empty or containing an element of `G`. Each column contains exactly `\lambda
u` empty entries, and each row contains at most one empty entry. Furthermore, for
each `1 \leq i < j \leq k` the multiset
.. MATH::
\{ q_{li} - q_{lj}: 1 \leq l \leq \lambda (n-1+2u)+\mu, \text{ with }q_{li}\text{ and }q_{lj}\text{ not empty}\}
contains every nonzero element of `G` exactly `\lambda` times, and contains
0 exactly `\mu` times.
**Construction**
If a `(n,k;\lambda,\mu;u)`-QDM exists and `\mu \leq \lambda`, then an
`ITD_\lambda (k,n+u;u)` exists. Start with a `(n,k;\lambda,\mu;u)`-QDM `A`
over the group `G`. Append `\lambda-\mu` rows of zeroes. Then select `u`
elements `\infty_1,\dots,\infty_u` not in `G`, and replace the empty
entries, each by one of these infinite symbols, so that `\infty_i` appears
exactly once in each column. Develop the resulting matrix over the group `G`
(leaving infinite symbols fixed), to obtain a `\lambda (n^2+2nu)\times k`
matrix `T`. Then `T` is an orthogonal array with `k` columns and index
`\lambda`, having `n+u` symbols and one hole of size `u`.
Adding to `T` an `OA(k,u)` with elements `\infty_1,\dots,\infty_u` yields
the `ITD_\lambda(k,n+u;u)`.
For more information, see the Handbook of Combinatorial Designs
[DesignHandbook]_ or
`<http://web.cs.du.edu/~petr/milehigh/2013/Colbourn.pdf>`_.
INPUT:
- ``M`` -- the difference matrix whose entries belong to ``G``
- ``G`` -- a group
- ``add_col`` (boolean) -- whether to add a column to the final OA equal to
`(x_1,\dots,x_g,x_1,\dots,x_g,\dots)` where `G=\{x_1,\dots,x_g\}`.
- ``fill_hole`` (boolean) -- whether to return the incomplete orthogonal
array, or complete it with the `OA(k,u)` (default). When the hole is not
filled, no block of the incomplete OA contains more than one value `\geq
|G|`.
EXAMPLES::
sage: _ = designs.orthogonal_arrays.build(6,20) # indirect doctest
"""
from itertools import izip
Gn = int(G.cardinality())
k = len(M[0])+bool(add_col)
G_to_int = {x:i for i,x in enumerate(G)}
# A cache for addition in G
G_sum = [[0]*Gn for _ in range(Gn)]
for x,i in G_to_int.iteritems():
for xx,ii in G_to_int.iteritems():
G_sum[i][ii] = G_to_int[x+xx]
# Convert M to integers
M = [[None if x is None else G_to_int[G(x)] for x in line] for line in M]
# Each line is expanded by [g+x for x in line for g in G] then relabeled
# with integers. Missing values are also handled.
new_M = []
for line in izip(*M):
inf = Gn
new_line = []
for x in line:
if x is None:
new_line.extend([inf]*Gn)
inf = inf + 1
else:
new_line.extend(G_sum[x])
new_M.append(new_line)
if add_col:
new_M.append([i//Gn for i in range(len(new_line))])
# new_M = transpose(new_M)
new_M = zip(*new_M)
# Filling holes with a smaller orthogonal array
if inf > Gn and fill_hole:
for L in orthogonal_array(k,inf-Gn,2):
new_M.append(tuple([x+Gn for x in L]))
return new_M
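# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original Sage module).  The
# "Difference Matrices" paragraph above develops a g x k difference matrix M
# into the g^2 x k array obtained by stacking the translates M+x.  Over the
# cyclic group Z_g, represented by integers modulo g, this is a one-liner; the
# helper below is hypothetical and only meant to make the construction
# concrete.
def _demo_develop_difference_matrix(M, g):
    # M is a g x k difference matrix over Z_g; the result is an OA(k,g) with g^2 rows
    return [[(entry + x) % g for entry in row] for x in range(g) for row in M]
# e.g. M[i][j] = i*j % 3 is a difference matrix over Z_3, and
# _demo_develop_difference_matrix([[i*j % 3 for j in range(3)] for i in range(3)], 3)
# is an OA(3,3) with 9 rows.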
def OA_from_Vmt(m,t,V):
r"""
Return an Orthogonal Array from a `V(m,t)`
INPUT:
- ``m,t`` (integers)
- ``V`` -- the vector `V(m,t)`.
.. SEEALSO::
- :func:`QDM_from_Vmt`
- :func:`OA_from_quasi_difference_matrix`
EXAMPLES::
sage: _ = designs.orthogonal_arrays.build(6,46) # indirect doctest
"""
from sage.rings.finite_rings.finite_field_constructor import FiniteField
q = m*t+1
Fq, M = QDM_from_Vmt(m,t,V)
return OA_from_quasi_difference_matrix(M,Fq,add_col = False)
def QDM_from_Vmt(m,t,V):
r"""
Return a QDM from a `V(m,t)`
**Definition**
Let `q` be a prime power and let `q=mt+1` for `m,t` integers. Let `\omega`
be a primitive element of `\mathbb{F}_q`. A `V(m,t)` vector is a vector
`(a_1,\dots,a_{m+1})` for which, for each `1\leq k < m`, the differences
.. MATH::
\{a_{i+k}-a_i:1\leq i \leq m+1,i+k\neq m+2\}
represent the `m` cyclotomic classes of `\mathbb{F}_{mt+1}` (compute subscripts
modulo `m+2`). In other words, for fixed `k`, if
`a_{i+k}-a_i=\omega^{mx+\alpha}` and `a_{j+k}-a_j=\omega^{my+\beta}` then
`\alpha\not\equiv\beta \mod{m}`.
*Construction of a quasi-difference matrix from a `V(m,t)` vector*
Starting with a `V(m,t)` vector `(a_1,\dots,a_{m+1})`, form a single row of
length `m+2` whose first entry is empty, and whose remaining entries are
`(a_1,\dots,a_{m+1})`. Form `t` rows by multiplying this row by the `t`-th
roots of unity, i.e. the powers of `\omega^m`. From each of these `t` rows, form
`m+2` rows by taking the `m+2` cyclic shifts of the row. The result is a
`(q,m+2;1,0;t)-QDM`.
For more information, refer to the Handbook of Combinatorial Designs
[DesignHandbook]_.
INPUT:
- ``m,t`` (integers)
- ``V`` -- the vector `V(m,t)`.
.. SEEALSO::
:func:`OA_from_quasi_difference_matrix`
EXAMPLES::
sage: _ = designs.orthogonal_arrays.build(6,46) # indirect doctest
"""
from sage.rings.finite_rings.finite_field_constructor import FiniteField
q = m*t+1
Fq = FiniteField(q, 'x')
w = Fq.multiplicative_generator()
M = []
wm = w**m
for i in range(t):
L = [None]
for e in V:
L.append(e*wm**i)
for ii in range(m+2):
M.append(L[-ii:]+L[:-ii]) # cyclic shift
M.append([0]*(m+2))
return Fq, M
def OA_from_PBD(k,n,PBD, check=True):
r"""
Return an `OA(k,n)` from a PBD
**Construction**
Let `\mathcal B` be a `(n,K,1)`-PBD. If there exists for every `i\in K` a
`TD(k,i)-i\times TD(k,1)` (i.e. if there exist `k` idempotent MOLS), then
one can obtain a `OA(k,n)` by concatenating:
- A `TD(k,i)-i\times TD(k,1)` defined over the elements of `B` for every `B
\in \mathcal B`.
- The rows `(i,...,i)` of length `k` for every `i\in [n]`.
.. NOTE::
This function raises an exception when Sage is unable to build the
necessary designs.
INPUT:
- ``k,n`` (integers)
- ``PBD`` -- a PBD on `0,...,n-1`.
EXAMPLES:
We start from the example VI.1.2 from the [DesignHandbook]_ to build an
`OA(3,10)`::
sage: from sage.combinat.designs.orthogonal_arrays import OA_from_PBD
sage: from sage.combinat.designs.designs_pyx import is_orthogonal_array
sage: pbd = [[0,1,2,3],[0,4,5,6],[0,7,8,9],[1,4,7],[1,5,8],
....: [1,6,9],[2,4,9],[2,5,7],[2,6,8],[3,4,8],[3,5,9],[3,6,7]]
sage: oa = OA_from_PBD(3,10,pbd)
sage: is_orthogonal_array(oa, 3, 10)
True
But we cannot build an `OA(4,10)` for this PBD (although there
exists an `OA(4,10)`)::
sage: OA_from_PBD(4,10,pbd)
Traceback (most recent call last):
...
EmptySetError: There is no OA(n+1,n) - 3.OA(n+1,1) as all blocks intersect in a projective plane.
Or an `OA(3,6)` (as the PBD has 10 points)::
sage: _ = OA_from_PBD(3,6,pbd)
Traceback (most recent call last):
...
RuntimeError: PBD is not a valid Pairwise Balanced Design on [0,...,5]
"""
# Size of the sets of the PBD
K = set(map(len,PBD))
if check:
from .designs_pyx import is_pairwise_balanced_design
if not is_pairwise_balanced_design(PBD, n, K):
raise RuntimeError("PBD is not a valid Pairwise Balanced Design on [0,...,{}]".format(n-1))
# Building the IOA
OAs = {i:incomplete_orthogonal_array(k,i,(1,)*i) for i in K}
OA = []
# For every block B of the PBD we add to the OA rows covering all pairs of
# (distinct) coordinates within the elements of B.
for S in PBD:
for B in OAs[len(S)]:
OA.append([S[i] for i in B])
# Adding the 0..0, 1..1, 2..2 .... rows
for i in range(n):
OA.append([i]*k)
if check:
assert is_orthogonal_array(OA,k,n,2)
return OA
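# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original Sage module).  A quick sanity
# count for the construction above: each block B contributes the |B|^2 - |B|
# rows of a TD(k,|B|) minus its |B| diagonal blocks, and the n constant rows
# are appended at the end, so a valid (n,K,1)-PBD yields exactly n^2 rows.
# The helper is hypothetical.
def _demo_oa_from_pbd_row_count(n, PBD):
    return sum(len(S) ** 2 - len(S) for S in PBD) + n
# e.g. for the VI.1.2 PBD of the docstring above:
# 3*(16-4) + 9*(9-3) + 10 == 100 == 10**2 rows for the OA(3,10).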
def OA_from_wider_OA(OA,k):
r"""
Return the first `k` columns of `OA`.
If `OA` has `k` columns, this function returns `OA` immediately.
INPUT:
- ``OA`` -- an orthogonal array.
- ``k`` (integer)
EXAMPLES::
sage: from sage.combinat.designs.orthogonal_arrays import OA_from_wider_OA
sage: OA_from_wider_OA(designs.orthogonal_arrays.build(6,20,2),1)[:5]
[(19,), (19,), (19,), (19,), (19,)]
sage: _ = designs.orthogonal_arrays.build(5,46) # indirect doctest
"""
if len(OA[0]) == k:
return OA
return [L[:k] for L in OA]
class OAMainFunctions():
r"""
Functions related to orthogonal arrays.
An orthogonal array of parameters `k,n,t` is a matrix with `k` columns
filled with integers from `[n]` in such a way that for any `t` columns, each
of the `n^t` possible rows occurs exactly once. In particular, the matrix
has `n^t` rows.
For more information on orthogonal arrays, see
:wikipedia:`Orthogonal_array`.
From here you have access to:
- :meth:`build(k,n,t=2) <build>`: return an orthogonal array with the given
parameters.
- :meth:`is_available(k,n,t=2) <is_available>`: answer whether there is a
construction available in Sage for a given set of parameters.
- :meth:`exists(k,n,t=2) <exists>`: answer whether an orthogonal array with
these parameters exist.
- :meth:`largest_available_k(n,t=2) <largest_available_k>`: return the
largest integer `k` such that Sage knows how to build an `OA(k,n)`.
- :meth:`explain_construction(k,n,t=2) <explain_construction>`: return a
string that explains the construction that Sage uses to build an
`OA(k,n)`.
EXAMPLES::
sage: designs.orthogonal_arrays.build(3,2)
[[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0]]
sage: designs.orthogonal_arrays.build(5,5)
[[0, 0, 0, 0, 0], [0, 1, 2, 3, 4], [0, 2, 4, 1, 3],
[0, 3, 1, 4, 2], [0, 4, 3, 2, 1], [1, 0, 4, 3, 2],
[1, 1, 1, 1, 1], [1, 2, 3, 4, 0], [1, 3, 0, 2, 4],
[1, 4, 2, 0, 3], [2, 0, 3, 1, 4], [2, 1, 0, 4, 3],
[2, 2, 2, 2, 2], [2, 3, 4, 0, 1], [2, 4, 1, 3, 0],
[3, 0, 2, 4, 1], [3, 1, 4, 2, 0], [3, 2, 1, 0, 4],
[3, 3, 3, 3, 3], [3, 4, 0, 1, 2], [4, 0, 1, 2, 3],
[4, 1, 3, 0, 2], [4, 2, 0, 3, 1], [4, 3, 2, 1, 0],
[4, 4, 4, 4, 4]]
What is the largest value of `k` for which Sage knows how to compute a
`OA(k,14,2)`?::
sage: designs.orthogonal_arrays.largest_available_k(14)
6
If you ask for an orthogonal array that does not exist, then you will
either obtain an ``EmptySetError`` (if it knows that such an orthogonal array
does not exist) or a ``NotImplementedError``::
sage: designs.orthogonal_arrays.build(4,2)
Traceback (most recent call last):
...
EmptySetError: There exists no OA(4,2) as k(=4)>n+t-1=3
sage: designs.orthogonal_arrays.build(12,20)
Traceback (most recent call last):
...
NotImplementedError: I don't know how to build an OA(12,20)!
"""
def __init__(self,*args,**kwds):
r"""
There is nothing here.
TESTS::
sage: designs.orthogonal_arrays(4,5) # indirect doctest
Traceback (most recent call last):
...
RuntimeError: This is not a function but a class. You want to call the designs.orthogonal_arrays.* functions
"""
raise RuntimeError("This is not a function but a class. You want to call the designs.orthogonal_arrays.* functions")
largest_available_k = staticmethod(largest_available_k)
@staticmethod
def explain_construction(k,n,t=2):
r"""
Return a string describing how to build an `OA(k,n)`.
INPUT:
- ``k,n,t`` (integers) -- parameters of the orthogonal array.
EXAMPLE::
sage: designs.orthogonal_arrays.explain_construction(9,565)
"Wilson's construction n=23.24+13 with master design OA(9+1,23)"
sage: designs.orthogonal_arrays.explain_construction(10,154)
'the database contains a (137,10;1,0;17)-quasi difference matrix'
"""
return orthogonal_array(k,n,t,explain_construction=True)
@staticmethod
def build(k,n,t=2,resolvable=False):
r"""
Return an `OA(k,n)` of strength `t`
An orthogonal array of parameters `k,n,t` is a matrix with `k`
columns filled with integers from `[n]` in such a way that for any
`t` columns, each of the `n^t` possible rows occurs exactly
once. In particular, the matrix has `n^t` rows.
More general definitions sometimes involve a `\lambda` parameter, and we
assume here that `\lambda=1`.
For more information on orthogonal arrays, see
:wikipedia:`Orthogonal_array`.
INPUT:
- ``k,n,t`` (integers) -- parameters of the orthogonal array.
- ``resolvable`` (boolean) -- set to ``True`` if you want the design to be
resolvable. The `n` classes of the resolvable design are obtained as the
first `n` blocks, then the next `n` blocks, etc ... Set to ``False`` by
default.
EXAMPLES::
sage: designs.orthogonal_arrays.build(3,3,resolvable=True) # indirect doctest
[[0, 0, 0],
[1, 2, 1],
[2, 1, 2],
[0, 2, 2],
[1, 1, 0],
[2, 0, 1],
[0, 1, 1],
[1, 0, 2],
[2, 2, 0]]
sage: OA_7_50 = designs.orthogonal_arrays.build(7,50) # indirect doctest
"""
return orthogonal_array(k,n,t,resolvable=resolvable)
@staticmethod
def exists(k,n,t=2):
r"""
Return the existence status of an `OA(k,n)`
INPUT:
- ``k,n,t`` (integers) -- parameters of the orthogonal array.
.. WARNING::
The function does not only return booleans, but ``True``,
``False``, or ``Unknown``.
.. SEEALSO::
:meth:`is_available`
        EXAMPLES::
sage: designs.orthogonal_arrays.exists(3,6) # indirect doctest
True
sage: designs.orthogonal_arrays.exists(4,6) # indirect doctest
Unknown
sage: designs.orthogonal_arrays.exists(7,6) # indirect doctest
False
"""
return orthogonal_array(k,n,t,existence=True)
@staticmethod
def is_available(k,n,t=2):
r"""
Return whether Sage can build an `OA(k,n)`.
INPUT:
- ``k,n,t`` (integers) -- parameters of the orthogonal array.
.. SEEALSO::
:meth:`exists`
        EXAMPLES::
sage: designs.orthogonal_arrays.is_available(3,6) # indirect doctest
True
sage: designs.orthogonal_arrays.is_available(4,6) # indirect doctest
False
"""
return orthogonal_array(k,n,t,existence=True) is True
|
the-stack_0_16333
|
import numpy as np
import os
import json
import time
from .utils import log
import pdb
def get_relationship_feat(committee, pairs):
start = time.time()
votefeat = []
for i,cmt in enumerate(committee):
log("\t\tprocessing: {}/{}".format(i, len(committee)))
knn = cmt[0]
k = knn.shape[1]
find0 = (knn[pairs[:,0], :] == np.tile(pairs[:,1:], (1, k))).any(axis=1, keepdims=True)
find1 = (knn[pairs[:,1], :] == np.tile(pairs[:,:1], (1, k))).any(axis=1, keepdims=True)
votefeat.append((find0 | find1).astype(np.float32))
log('\t\trelationship feature done. time: {}'.format(time.time() - start))
return np.hstack(votefeat)
def cosine_similarity(feat1, feat2):
assert feat1.shape == feat2.shape
feat1 /= np.linalg.norm(feat1, axis=1).reshape(-1, 1)
feat2 /= np.linalg.norm(feat2, axis=1).reshape(-1, 1)
    return np.einsum('ij,ij->i', feat1, feat2).reshape(-1, 1)  # row-wise dot product
def get_affinity_feat(features, pairs):
start = time.time()
cosine_simi = []
for i,feat in enumerate(features):
log("\t\tprocessing: {}/{}".format(i, len(features)))
cosine_simi.append(cosine_similarity(feat[pairs[:,0],:], feat[pairs[:,1],:]))
log('\t\taffinity feature done. time: {}'.format(time.time() - start))
return np.concatenate(cosine_simi, axis=1)
def intersection(array1, array2, trunk=-1):
'''
To find row wise intersection size.
Input: array1, array2: Nxk np array
trunk: if out of memory, set trunk to be smaller, e.g., 100000;
               note that a smaller trunk will increase the processing time.
'''
N, k = array1.shape
if trunk == -1:
tile1 = np.tile(array1.reshape(N, k, 1), (1, 1, k))
tile2 = np.tile(array2.reshape(N, 1, k), (1, k, 1))
inter_num = ((tile1 == tile2) & (tile1 != -1) & (tile2 != -1)).sum(axis=(1,2))
else:
inter_num = []
for i in range(0, N, trunk):
end = min(i + trunk, N)
L = end - i
tile1 = np.tile(array1[i:end].reshape(L, k, 1), (1, 1, k))
tile2 = np.tile(array2[i:end].reshape(L, 1, k), (1, k, 1))
inter_num.append(((tile1 == tile2) & (tile1 != -1) & (tile2 != -1)).sum(axis=(1,2)))
inter_num = np.concatenate(inter_num, axis=0)
return inter_num
def get_structure_feat(members, pairs):
start = time.time()
distr_commnb = []
for i,m in enumerate(members):
log("\t\tprocessing: {}/{}".format(i, len(members)))
knn = m[0]
#comm_neighbor = np.array([len(np.intersect1d(knn[p[0]], knn[p[1]], assume_unique=True)) for p in pairs]).astype(np.float32)[:,np.newaxis]
comm_neighbor = intersection(knn[pairs[:,0], :], knn[pairs[:,1], :])[:, np.newaxis]
distr_commnb.append(comm_neighbor)
log('\t\tstructure feature done. time: {}'.format(time.time() - start))
return np.hstack(distr_commnb)
def create_pairs(base):
pairs = []
knn = base[0]
anchor = np.tile(np.arange(len(knn)).reshape(len(knn), 1), (1, knn.shape[1]))
selidx = np.where((knn != -1) & (knn != anchor))
pairs = np.hstack((anchor[selidx].reshape(-1, 1), knn[selidx].reshape(-1, 1)))
pairs = np.sort(pairs, axis=1)
pairs = np.unique(pairs, axis=0)
return pairs
def get_label(id_label, pairs):
return (id_label[pairs[:,0]] == id_label[pairs[:,1]]).astype(np.float32)[:,np.newaxis]
def create(data_name, args, phase='test'):
if phase == 'test':
output = "{}/output/pairset/k{}".format(args.exp_root, args.k)
else:
output = "data/{}/pairset/k{}".format(data_name, args.k)
members = [args.base] + args.committee
# loading
if 'affinity' in args.mediator['input'] and not os.path.isfile(output + "/affinity.npy"):
log("\tLoading features")
features = []
for m in members:
features.append(np.fromfile('data/{}/features/{}.bin'.format(data_name, m), dtype=np.float32).reshape(-1, args.feat_dim))
if not os.path.isfile(output + "/pairs.npy") or not os.path.isfile(output + "/structure.npy"):
log("\tLoading base KNN")
knn_file = np.load('data/{}/knn/{}_k{}.npz'.format(data_name, args.base, args.k))
knn_base = (knn_file['idx'], knn_file['dist'])
if 'relationship' in args.mediator['input'] or 'structure' in args.mediator['input']:
log("\tLoading committee KNN")
knn_committee = []
committee_knn_fn = ['data/{}/knn/{}_k{}.npz'.format(data_name, cmt, args.k) for cmt in args.committee]
for cfn in committee_knn_fn:
knn_file = np.load(cfn)
knn_committee.append((knn_file['idx'], knn_file['dist']))
if not os.path.isdir(output):
os.makedirs(output)
# get pairs
if os.path.isfile(output + "/pairs.npy"):
log('\tLoading pairs')
pairs = np.load(output + "/pairs.npy")
else:
log('\tgetting pairs')
pairs = create_pairs(knn_base)
np.save(output + "/pairs.npy", pairs)
log('\tgot {} pairs'.format(len(pairs)))
# get features
if 'relationship' in args.mediator['input']:
if not os.path.isfile(output + "/relationship.npy"):
log('\tgetting relationship features')
relationship_feat = get_relationship_feat(knn_committee, pairs)
np.save(output + "/relationship.npy", relationship_feat)
else:
log("\trelationship features exist")
if 'affinity' in args.mediator['input']:
if not os.path.isfile(output + "/affinity.npy"):
log('\tgetting affinity features')
affinity_feat = get_affinity_feat(features, pairs)
np.save(output + "/affinity.npy", affinity_feat)
else:
log("\taffinity features exist")
if 'structure' in args.mediator['input']:
if not os.path.isfile(output + "/structure.npy"):
log('\tgetting structure features')
structure_feat = get_structure_feat([knn_base] + knn_committee, pairs)
np.save(output + "/structure.npy", structure_feat)
else:
log("\tstructure features exist")
# get labels when training
if phase == 'train' or args.evaluation:
if not os.path.isfile(output + "/pair_label.npy"):
if not os.path.isfile("data/{}/meta.txt".format(data_name)):
raise Exception("Meta file not exist: {}, please create meta.txt or set evaluation to False".format("data/{}/meta.txt".format(data_name)))
with open("data/{}/meta.txt".format(data_name), 'r') as f:
lines = f.readlines()
log('\tgetting pairs label')
id_label = np.array([int(l.strip()) for l in lines])
label = get_label(id_label, pairs)
np.save(output + "/pair_label.npy", label)
else:
log("\tpairs label exist")
|
the-stack_0_16335
|
from logging import getLogger, StreamHandler, DEBUG, INFO
from sys import stdout
def setup_logging(debug=False):
logger = getLogger('raptiformica')
logger.setLevel(DEBUG if debug else INFO)
console_handler = StreamHandler(stdout)
logger.addHandler(console_handler)
return logger
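if __name__ == '__main__':
    # Illustrative manual check of the helper above; the message below is only an example.
    logger = setup_logging(debug=True)
    logger.debug('debug logging is enabled')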
|
the-stack_0_16338
|
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.autograd import Variable
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
from PIL import Image
import numpy as np
import time
import os
from fully_conv_nets import VGGNet, FCNs
import argparse
parser = argparse.ArgumentParser(description="This program trains a Neural Network to detect either cars or roads. It can also load a pretrained model to predict new roads and cars.")
parser.add_argument('type', choices=['roads','cars'], help="Choose the type of model to train/load.")
parser.add_argument('-v', '--validate', action='store_true', help="When passing v, a model will be loaded and validated using validation images. If this argument is not passed, then a new model will be trained and stored in the models folder.")
parser.add_argument('-p', '--persist', action='store_true', help="Persist image.")
parser.add_argument('-s', '--show', action='store_true', help="Show image.")
args = parser.parse_args()
n_class = 2
batch_size = 2
epochs = 200
lr = 1e-4
momentum = 0
w_decay = 1e-5
step_size = 50
gamma = 0.5
if args.type == 'roads':
configs = "roads-CrossEnt_batch{}_epoch{}_RMSprop_scheduler-step{}-gamma{}_lr{}_momentum{}_w_decay{}".format(batch_size, epochs, step_size, gamma, lr, momentum, w_decay)
raw_imgs_dir = 'dataset/raw_imgs'
masks_dir = 'dataset/masks'
model_to_load = "FCNs-BCEWithLogits_batch3_epoch90_RMSprop_scheduler-step50-gamma0.5_lr0.0001_momentum0_w_decay1e-05"
validation_imgs = 'dataset/validation_imgs'
validation_masks = 'dataset/validation_masks'
predictions_path = 'dataset/road_preds/prediction_'
pred_imgs = 'dataset/road_pred_imgs'
else:
configs = "cars-CrossEnt_batch{}_epoch{}_RMSprop_scheduler-step{}-gamma{}_lr{}_momentum{}_w_decay{}".format(batch_size, epochs, step_size, gamma, lr, momentum, w_decay)
raw_imgs_dir = 'dataset/car_raw_imgs'
masks_dir = 'dataset/car_masks'
model_to_load = "cars-CrossEnt_batch2_epoch100_RMSprop_scheduler-step50-gamma0.5_lr0.0001_momentum0_w_decay1e-05"
validation_imgs = 'dataset/validation_imgs'
validation_masks = 'dataset/validation_masks'
predictions_path = 'dataset/car_preds/prediction_'
pred_imgs = 'dataset/car_pred_imgs'
# create dir for model
model_dir = "models"
if not os.path.exists(model_dir):
os.makedirs(model_dir)
model_path = os.path.join(model_dir, configs)
use_gpu = torch.cuda.is_available()
vgg_model = VGGNet(requires_grad=True, remove_fc=True, model='vgg16')
from lib import LastModel
fcn_model = FCNs(pretrained_net=vgg_model, n_class=n_class, last_layer=LastModel(32, n_class))
if use_gpu:
ts = time.time()
vgg_model = vgg_model.cuda()
fcn_model = fcn_model.cuda()
print("Finish cuda loading, time elapsed {}".format(time.time() - ts))
criterion = nn.CrossEntropyLoss()
optimizer = optim.RMSprop(fcn_model.parameters(), lr=lr, momentum=momentum, weight_decay=w_decay)
scheduler = lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma)  # decay LR by a factor of gamma (0.5) every step_size (50) epochs
training_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
transforms.ColorJitter(brightness=0, contrast=0, saturation=0, hue=0)
])
from lib import TensorDataset
dataset = TensorDataset(raw_imgs_dir, masks_dir, args.type, transform=training_transform)
loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size)
def train():
for epoch in range(epochs):
scheduler.step()
ts = time.time()
for iter, batch in enumerate(loader):
optimizer.zero_grad()
if use_gpu:
inputs = Variable(batch['X'].cuda())
labels = Variable(batch['Y'].cuda())
else:
inputs, labels = Variable(batch['X']), Variable(batch['Y'])
outputs = fcn_model(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
if iter % 10 == 0:
print("epoch{}, iter{}, loss: {}".format(epoch, iter, loss.item()))
print("Finish epoch {}, time elapsed {}".format(epoch, time.time() - ts))
torch.save(fcn_model, model_path)
if __name__ == "__main__":
if args.validate:
fcn_model = torch.load("models/" + model_to_load)
else:
train()
validation_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
validation_dataset = TensorDataset(validation_imgs, validation_masks, args.type, transform=validation_transform)
loader = torch.utils.data.DataLoader(validation_dataset)
import pickle
from lib import get_simple_masked_img
for idx, batch in enumerate(loader):
if use_gpu:
inputs = Variable(batch['X'].cuda())
labels = Variable(batch['Y'].cuda())
else:
inputs, labels = Variable(batch['X']), Variable(batch['Y'])
y_val_pred = fcn_model(inputs)
str_idx = str(idx + 1)
img_name = ('0' * (7 - len(str_idx) + 1)) + str_idx + '.png'
raw_img = Image.open(validation_imgs + "/" + img_name).convert('RGB')
get_simple_masked_img(y_val_pred[0], raw_img, pred_imgs, img_name, args.persist, args.show)
with open(predictions_path + str_idx + '.pred', 'wb') as handle:
pickle.dump((y_val_pred[0], raw_img), handle, protocol=pickle.HIGHEST_PROTOCOL)
|
the-stack_0_16339
|
# -*- coding: utf-8 -*-
"""
Created on Sat May 18 12:31:06 2019
@author: MAGESHWARAN
"""
import cv2
import numpy as np
from image_processing import one_over_other, load_image, display_image  # load_image/display_image are assumed to come from the same local module (used in __main__ below)
def detect_edges(image, kernel=np.ones((5, 5), dtype=np.uint8)):
""" Perform Edge detection on the image using Morphology Gradient
Inputs:
image (np.array) : input Image
kernel (np.array): Filter to be used on the image
Output:
result(np.array) : Image with Edges detected
"""
image = image.astype(np.uint8)
result = cv2.morphologyEx(image, cv2.MORPH_GRADIENT, kernel)
return result
def gradients(image, method="laplacian", ksize=5, **kwargs):
""" Perform Edge detection on the image using sobel or laplace methods
Inputs:
image (np.array) : input Image
method (string) : either sobel or laplacian
ksize (int) : Size of the kernel to be used
axis (int) : 0 for sobel operation in 'x' axis
1 for sobel operation in 'y' axis
2 for sobel operation in 'x,y' axis
Output:
result(np.array) : Image with Edges detected
"""
if method == "sobel":
axis = kwargs.pop("axis")
if axis == 0:
sobel_x = cv2.Sobel(image, cv2.CV_64F, 1, 0, ksize=ksize)
return sobel_x
elif axis == 1:
sobel_y = cv2.Sobel(image, cv2.CV_64F, 0, 1, ksize=ksize)
return sobel_y
else:
sobel_x = cv2.Sobel(image, cv2.CV_64F, 1, 0, ksize=ksize)
sobel_y = cv2.Sobel(image, cv2.CV_64F, 0, 1, ksize=ksize)
sobel = one_over_other(sobel_x, sobel_y, blend=True)
return sobel
elif method == "laplacian":
laplacian = cv2.Laplacian(image, cv2.CV_64F, ksize=ksize)
return laplacian
if __name__=="__main__":
# ------------------------- Edge Detection --------------------------------
image = load_image("./data/binary.jpg", gray=True)
edge_detected = detect_edges(image)
# Sobel operator for edge detection
sudoku = load_image("./data/sudoku.jpg", gray=True)
sobel = gradients(sudoku, method="sobel", axis=2, ksize=5)
display_image(sobel, gray=True)
cv2.imwrite("./data/sobel_xy.jpg", sobel)
|
the-stack_0_16340
|
from octopus.core import app
from octopus.modules.jper import models
from octopus.lib import http, dates
import json
class JPERException(Exception):
pass
class JPERConnectionException(JPERException):
pass
class JPERAuthException(JPERException):
pass
class ValidationException(JPERException):
pass
class JPER(object):
# FilesAndJATS = "http://router.jisc.ac.uk/packages/FilesAndJATS"
#FilesAndJATS = "https://pubrouter.jisc.ac.uk/FilesAndJATS"
FilesAndJATS = "https://datahub.deepgreen.org/FilesAndJATS"
def __init__(self, api_key=None, base_url=None):
self.api_key = api_key if api_key is not None else app.config.get("JPER_API_KEY")
self.base_url = base_url if base_url is not None else app.config.get("JPER_BASE_URL")
if self.base_url.endswith("/"):
self.base_url = self.base_url[:-1]
def _url(self, endpoint=None, id=None, auth=True, params=None, url=None):
if url is None:
url = self.base_url
if url.endswith("/"):
            url = url[:-1]
if endpoint is not None:
url += "/" + endpoint
if id is not None:
url += "/" + http.quote(id)
if auth:
if params is None:
params = {}
if self.api_key is not None and self.api_key != "":
params["api_key"] = self.api_key
args = []
for k, v in params.items():
args.append(k + "=" + http.quote(str(v)))
if len(args) > 0:
if "?" not in url:
url += "?"
else:
url += "&"
qs = "&".join(args)
url += qs
return url
def validate(self, notification, file_handle=None):
# turn the notification into a json string
data = None
if isinstance(notification, models.IncomingNotification):
data = notification.json()
else:
data = json.dumps(notification)
# get the url that we are going to send to
url = self._url("validate")
# 2016-06-20 TD : switch SSL verification off
verify = False
resp = None
if file_handle is None:
# if there is no file handle supplied, send the metadata-only notification
resp = http.post(url, data=data, headers={"Content-Type" : "application/json"}, verify=verify)
else:
# otherwise send both parts as a multipart message
files = [
("metadata", ("metadata.json", data, "application/json")),
("content", ("content.zip", file_handle, "application/zip"))
]
resp = http.post(url, files=files, verify=verify)
if resp is None:
raise JPERConnectionException("Unable to communicate with the JPER API")
if resp.status_code == 401:
raise JPERAuthException("Could not authenticate with JPER with your API key")
if resp.status_code == 400:
raise ValidationException(resp.json().get("error"))
return True
def create_notification(self, notification, file_handle=None):
# turn the notification into a json string
data = None
if isinstance(notification, models.IncomingNotification):
data = notification.json()
else:
data = json.dumps(notification)
# get the url that we are going to send to
url = self._url("notification")
# 2016-06-20 TD : switch SSL verification off
verify = False
resp = None
if file_handle is None:
# if there is no file handle supplied, send the metadata-only notification
resp = http.post(url, data=data, headers={"Content-Type" : "application/json"}, verify=verify)
else:
# otherwise send both parts as a multipart message
files = [
("metadata", ("metadata.json", data, "application/json")),
("content", ("content.zip", file_handle, "application/zip"))
]
resp = http.post(url, files=files, verify=verify)
if resp is None:
raise JPERConnectionException("Unable to communicate with the JPER API")
if resp.status_code == 401:
raise JPERAuthException("Could not authenticate with JPER with your API key")
if resp.status_code == 400:
raise ValidationException(resp.json().get("error"))
# extract the useful information from the acceptance response
acc = resp.json()
id = acc.get("id")
loc = acc.get("location")
return id, loc
def get_notification(self, notification_id=None, location=None):
# get the url that we are going to send to
if notification_id is not None:
url = self._url("notification", id=notification_id)
elif location is not None:
url = location
else:
raise JPERException("You must supply either the notification_id or the location")
# 2016-06-20 TD : switch SSL verification off
verify = False
# get the response object
resp = http.get(url, verify=verify)
if resp is None:
raise JPERConnectionException("Unable to communicate with the JPER API")
if resp.status_code == 404:
return None
if resp.status_code != 200:
raise JPERException("Received unexpected status code from {y}: {x}".format(x=resp.status_code, y=url))
j = resp.json()
if "provider" in j:
return models.ProviderOutgoingNotification(j)
else:
return models.OutgoingNotification(j)
def get_content(self, url, chunk_size=8096):
# just sort out the api_key
url = self._url(url=url)
# 2016-06-20 TD : switch SSL verification off
verify = False
# get the response object
resp, content, downloaded_bytes = http.get_stream(url, read_stream=False, verify=verify)
# check for errors or problems with the response
if resp is None:
raise JPERConnectionException("Unable to communicate with the JPER API")
if resp.status_code == 401:
raise JPERAuthException("Could not authenticate with JPER with your API key")
if resp.status_code != 200:
raise JPERException("Received unexpected status code from {y}: {x}".format(x=resp.status_code, y=url))
# return the response object, in case the caller wants access to headers, etc.
return resp.iter_content(chunk_size=chunk_size), resp.headers
def list_notifications(self, since, page=None, page_size=None, repository_id=None):
# check that the since date is valid, and get it into the right format
if not hasattr(since, "strftime"):
since = dates.parse(since)
since = since.strftime("%Y-%m-%dT%H:%M:%SZ")
# make the url params into an object
params = {"since" : since}
if page is not None:
try:
params["page"] = str(page)
except:
raise JPERException("Unable to convert page argument to string")
if page_size is not None:
try:
params["pageSize"] = str(page_size)
except:
raise JPERException("Unable to convert page_size argument to string")
# get the url, which may contain the repository id if it is not None
url = self._url("routed", id=repository_id, params=params)
# 2016-06-20 TD : switch SSL verification off
verify = False
# get the response object
resp = http.get(url, verify=verify)
# check for errors or problems with the response
if resp is None:
raise JPERConnectionException("Unable to communicate with the JPER API")
if resp.status_code == 401:
raise JPERAuthException("Could not authenticate with JPER with your API key")
if resp.status_code == 400:
raise JPERException(resp.json().get("error"))
if resp.status_code != 200:
raise JPERException("Received unexpected status code from {y}: {x} ".format(x=resp.status_code, y=url))
# create the notification list object
j = resp.json()
return models.NotificationList(j)
def iterate_notifications(self, since, repository_id=None, page_size=100):
page = 1
while True:
nl = self.list_notifications(since, page=page, page_size=page_size, repository_id=repository_id)
if len(nl.notifications) == 0:
break
for n in nl.notifications:
yield n
if page * page_size >= nl.total:
break
page += 1
def record_retrieval(self, notification_id, content_id=None):
# FIXME: not yet implemented, while waiting to see how retrieval finally
# works
pass
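# Hypothetical usage sketch (API key, date and file name below are placeholders,
# not taken from the JPER/DeepGreen documentation):
#
#   client = JPER(api_key="<api-key>")
#   client.validate(incoming_notification, file_handle=open("content.zip", "rb"))
#   note_id, location = client.create_notification(incoming_notification,
#                                                  file_handle=open("content.zip", "rb"))
#   for note in client.iterate_notifications("2016-01-01T00:00:00Z", page_size=25):
#       ...  # each `note` is a models.OutgoingNotification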
|
the-stack_0_16342
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import os
import unittest
import frappe
from frappe.utils import cint
from frappe.model.naming import revert_series_if_last, make_autoname, parse_naming_series
class TestDocument(unittest.TestCase):
def test_get_return_empty_list_for_table_field_if_none(self):
d = frappe.get_doc({"doctype":"User"})
self.assertEqual(d.get("roles"), [])
def test_load(self):
d = frappe.get_doc("DocType", "User")
self.assertEqual(d.doctype, "DocType")
self.assertEqual(d.name, "User")
self.assertEqual(d.allow_rename, 1)
self.assertTrue(isinstance(d.fields, list))
self.assertTrue(isinstance(d.permissions, list))
        self.assertTrue(any(df.fieldname == "email" for df in d.fields))
def test_load_single(self):
d = frappe.get_doc("Website Settings", "Website Settings")
self.assertEqual(d.name, "Website Settings")
self.assertEqual(d.doctype, "Website Settings")
self.assertTrue(d.disable_signup in (0, 1))
def test_insert(self):
d = frappe.get_doc({
"doctype":"Event",
"subject":"test-doc-test-event 1",
"starts_on": "2014-01-01",
"event_type": "Public"
})
d.insert()
self.assertTrue(d.name.startswith("EV"))
self.assertEqual(frappe.db.get_value("Event", d.name, "subject"),
"test-doc-test-event 1")
# test if default values are added
self.assertEqual(d.send_reminder, 1)
return d
def test_insert_with_child(self):
d = frappe.get_doc({
"doctype":"Event",
"subject":"test-doc-test-event 2",
"starts_on": "2014-01-01",
"event_type": "Public"
})
d.insert()
self.assertTrue(d.name.startswith("EV"))
self.assertEqual(frappe.db.get_value("Event", d.name, "subject"),
"test-doc-test-event 2")
def test_update(self):
d = self.test_insert()
d.subject = "subject changed"
d.save()
self.assertEqual(frappe.db.get_value(d.doctype, d.name, "subject"), "subject changed")
def test_value_changed(self):
d = self.test_insert()
d.subject = "subject changed again"
d.save()
self.assertTrue(d.has_value_changed('subject'))
self.assertFalse(d.has_value_changed('event_type'))
def test_mandatory(self):
# TODO: recheck if it is OK to force delete
frappe.delete_doc_if_exists("User", "[email protected]", 1)
d = frappe.get_doc({
"doctype": "User",
"email": "[email protected]",
})
self.assertRaises(frappe.MandatoryError, d.insert)
d.set("first_name", "Test Mandatory")
d.insert()
self.assertEqual(frappe.db.get_value("User", d.name), d.name)
def test_conflict_validation(self):
d1 = self.test_insert()
d2 = frappe.get_doc(d1.doctype, d1.name)
d1.save()
self.assertRaises(frappe.TimestampMismatchError, d2.save)
def test_conflict_validation_single(self):
d1 = frappe.get_doc("Website Settings", "Website Settings")
d1.home_page = "test-web-page-1"
d2 = frappe.get_doc("Website Settings", "Website Settings")
d2.home_page = "test-web-page-1"
d1.save()
self.assertRaises(frappe.TimestampMismatchError, d2.save)
def test_permission(self):
frappe.set_user("Guest")
self.assertRaises(frappe.PermissionError, self.test_insert)
frappe.set_user("Administrator")
def test_permission_single(self):
frappe.set_user("Guest")
d = frappe.get_doc("Website Settings", "Website Settings")
self.assertRaises(frappe.PermissionError, d.save)
frappe.set_user("Administrator")
def test_link_validation(self):
frappe.delete_doc_if_exists("User", "[email protected]", 1)
d = frappe.get_doc({
"doctype": "User",
"email": "[email protected]",
"first_name": "Link Validation",
"roles": [
{
"role": "ABC"
}
]
})
self.assertRaises(frappe.LinkValidationError, d.insert)
d.roles = []
d.append("roles", {
"role": "System Manager"
})
d.insert()
self.assertEqual(frappe.db.get_value("User", d.name), d.name)
def test_validate(self):
d = self.test_insert()
d.starts_on = "2014-01-01"
d.ends_on = "2013-01-01"
self.assertRaises(frappe.ValidationError, d.validate)
self.assertRaises(frappe.ValidationError, d.run_method, "validate")
self.assertRaises(frappe.ValidationError, d.save)
def test_update_after_submit(self):
d = self.test_insert()
d.starts_on = "2014-09-09"
self.assertRaises(frappe.UpdateAfterSubmitError, d.validate_update_after_submit)
d.meta.get_field("starts_on").allow_on_submit = 1
d.validate_update_after_submit()
d.meta.get_field("starts_on").allow_on_submit = 0
# when comparing date(2014, 1, 1) and "2014-01-01"
d.reload()
d.starts_on = "2014-01-01"
d.validate_update_after_submit()
def test_varchar_length(self):
d = self.test_insert()
d.subject = "abcde"*100
self.assertRaises(frappe.CharacterLengthExceededError, d.save)
def test_xss_filter(self):
d = self.test_insert()
# script
xss = '<script>alert("XSS")</script>'
        escaped_xss = xss.replace('<', '&lt;').replace('>', '&gt;')
d.subject += xss
d.save()
d.reload()
self.assertTrue(xss not in d.subject)
self.assertTrue(escaped_xss in d.subject)
# onload
xss = '<div onload="alert("XSS")">Test</div>'
escaped_xss = '<div>Test</div>'
d.subject += xss
d.save()
d.reload()
self.assertTrue(xss not in d.subject)
self.assertTrue(escaped_xss in d.subject)
# css attributes
xss = '<div style="something: doesn\'t work; color: red;">Test</div>'
escaped_xss = '<div style="">Test</div>'
d.subject += xss
d.save()
d.reload()
self.assertTrue(xss not in d.subject)
self.assertTrue(escaped_xss in d.subject)
def test_naming_series(self):
data = ["TEST-", "TEST/17-18/.test_data./.####", "TEST.YYYY.MM.####"]
for series in data:
name = make_autoname(series)
prefix = series
if ".#" in series:
prefix = series.rsplit('.',1)[0]
prefix = parse_naming_series(prefix)
old_current = frappe.db.get_value('Series', prefix, "current", order_by="name")
revert_series_if_last(series, name)
new_current = cint(frappe.db.get_value('Series', prefix, "current", order_by="name"))
self.assertEqual(cint(old_current) - 1, new_current)
def test_non_negative_check(self):
frappe.delete_doc_if_exists("Currency", "Frappe Coin", 1)
d = frappe.get_doc({
'doctype': 'Currency',
'currency_name': 'Frappe Coin',
'smallest_currency_fraction_value': -1
})
self.assertRaises(frappe.NonNegativeError, d.insert)
d.set('smallest_currency_fraction_value', 1)
d.insert()
self.assertEqual(frappe.db.get_value("Currency", d.name), d.name)
frappe.delete_doc_if_exists("Currency", "Frappe Coin", 1)
|
the-stack_0_16343
|
"""
Common Python utilities for interacting with the dashboard infra.
"""
import argparse
import datetime
import json
import logging
import os
import sys
def print_log(msg, dec_char='*'):
padding = max(list(map(len, str(msg).split('\n'))))
decorate = dec_char * (padding + 4)
print(f'{decorate}\n{msg}\n{decorate}')
def validate_json(dirname, *fields, filename='status.json'):
if not check_file_exists(dirname, filename):
return {'success': False, 'message': 'No {} in {}'.format(filename, dirname)}
fp = read_json(dirname, filename)
for required_field in fields:
if required_field not in fp:
return {'success': False,
'message': '{} in {} has no \'{}\' field'.format(filename, dirname, required_field)}
return fp
def check_file_exists(dirname, filename):
dirname = os.path.expanduser(dirname)
full_name = os.path.join(dirname, filename)
return os.path.isfile(full_name)
def idemp_mkdir(dirname):
'''Creates a directory in an idempotent fashion.'''
dirname = os.path.expanduser(dirname)
os.makedirs(dirname, exist_ok=True)
def prepare_out_file(dirname, filename):
dirname = os.path.expanduser(dirname)
full_name = os.path.join(dirname, filename)
if not check_file_exists(dirname, filename):
os.makedirs(os.path.dirname(full_name), exist_ok=True)
return full_name
def read_json(dirname, filename):
dirname = os.path.expanduser(dirname)
with open(os.path.join(dirname, filename)) as json_file:
data = json.load(json_file)
return data
def write_json(dirname, filename, obj):
filename = prepare_out_file(dirname, filename)
with open(filename, 'w') as outfile:
json.dump(obj, outfile)
def read_config(dirname):
return read_json(dirname, 'config.json')
def write_status(output_dir, success, message):
write_json(output_dir, 'status.json', {
'success': success,
'message': message
})
def write_summary(output_dir, title, value):
write_json(output_dir, 'summary.json', {
'title': title,
'value': value
})
def get_timestamp():
time = datetime.datetime.now()
return time.strftime('%m-%d-%Y-%H%M')
def parse_timestamp(data):
return datetime.datetime.strptime(data['timestamp'], '%m-%d-%Y-%H%M')
def time_difference(entry1, entry2):
'''
Returns a datetime object corresponding to the difference in
timestamps between two data entries.
(Entry 1 time - entry 2 time)
'''
return parse_timestamp(entry1) - parse_timestamp(entry2)
def sort_data(data_dir):
'''Sorts all data files in the given directory by timestamp.'''
data_dir = os.path.expanduser(data_dir)
all_data = []
for _, _, files in os.walk(data_dir):
for name in files:
data = read_json(data_dir, name)
all_data.append(data)
return sorted(all_data, key=parse_timestamp)
def gather_stats(sorted_data, fields):
'''
Expects input in the form of a list of data objects with timestamp
fields (like those returned by sort_data).
For each entry, this looks up entry[field[0]][field[1]]...
for all entries that have all the fields, skipping those that
don't. Returns a pair (list of entry values,
list of corresponding entry timestamps)
'''
stats = []
times = []
for entry in sorted_data:
stat = entry
not_present = False
for field in fields:
if field not in stat:
not_present = True
break
stat = stat[field]
if not_present:
continue
times.append(parse_timestamp(entry))
stats.append(stat)
return (stats, times)
def traverse_fields(entry, ignore_fields=None):
"""
Returns a list of sets of nested fields (one set per level of nesting)
of a JSON data entry produced by a benchmark analysis script.
Ignores the 'detailed' field by default (as old data files will not have detailed summaries).
Set ignore_fields to a non-None value to avoid the defaults.
"""
ignore_set = {'timestamp', 'detailed',
'start_time', 'end_time', 'time_delta', 'success',
'run_cpu_telemetry', 'run_gpu_telemetry'}
if ignore_fields is not None:
ignore_set = set(ignore_fields)
level_fields = {field for field in entry.keys()
if field not in ignore_set}
values_to_check = [entry[field] for field in level_fields
if isinstance(entry[field], dict)]
tail = []
max_len = 0
for value in values_to_check:
next_fields = traverse_fields(value)
tail.append(next_fields)
if len(next_fields) > max_len:
max_len = len(next_fields)
# combine all the field lists (union of each level's sets)
final_tail = []
for i in range(max_len):
u = set({})
final_tail.append(u.union(*[fields_list[i]
for fields_list in tail
if len(fields_list) > i]))
return [level_fields] + final_tail
def invoke_main(main_func, *arg_names):
"""
Generates an argument parser for arg_names and calls
main_func with the arguments it parses. Arguments
are assumed to be string-typed. The argument names should
be Python-valid names.
If main_func returns a value, this function assumes it to
be a return code. If not, this function will exit with code
    0 after invoking main_func.
"""
parser = argparse.ArgumentParser()
for arg_name in arg_names:
name = arg_name
parser.add_argument('--{}'.format(name.replace('_', '-')),
required=True, type=str)
args = parser.parse_args()
ret = main_func(*[getattr(args, name) for name in arg_names])
if ret is None:
sys.exit(0)
sys.exit(ret)
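# Example (hypothetical entry point): if `run(config_dir, output_dir)` is a
# benchmark script's main function, then
#
#     invoke_main(run, 'config_dir', 'output_dir')
#
# exposes it as `python script.py --config-dir ... --output-dir ...`.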
def render_exception(e):
return logging.Formatter.formatException(e, sys.exc_info())
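if __name__ == '__main__':
    # Small self-contained sketch of timestamp sorting and stat gathering; the
    # entries below are fabricated for illustration only.
    example = [
        {'timestamp': '01-02-2020-0930', 'metrics': {'latency': 2.0}},
        {'timestamp': '01-01-2020-0900', 'metrics': {'latency': 1.5}},
        {'timestamp': '01-03-2020-1000', 'note': 'entry without a metrics field'},
    ]
    ordered = sorted(example, key=parse_timestamp)
    stats, times = gather_stats(ordered, ['metrics', 'latency'])
    print(stats)                         # [1.5, 2.0] -- third entry is skipped
    print(traverse_fields(example[0]))   # [{'metrics'}, {'latency'}]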
|
the-stack_0_16344
|
from inspect import signature
from collections import namedtuple
import time
import numpy as np
import pandas as pd
from functools import singledispatch
#####################
# utils
#####################
class Timer():
def __init__(self):
self.times = [time.time()]
self.total_time = 0.0
def __call__(self, include_in_total=True):
self.times.append(time.time())
delta_t = self.times[-1] - self.times[-2]
if include_in_total:
self.total_time += delta_t
return delta_t
localtime = lambda: time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
class TableLogger():
def append(self, output):
if not hasattr(self, 'keys'):
self.keys = output.keys()
print(*(f'{k:>12s}' for k in self.keys))
filtered = [output[k] for k in self.keys]
print(*(f'{v:12.4f}' if isinstance(v, np.float) else f'{v:12}' for v in filtered))
#####################
## data preprocessing
#####################
cifar10_mean = (0.4914, 0.4822, 0.4465) # equals np.mean(train_set.train_data, axis=(0,1,2))/255
cifar10_std = (0.2471, 0.2435, 0.2616) # equals np.std(train_set.train_data, axis=(0,1,2))/255
def normalise(x, mean=cifar10_mean, std=cifar10_std):
x, mean, std = [np.array(a, np.float32) for a in (x, mean, std)]
x -= mean*255
x *= 1.0/(255*std)
return x
def pad(x, border=4):
return np.pad(x, [(0, 0), (border, border), (border, border), (0, 0)], mode='reflect')
def transpose(x, source='NHWC', target='NCHW'):
return x.transpose([source.index(d) for d in target])
#####################
## data augmentation
#####################
class Crop(namedtuple('Crop', ('h', 'w'))):
def __call__(self, x, x0, y0):
return x[:,y0:y0+self.h,x0:x0+self.w]
def options(self, x_shape):
C, H, W = x_shape
return {'x0': range(W+1-self.w), 'y0': range(H+1-self.h)}
def output_shape(self, x_shape):
C, H, W = x_shape
return (C, self.h, self.w)
class FlipLR(namedtuple('FlipLR', ())):
def __call__(self, x, choice):
return x[:, :, ::-1].copy() if choice else x
def options(self, x_shape):
return {'choice': [True, False]}
class Cutout(namedtuple('Cutout', ('h', 'w'))):
def __call__(self, x, x0, y0):
x = x.copy()
x[:,y0:y0+self.h,x0:x0+self.w].fill(0.0)
return x
def options(self, x_shape):
C, H, W = x_shape
return {'x0': range(W+1-self.w), 'y0': range(H+1-self.h)}
class Transform():
def __init__(self, dataset, transforms):
self.dataset, self.transforms = dataset, transforms
self.choices = None
def __len__(self):
return len(self.dataset)
def __getitem__(self, index):
data, labels = self.dataset[index]
for choices, f in zip(self.choices, self.transforms):
args = {k: v[index] for (k,v) in choices.items()}
data = f(data, **args)
return data, labels
def set_random_choices(self):
self.choices = []
x_shape = self.dataset[0][0].shape
N = len(self)
for t in self.transforms:
options = t.options(x_shape)
x_shape = t.output_shape(x_shape) if hasattr(t, 'output_shape') else x_shape
self.choices.append({k:np.random.choice(v, size=N) for (k,v) in options.items()})
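# Typical use (an assumption based on how the transforms above fit together, not a
# line from the training script): wrap a dataset of padded CHW images, e.g.
#   Transform(train_set, [Crop(32, 32), FlipLR(), Cutout(8, 8)])
# and call set_random_choices() once per epoch to redraw crop offsets, flips and
# cutout positions for every example.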
#####################
## dict utils
#####################
union = lambda *dicts: {k: v for d in dicts for (k, v) in d.items()}
def path_iter(nested_dict, pfx=()):
for name, val in nested_dict.items():
if isinstance(val, dict): yield from path_iter(val, (*pfx, name))
else: yield ((*pfx, name), val)
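# e.g. dict(path_iter({'prep': {'conv': 1}, 'pool': 2})) == {('prep', 'conv'): 1, ('pool',): 2}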
#####################
## graph building
#####################
sep='_'
RelativePath = namedtuple('RelativePath', ('parts'))
rel_path = lambda *parts: RelativePath(parts)
def build_graph(net):
net = dict(path_iter(net))
default_inputs = [[('input',)]]+[[k] for k in net.keys()]
with_default_inputs = lambda vals: (val if isinstance(val, tuple) else (val, default_inputs[idx]) for idx,val in enumerate(vals))
parts = lambda path, pfx: tuple(pfx) + path.parts if isinstance(path, RelativePath) else (path,) if isinstance(path, str) else path
return {sep.join((*pfx, name)): (val, [sep.join(parts(x, pfx)) for x in inputs]) for (*pfx, name), (val, inputs) in zip(net.keys(), with_default_inputs(net.values()))}
#####################
## training utils
#####################
@singledispatch
def cat(*xs):
raise NotImplementedError
@singledispatch
def to_numpy(x):
raise NotImplementedError
class PiecewiseLinear(namedtuple('PiecewiseLinear', ('knots', 'vals'))):
def __call__(self, t):
return np.interp([t], self.knots, self.vals)[0]
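# e.g. PiecewiseLinear([0, 5, 24], [0.0, 0.4, 0.0])(2) == 0.16 -- a triangular
# (warm-up then decay) learning-rate schedule evaluated at epoch 2.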
class StatsLogger():
def __init__(self, keys):
self._stats = {k:[] for k in keys}
def append(self, output):
for k,v in self._stats.items():
v.append(output[k].detach())
def stats(self, key):
return cat(*self._stats[key])
def mean(self, key):
return np.mean(to_numpy(self.stats(key)), dtype=np.float)
def run_batches(model, batches, training, optimizer_step=None, stats=None):
stats = stats or StatsLogger(('loss', 'correct'))
model.train(training)
for batch in batches:
output = model(batch)
stats.append(output)
if training:
output['loss'].sum().backward()
optimizer_step()
model.zero_grad()
return stats
def train_epoch(model, train_batches, test_batches, optimizer_step, timer, test_time_in_total=True):
train_stats, train_time = run_batches(model, train_batches, True, optimizer_step), timer()
test_stats, test_time = run_batches(model, test_batches, False), timer(test_time_in_total)
return {
'train time': train_time, 'train loss': train_stats.mean('loss'), 'train acc': train_stats.mean('correct'),
'test time': test_time, 'test loss': test_stats.mean('loss'), 'test acc': test_stats.mean('correct'),
'total time': timer.total_time,
}
def train(model, optimizer, train_batches, test_batches, epochs,
loggers=(), test_time_in_total=True, timer=None):
timer = timer or Timer()
for epoch in range(epochs):
epoch_stats = train_epoch(model, train_batches, test_batches, optimizer.step, timer, test_time_in_total=test_time_in_total)
summary = union({'epoch': epoch+1, 'lr': optimizer.param_values()['lr']*train_batches.batch_size}, epoch_stats)
for logger in loggers:
logger.append(summary)
return summary
#####################
## network visualisation (requires pydot)
#####################
class ColorMap(dict):
palette = (
'bebada,ffffb3,fb8072,8dd3c7,80b1d3,fdb462,b3de69,fccde5,bc80bd,ccebc5,ffed6f,1f78b4,33a02c,e31a1c,ff7f00,'
'4dddf8,e66493,b07b87,4e90e3,dea05e,d0c281,f0e189,e9e8b1,e0eb71,bbd2a4,6ed641,57eb9c,3ca4d4,92d5e7,b15928'
).split(',')
def __missing__(self, key):
self[key] = self.palette[len(self) % len(self.palette)]
return self[key]
def make_pydot(nodes, edges, direction='LR', sep=sep, **kwargs):
import pydot
parent = lambda path: path[:-1]
stub = lambda path: path[-1]
class Subgraphs(dict):
def __missing__(self, path):
subgraph = pydot.Cluster(sep.join(path), label=stub(path), style='rounded, filled', fillcolor='#77777744')
self[parent(path)].add_subgraph(subgraph)
return subgraph
subgraphs = Subgraphs()
subgraphs[()] = g = pydot.Dot(rankdir=direction, directed=True, **kwargs)
g.set_node_defaults(
shape='box', style='rounded, filled', fillcolor='#ffffff')
for node, attr in nodes:
path = tuple(node.split(sep))
subgraphs[parent(path)].add_node(
pydot.Node(name=node, label=stub(path), **attr))
for src, dst, attr in edges:
g.add_edge(pydot.Edge(src, dst, **attr))
return g
get_params = lambda mod: {p.name: getattr(mod, p.name, '?') for p in signature(type(mod)).parameters.values()}
class DotGraph():
colors = ColorMap()
def __init__(self, net, size=15, direction='LR'):
graph = build_graph(net)
self.nodes = [(k, {
'tooltip': '%s %.1000r' % (type(n).__name__, get_params(n)),
'fillcolor': '#'+self.colors[type(n)],
}) for k, (n, i) in graph.items()]
self.edges = [(src, k, {}) for (k, (n, i)) in graph.items() for src in i]
self.size, self.direction = size, direction
def dot_graph(self, **kwargs):
return make_pydot(self.nodes, self.edges, size=self.size,
direction=self.direction, **kwargs)
def svg(self, **kwargs):
return self.dot_graph(**kwargs).create(format='svg').decode('utf-8')
try:
import pydot
def _repr_svg_(self):
return self.svg()
except ImportError:
def __repr__(self):
return 'pydot is needed for network visualisation'
walk = lambda dict_, key: walk(dict_, dict_[key]) if key in dict_ else key
def remove_by_type(net, node_type):
#remove identity nodes for more compact visualisations
graph = build_graph(net)
remap = {k: i[0] for k,(v,i) in graph.items() if isinstance(v, node_type)}
return {k: (v, [walk(remap, x) for x in i]) for k, (v,i) in graph.items() if not isinstance(v, node_type)}
|
the-stack_0_16346
|
# from framework.utils.analyzer_pydantic import ModelFieldEx
import inspect
from dataclasses import field
from typing import Any, Dict, List, Optional, Type, TypedDict
from fastapi import Query
from pydantic import BaseConfig, BaseModel, Field
from pydantic.fields import FieldInfo, ModelField, NoArgAnyCallable, Union, Validator
from pydantic.utils import smart_deepcopy
class DataclassFieldMeta(TypedDict):
default: Any
default_factory: Any
init: bool
repr: bool
hash: bool
compare: bool
metadata: Any
class PydanticFieldMeta(TypedDict):
name: str
type: Type
default: Any
default_factory: Any
title: str
alias: str
description: str
const: bool
gt: float
ge: float
lt: float
le: float
multiple_of: float
min_items: int
max_items: int
min_length: int
max_length: int
regex: str
# extra
allow_mutation: bool
# fastapi
deprecated: str
class ModelFieldEx:
"""https://www.apps-gcp.com/openapi_learn_the_basics/ に近づけたい"""
kind: inspect._ParameterKind
index: int
description: str
def __init__(
self,
*,
name: str,
type_: Type[Any] = Any, # type: ignore
kind: inspect._ParameterKind,
default: Any = inspect._empty, # type: ignore
# common
required: bool = True,
index: int = -1,
alias: str = None,
description: str = "",
meta: Any = None,
# pydantic
default_factory: Optional[NoArgAnyCallable] = None,
# class_validators: Optional[Dict[str, Validator]] = None,
# model_config: Type[BaseConfig] = BaseConfig = None,
# field_info: Optional[FieldInfo] = None,
# sqlalchemy
# column_type=None,
relation_type: str = "", # "ONETOONE"|"MANYTOONE"|"ONETOMANY"|"MANYTOMANY"|""
is_primary_key: bool = False,
foreign_keys: List[str] = [],
is_unique: bool = False,
is_index: bool = False,
is_nullable: bool = False,
is_system: bool = False,
) -> None:
if default is inspect._empty and not default_factory: # type: ignore
assert required == True
type_ = Any if type_ is inspect._empty else type_ # type: ignore
self.name = name
self.type_ = type_
# self.class_validators = class_validators
# self.model_config = model_config
self.default = default
self.default_factory = default_factory
self.required = required
self.alias = alias or name
# self.field_info = field_info
self.kind = kind
self.index = index
self.description = description or ""
self.meta = meta
# orm fields
# self.column_type = column_type
self.foreign_keys = foreign_keys
self.relation_type = relation_type
self.is_primary_key = is_primary_key
self.is_unique = is_unique
self.is_nullable = is_nullable
self.is_system = is_system
self.is_index = is_index
def get_meta_or_default(self, undefined: Any = inspect._empty) -> Any: # type: ignore
return self.meta or self.get_default(undefined=undefined)
def get_real_mata(self, undefined: Any = inspect._empty) -> Any: # type: ignore
"""定義されたままのメタ情報を取得する"""
raise NotImplementedError()
return self.meta
def get_mata(self, undefined: Any = inspect._empty) -> Any: # type: ignore
"""標準化されたpydanticのfieldinfoなどのメタ情報を取得する"""
return self.meta
def get_orm_mata(self, undefined: Any = inspect._empty) -> Any: # type: ignore
"""標準化されたsqlalchemyのcolumnなどのメタ情報を取得する"""
raise NotImplementedError()
return self.meta
def get_fulltextsearch_mata(self, undefined: Any = inspect._empty) -> Any: # type: ignore
"""標準化された全文検索に関するメタを取得する"""
raise NotImplementedError()
return self.meta
def get_default(self, undefined: Any = inspect._empty) -> Any: # type: ignore
"""デフォルト値かdefault_factoryに生成されたデフォルト値かemptyを返す。emptyは任意の値を返すことができる。"""
if self.required:
return undefined
        # TODO: do not deepcopy pydantic fields.
if isinstance(self.default, FieldInfo):
return self.default
return (
self.default_factory()
if self.default_factory
else smart_deepcopy(self.default)
)
def __str__(self) -> str:
name = self.name
type_ = self.type_
default = self.default
default_factory = self.default_factory
required = self.required
alias = self.alias
        field_info = getattr(self, "field_info", None)  # field_info is never assigned in __init__ (it is commented out above)
kind = self.kind
index = self.index
description = self.description
return f"{self.__class__!r}({name=},{type_=},{default=},{default_factory=},{required=},{alias=},{field_info=},{kind=},{index=},{description=})"
@classmethod
def from_parameter(cls, parameter: inspect.Parameter):
return cls.from_annotation_info(
name=parameter.name,
annotation=parameter.annotation,
default=parameter.default,
)
@classmethod
def from_annotation_info(
cls, name: str, annotation: Any = inspect._empty, default: Any = inspect._empty # type: ignore
):
annotation = Any if annotation is inspect._empty else annotation # type: ignore
parameter = inspect.Parameter(name=name, annotation=annotation, default=default) # type: ignore
raise NotImplementedError()
@classmethod
def from_pydantic_modelfield(cls, field: ModelField):
raise NotImplementedError()
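# Hypothetical construction sketch (the field shown is made up, not taken from the
# surrounding framework):
#
#   field = ModelFieldEx(
#       name="title",
#       type_=str,
#       kind=inspect.Parameter.KEYWORD_ONLY,
#       default="untitled",
#       required=False,
#       description="display title",
#   )
#   field.get_default()  # -> "untitled"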
|
the-stack_0_16348
|
import copy
import json
import re
import unittest
from django.contrib import admin
from django.contrib.auth import get_permission_codename
from django.contrib.auth.models import Permission
from django.template import RequestContext
from django.utils.encoding import force_str
from django.utils.html import escape
from django.utils.http import urlencode, urlunquote
from cms.api import add_plugin, create_page, create_title
from cms.models import CMSPlugin, Page, Title
from cms.utils.urlutils import admin_reverse
from djangocms_text_ckeditor.cms_plugins import TextPlugin
from djangocms_text_ckeditor.compat import get_page_placeholders
from djangocms_text_ckeditor.models import Text
from djangocms_text_ckeditor.utils import (
_plugin_tags_to_html, _render_cms_plugin, plugin_tags_to_admin_html, plugin_tags_to_id_list, plugin_to_tag,
)
from tests.test_app.cms_plugins import DummyChildPlugin, DummyParentPlugin
from .base import BaseTestCase
try:
from djangocms_transfer.exporter import export_page
HAS_DJANGOCMS_TRANSFER = True
except ImportError:
HAS_DJANGOCMS_TRANSFER = False
try:
import djangocms_translations # noqa
HAS_DJANGOCMS_TRANSLATIONS = True
except ImportError:
HAS_DJANGOCMS_TRANSLATIONS = False
class PluginActionsTestCase(BaseTestCase):
def get_custom_admin_url(self, plugin_class, name):
plugin_type = plugin_class.__name__.lower()
url_name = f'{plugin_class.model._meta.app_label}_{plugin_type}_{name}'
return admin_reverse(url_name)
def _add_child_plugin(self, text_plugin, plugin_type='PicturePlugin', data_suffix=None):
name = f'{plugin_type} record'
if data_suffix is not None:
name = f'{name} {data_suffix}'
basic_plugins = {
'LinkPlugin': {
'name': name,
'external_link': 'https://www.django-cms.org',
},
'PreviewDisabledPlugin': {},
'SekizaiPlugin': {},
}
if plugin_type == 'PicturePlugin':
data = {'caption_text': name, 'picture': self.create_filer_image_object()}
else:
data = basic_plugins[plugin_type]
plugin = add_plugin(
text_plugin.placeholder,
plugin_type,
'en',
target=text_plugin,
**data,
)
return plugin
def _add_text_plugin(self, placeholder, plugin_type='TextPlugin'):
text_plugin = add_plugin(
placeholder,
plugin_type,
'en',
body='Hello World',
)
return text_plugin
def _replace_plugin_contents(self, text, new_plugin_content):
def _do_replace(obj, match):
return plugin_to_tag(obj, content=new_plugin_content)
return _plugin_tags_to_html(text, output_func=_do_replace)
def add_plugin_to_text(self, text_plugin, plugin):
text_plugin.body = f'{text_plugin.body} {plugin_to_tag(plugin)}'
text_plugin.save()
return text_plugin
def _give_permission(self, user, model, permission_type, save=True):
codename = get_permission_codename(permission_type, model._meta)
user.user_permissions.add(Permission.objects.get(codename=codename))
def _give_cms_permissions(self, user):
for perm_type in ['add', 'change', 'delete']:
for model in [Page, Title]:
self._give_permission(user, model, perm_type)
def get_page_admin(self):
admin.autodiscover()
return admin.site._registry[Page]
def get_post_request(self, data):
return self.get_request(post_data=data)
def get_plugin_id_from_response(self, response):
url = urlunquote(response.url)
# Ideal case, this looks like:
# /en/admin/cms/page/edit-plugin/1/
return re.findall(r'\d+', url)[0]
def test_add_and_edit_plugin(self):
"""
Test that you can add a text plugin
"""
admin = self.get_superuser()
simple_page = create_page('test page', 'page.html', 'en')
simple_placeholder = get_page_placeholders(simple_page, 'en').get(slot='content')
endpoint = self.get_add_plugin_uri(simple_placeholder, 'TextPlugin')
with self.login_user_context(admin):
response = self.client.get(endpoint)
text_plugin_pk = self.get_plugin_id_from_response(response)
self.assertIn('?delete-on-cancel', response.url)
self.assertEqual(response.status_code, 302)
# Assert "ghost" plugin has been created
self.assertObjectExist(CMSPlugin.objects.all(), pk=text_plugin_pk)
cms_plugin = CMSPlugin.objects.get(pk=text_plugin_pk)
text_plugin_class = cms_plugin.get_plugin_class_instance()
# Assert "real" plugin has not been created yet
self.assertObjectDoesNotExist(Text.objects.all(), pk=text_plugin_pk)
add_url = response.url
with self.login_user_context(admin):
request = self.get_request()
action_token = text_plugin_class.get_action_token(request, cms_plugin)
response = self.client.get(add_url)
self.assertEqual(response.status_code, 200)
# Assert cancel token is present
self.assertContains(response, action_token)
with self.login_user_context(admin):
data = {'body': 'Hello world'}
response = self.client.post(add_url, data)
self.assertEqual(response.status_code, 200)
# Assert "real" plugin has been created yet
self.assertObjectExist(Text.objects.all(), pk=text_plugin_pk)
text_plugin = Text.objects.get(pk=text_plugin_pk)
# Assert the text was correctly saved
self.assertEqual(text_plugin.body, 'Hello world')
def test_add_and_cancel_plugin(self):
"""
Test that you can add a text plugin
"""
simple_page = create_page('test page', 'page.html', 'en')
simple_placeholder = get_page_placeholders(simple_page, 'en').get(slot='content')
endpoint = self.get_add_plugin_uri(simple_placeholder, 'TextPlugin')
with self.login_user_context(self.get_superuser()):
response = self.client.get(endpoint)
self.assertEqual(response.status_code, 302)
# Point to the newly created text plugin
text_plugin_pk = self.get_plugin_id_from_response(response)
cms_plugin = CMSPlugin.objects.get(pk=text_plugin_pk)
text_plugin_class = cms_plugin.get_plugin_class_instance()
# Assert "ghost" plugin has been created
self.assertObjectExist(CMSPlugin.objects.all(), pk=text_plugin_pk)
with self.login_user_context(self.get_superuser()):
request = self.get_request()
action_token = text_plugin_class.get_action_token(request, cms_plugin)
data = {'token': action_token}
request = self.get_post_request(data)
response = text_plugin_class.delete_on_cancel(request)
self.assertEqual(response.status_code, 204)
# Assert "ghost" plugin has been removed
self.assertObjectDoesNotExist(CMSPlugin.objects.all(), pk=text_plugin_pk)
# Assert "real" plugin was never created
self.assertObjectDoesNotExist(Text.objects.all(), pk=text_plugin_pk)
# Assert user can't delete a non "ghost" plugin
text_plugin = add_plugin(
simple_placeholder,
'TextPlugin',
'en',
body="I'm the first",
)
with self.login_user_context(self.get_superuser()):
request = self.get_request()
action_token = text_plugin_class.get_action_token(request, text_plugin)
data = {'token': action_token}
request = self.get_post_request(data)
response = text_plugin_class.delete_on_cancel(request)
self.assertEqual(response.status_code, 400)
def test_copy_referenced_plugins(self):
"""
Test that copy+pasting a child plugin between text editors
creates proper copies of the child plugin and messes no other data up
"""
simple_page = create_page('test page', 'page.html', 'en')
simple_placeholder = get_page_placeholders(simple_page, 'en').get(slot='content')
def _get_text_plugin_with_children():
text_plugin = add_plugin(
simple_placeholder,
'TextPlugin',
'en',
body='Text plugin we copy child plugins to',
)
_add_child_plugins_to_text_plugin(text_plugin)
return text_plugin
def _add_child_plugins_to_text_plugin(text_plugin):
child_plugin_1 = add_plugin(
simple_placeholder,
'PicturePlugin',
'en',
target=text_plugin,
picture=self.create_filer_image_object(),
caption_text='Child plugin one',
)
child_plugin_2 = add_plugin(
simple_placeholder,
'PicturePlugin',
'en',
target=text_plugin,
picture=self.create_filer_image_object(),
caption_text='Child plugin two',
)
self.add_plugin_to_text(text_plugin, child_plugin_1)
self.add_plugin_to_text(text_plugin, child_plugin_2)
def _copy_child_plugins_from_text(text_plugin_source, text_plugin_destination):
for child_plugin in text_plugin_source.cmsplugin_set.all():
text_plugin_destination.body += ' ' + plugin_to_tag(child_plugin)
text_plugin_destination.save()
_run_clean_and_copy(text_plugin_destination)
def _run_clean_and_copy(text_plugin):
text_plugin.clean_plugins()
text_plugin.copy_referenced_plugins()
def _get_common_children_ids(text_plugin_one, text_plugin_two):
original_children_ids = set(plugin_tags_to_id_list(text_plugin_one.body))
copied_children_ids = set(plugin_tags_to_id_list(text_plugin_two.body))
return original_children_ids.intersection(copied_children_ids)
text_plugin_copy_from = _get_text_plugin_with_children()
text_plugin_copy_to = _get_text_plugin_with_children()
_copy_child_plugins_from_text(text_plugin_copy_from, text_plugin_copy_to)
self.assertEqual(text_plugin_copy_from.cmsplugin_set.count(), 2)
self.assertEqual(text_plugin_copy_to.cmsplugin_set.count(), 4)
_run_clean_and_copy(text_plugin_copy_from)
_run_clean_and_copy(text_plugin_copy_to)
self.assertEqual(text_plugin_copy_from.cmsplugin_set.count(), 2)
self.assertEqual(text_plugin_copy_to.cmsplugin_set.count(), 4)
common_children_ids = _get_common_children_ids(text_plugin_copy_from, text_plugin_copy_to)
self.assertFalse(common_children_ids)
def test_add_and_cancel_child_plugin(self):
"""
Test that you can add a text plugin
"""
admin = self.get_superuser()
simple_page = create_page('test page', 'page.html', 'en')
simple_placeholder = get_page_placeholders(simple_page, 'en').get(slot='content')
text_plugin = add_plugin(
simple_placeholder,
'TextPlugin',
'en',
body="I'm the first",
)
text_plugin_class = text_plugin.get_plugin_class_instance()
child_plugin_1 = add_plugin(
simple_placeholder,
'PicturePlugin',
'en',
target=text_plugin,
picture=self.create_filer_image_object(),
caption_text='Foo',
)
child_plugin_2 = add_plugin(
simple_placeholder,
'PicturePlugin',
'en',
target=text_plugin,
picture=self.create_filer_image_object(),
caption_text='Foo',
)
child_plugin_3 = add_plugin(
simple_placeholder,
'PicturePlugin',
'en',
target=text_plugin,
picture=self.create_filer_image_object(),
caption_text='Foo',
)
child_plugin_4 = add_plugin(
simple_placeholder,
'PicturePlugin',
'en',
target=text_plugin,
picture=self.create_filer_image_object(),
caption_text='Foo',
)
text_plugin = self.add_plugin_to_text(text_plugin, child_plugin_1)
text_plugin = self.add_plugin_to_text(text_plugin, child_plugin_4)
with self.login_user_context(admin):
request = self.get_request()
action_token = text_plugin_class.get_action_token(request, text_plugin)
# Assert user is unable to delete a saved child plugin
data = {'token': action_token, 'child_plugins': [child_plugin_1.pk]}
request = self.get_post_request(data)
response = text_plugin_class.delete_on_cancel(request)
self.assertEqual(response.status_code, 400)
self.assertObjectExist(CMSPlugin.objects.all(), pk=child_plugin_1.pk)
            # Assert user is unable to delete if the plugins array contains
            # a saved plugin.
plugin_ids = [
child_plugin_1.pk,
child_plugin_2.pk,
child_plugin_3.pk,
child_plugin_4.pk,
]
data = {'token': action_token, 'child_plugins': plugin_ids}
request = self.get_post_request(data)
response = text_plugin_class.delete_on_cancel(request)
self.assertEqual(response.status_code, 400)
self.assertObjectExist(CMSPlugin.objects.all(), pk=child_plugin_1.pk)
self.assertObjectExist(CMSPlugin.objects.all(), pk=child_plugin_2.pk)
self.assertObjectExist(CMSPlugin.objects.all(), pk=child_plugin_3.pk)
self.assertObjectExist(CMSPlugin.objects.all(), pk=child_plugin_4.pk)
plugin_ids = [
child_plugin_2.pk,
child_plugin_3.pk,
]
data = {'token': action_token, 'child_plugins': plugin_ids}
request = self.get_post_request(data)
response = text_plugin_class.delete_on_cancel(request)
self.assertEqual(response.status_code, 204)
self.assertObjectDoesNotExist(CMSPlugin.objects.all(), pk=child_plugin_2.pk)
self.assertObjectDoesNotExist(CMSPlugin.objects.all(), pk=child_plugin_3.pk)
def test_action_token_per_session(self):
# Assert that a cancel token for the same plugin
# is different per user session.
simple_page = create_page('test page', 'page.html', 'en')
simple_placeholder = get_page_placeholders(simple_page, 'en').get(slot='content')
text_plugin = add_plugin(
simple_placeholder,
'TextPlugin',
'en',
body="I'm the first",
)
text_plugin_class = text_plugin.get_plugin_class_instance()
with self.login_user_context(self.get_superuser()):
request = self.get_request()
action_token_1 = text_plugin_class.get_action_token(request, text_plugin)
with self.login_user_context(self.get_superuser()):
request = self.get_request()
action_token_2 = text_plugin_class.get_action_token(request, text_plugin)
self.assertNotEqual(action_token_1, action_token_2)
def test_add_and_cancel_plugin_permissions(self):
simple_page = create_page('test page', 'page.html', 'en')
simple_placeholder = get_page_placeholders(simple_page, 'en').get(slot='content')
endpoint = self.get_add_plugin_uri(simple_placeholder, 'TextPlugin')
with self.login_user_context(self.user):
response = self.client.post(endpoint, {})
self.assertEqual(response.status_code, 302)
# Point to the newly created text plugin
text_plugin_pk = self.get_plugin_id_from_response(response)
cms_plugin = CMSPlugin.objects.get(pk=text_plugin_pk)
text_plugin_class = cms_plugin.get_plugin_class_instance()
endpoint = self.get_custom_admin_url(TextPlugin, 'delete_on_cancel')
# Assert a standard user (no staff) can't delete ghost plugin
with self.login_user_context(self.get_standard_user()):
request = self.get_request()
action_token = text_plugin_class.get_action_token(request, cms_plugin)
data = {'token': action_token}
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 403)
staff_user = self._create_user('addonly-staff', is_staff=True, is_superuser=False)
self._give_cms_permissions(staff_user)
self._give_permission(staff_user, text_plugin_class.model, 'add')
with self.login_user_context(staff_user):
request = self.get_request()
action_token = text_plugin_class.get_action_token(request, cms_plugin)
data = {'token': action_token}
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 204)
def test_change_form_has_rendered_plugin_content(self):
"""
When the text form is rendered in the admin,
        the child plugins are rendered and their contents are passed
        as initial data to the text field.
"""
simple_page = create_page('test page', 'page.html', 'en')
simple_placeholder = get_page_placeholders(simple_page, 'en').get(slot='content')
text_plugin = add_plugin(
simple_placeholder,
'TextPlugin',
'en',
body="I'm the first",
)
child_plugins = [
self._add_child_plugin(text_plugin),
self._add_child_plugin(text_plugin),
]
for plugin in child_plugins:
text_plugin = self.add_plugin_to_text(text_plugin, plugin)
with self.login_user_context(self.get_superuser()):
request = self.get_request()
context = RequestContext(request)
context['request'] = request
text_with_rendered_plugins = plugin_tags_to_admin_html(
text=text_plugin.body,
context=context,
)
endpoint = self.get_change_plugin_uri(text_plugin)
response = self.client.get(endpoint)
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.context['adminform'].form['body'].value(),
text_with_rendered_plugins,
)
self.assertContains(
response,
escape(text_with_rendered_plugins),
html=False,
)
def test_user_cant_edit_child_plugins_directly(self):
"""
No user regardless of permissions can modify the contents
of a child plugin directly in the text plugin text.
"""
simple_page = create_page('test page', 'page.html', 'en')
simple_placeholder = get_page_placeholders(simple_page, 'en').get(slot='content')
text_plugin = add_plugin(
simple_placeholder,
'TextPlugin',
'en',
body="I'm the first",
)
child_plugins = [
self._add_child_plugin(text_plugin),
self._add_child_plugin(text_plugin),
]
for plugin in child_plugins:
text_plugin = self.add_plugin_to_text(text_plugin, plugin)
with self.login_user_context(self.get_superuser()):
expected_text = text_plugin.body
# This returns the child plugins with their content
# overridden to <img src="">
overridden_text = self._replace_plugin_contents(
text_plugin.body,
new_plugin_content='<img src="">',
)
endpoint = self.get_change_plugin_uri(text_plugin)
response = self.client.post(endpoint, {'body': overridden_text})
text_plugin.refresh_from_db()
self.assertEqual(response.status_code, 200)
self.assertXMLEqual(text_plugin.body, expected_text)
def test_render_child_plugin_endpoint(self):
simple_page = create_page('test page', 'page.html', 'en')
simple_placeholder = get_page_placeholders(simple_page, 'en').get(slot='content')
text_plugin = add_plugin(
simple_placeholder,
'TextPlugin',
'en',
body="I'm the first",
)
text_plugin_class = text_plugin.get_plugin_class_instance()
child_plugin = self._add_child_plugin(text_plugin)
text_plugin = self.add_plugin_to_text(text_plugin, child_plugin)
with self.login_user_context(self.get_superuser()):
request = self.get_request()
action_token = text_plugin_class.get_action_token(request, text_plugin)
endpoint = self.get_custom_admin_url(TextPlugin, 'render_plugin')
endpoint += f'?token={action_token}&plugin={child_plugin.pk}'
response = self.client.get(endpoint)
self.assertEqual(response.status_code, 200)
context = RequestContext(request)
context['request'] = request
rendered_content = _render_cms_plugin(child_plugin, context)
rendered_child_plugin = plugin_to_tag(
child_plugin,
content=rendered_content,
admin=True,
)
self.assertEqual(force_str(response.content), rendered_child_plugin)
child_plugin = self._add_child_plugin(text_plugin, plugin_type='PreviewDisabledPlugin')
text_plugin = self.add_plugin_to_text(text_plugin, child_plugin)
with self.login_user_context(self.get_superuser()):
request = self.get_request()
action_token = text_plugin_class.get_action_token(request, text_plugin)
endpoint = self.get_custom_admin_url(TextPlugin, 'render_plugin')
endpoint += f'?token={action_token}&plugin={child_plugin.pk}'
response = self.client.get(endpoint)
self.assertEqual(response.status_code, 200)
# it is important that we do not add any extra whitespace inside of
# <cms-plugin></cms-plugin>
rendered_child_plugin = ('<cms-plugin render-plugin=false '
'alt="Preview Disabled Plugin - 3 '
'"title="Preview Disabled Plugin - 3" '
'id="3"><span>Preview is disabled for this plugin</span>'
'</cms-plugin>')
self.assertEqual(force_str(response.content), rendered_child_plugin)
def test_render_child_plugin_endpoint_calls_context_processors(self):
simple_page = create_page('test page', 'page.html', 'en')
simple_placeholder = get_page_placeholders(simple_page, 'en').get(slot='content')
text_plugin = add_plugin(
simple_placeholder,
'TextPlugin',
'en',
body="I'm the first",
)
text_plugin_class = text_plugin.get_plugin_class_instance()
child_plugin = self._add_child_plugin(
text_plugin,
plugin_type='SekizaiPlugin',
)
text_plugin = self.add_plugin_to_text(text_plugin, child_plugin)
with self.login_user_context(self.get_superuser()):
request = self.get_request()
action_token = text_plugin_class.get_action_token(request, text_plugin)
endpoint = self.get_custom_admin_url(TextPlugin, 'render_plugin')
endpoint += f'?token={action_token}&plugin={child_plugin.pk}'
response = self.client.get(endpoint)
self.assertEqual(response.status_code, 200)
context = RequestContext(request)
context['request'] = request
rendered_content = _render_cms_plugin(child_plugin, context)
rendered_child_plugin = plugin_to_tag(
child_plugin,
content=rendered_content,
admin=True,
)
self.assertEqual(force_str(response.content), rendered_child_plugin)
def test_render_child_plugin_permissions(self):
"""
Users can't render a child plugin without change permissions
        on the placeholder's attached object and the text plugin.
"""
simple_page = create_page('test page', 'page.html', 'en')
simple_placeholder = get_page_placeholders(simple_page, 'en').get(slot='content')
text_plugin = add_plugin(
simple_placeholder,
'TextPlugin',
'en',
body="I'm the first",
)
text_plugin_class = text_plugin.get_plugin_class_instance()
child_plugin = self._add_child_plugin(text_plugin)
text_plugin = self.add_plugin_to_text(text_plugin, child_plugin)
with self.login_user_context(self.get_standard_user()):
request = self.get_request()
action_token = text_plugin_class.get_action_token(request, text_plugin)
endpoint = self.get_custom_admin_url(TextPlugin, 'render_plugin')
endpoint += f'?token={action_token}&plugin={child_plugin.pk}'
response = self.client.get(endpoint)
self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403, html=True)
def test_render_child_plugin_token_validation(self):
"""
Users can only render a child plugin if the token
        was created in the current session and its text plugin
matches the child plugin parent.
"""
simple_page = create_page('test page', 'page.html', 'en')
simple_placeholder = get_page_placeholders(simple_page, 'en').get(slot='content')
text_plugin = add_plugin(
simple_placeholder,
'TextPlugin',
'en',
body="I'm the first",
)
text_plugin_class = text_plugin.get_plugin_class_instance()
child_plugin = self._add_child_plugin(text_plugin)
text_plugin = self.add_plugin_to_text(text_plugin, child_plugin)
# Tokens are unique per session.
# Users can't render a child plugin with a token
# from another session.
with self.login_user_context(self.get_superuser()):
request = self.get_request()
with self.login_user_context(self.get_superuser()):
action_token = text_plugin_class.get_action_token(request, text_plugin)
endpoint = self.get_custom_admin_url(TextPlugin, 'render_plugin')
endpoint += f'?token={action_token}&plugin={child_plugin.pk}'
response = self.client.get(endpoint)
self.assertEqual(response.status_code, 400)
self.assertEqual(force_str(response.content), 'Unable to process your request. Invalid token.')
text_plugin_2 = add_plugin(
simple_placeholder,
'TextPlugin',
'en',
body="I'm the second",
)
# Tokens are unique per text plugin.
# User can't render a child plugin for a token whose text plugin
# does not match the plugin's parent.
with self.login_user_context(self.get_superuser()):
request = self.get_request()
action_token = text_plugin_class.get_action_token(request, text_plugin_2)
endpoint = self.get_custom_admin_url(TextPlugin, 'render_plugin')
endpoint += f'?token={action_token}&plugin={child_plugin.pk}'
response = self.client.get(endpoint)
self.assertEqual(response.status_code, 400)
self.assertEqual(force_str(response.content), 'Unable to process your request.')
def test_custom_ckeditor_body_css_classes(self):
simple_page = create_page('test page', 'page.html', 'en')
simple_placeholder = get_page_placeholders(simple_page, 'en').get(slot='content')
parent_plugin = add_plugin(
simple_placeholder,
DummyParentPlugin,
'en',
label=DummyParentPlugin._ckeditor_body_class_label_trigger,
)
child_plugin = add_plugin(
simple_placeholder,
DummyChildPlugin,
'en',
target=parent_plugin,
)
text_plugin = add_plugin(
simple_placeholder,
'TextPlugin',
'en',
body='Content',
target=child_plugin,
)
with self.login_user_context(self.get_superuser()):
change_endpoint = self.get_change_plugin_uri(text_plugin)
response = self.client.get(change_endpoint)
self.assertContains(response, DummyParentPlugin._ckeditor_body_class)
self.assertContains(response, DummyChildPlugin.child_ckeditor_body_css_class)
def test_render_plugin(self):
simple_page = create_page('test page', 'page.html', 'en')
simple_placeholder = get_page_placeholders(simple_page, 'en').get(slot='content')
text_plugin = self._add_text_plugin(simple_placeholder)
for i in range(0, 10):
plugin = self._add_child_plugin(
text_plugin,
plugin_type='LinkPlugin',
data_suffix=i,
)
text_plugin = self.add_plugin_to_text(text_plugin, plugin)
with self.assertNumQueries(2):
request = self.get_request()
context = RequestContext(request)
context['request'] = request
rendered = _render_cms_plugin(text_plugin, context)
for i in range(0, 10):
self.assertTrue('LinkPlugin record %d' % i in rendered)
def test_render_extended_plugin(self):
simple_page = create_page('test page', 'page.html', 'en')
simple_placeholder = get_page_placeholders(simple_page, 'en').get(slot='content')
text_plugin = self._add_text_plugin(simple_placeholder, 'ExtendedTextPlugin')
for i in range(0, 10):
plugin = self._add_child_plugin(
text_plugin,
plugin_type='LinkPlugin',
data_suffix=i,
)
text_plugin = self.add_plugin_to_text(text_plugin, plugin)
with self.assertNumQueries(2):
request = self.get_request()
context = RequestContext(request)
context['request'] = request
rendered = _render_cms_plugin(text_plugin, context)
for i in range(0, 10):
self.assertTrue('LinkPlugin record %d' % i in rendered)
def test_copy_plugin_integrity(self):
"""
        Test that copying text plugins replaces references to the copied child plugins
"""
simple_page = create_page('test page', 'page.html', 'en')
simple_placeholder = get_page_placeholders(simple_page, 'en').get(slot='content')
text_plugin = self._add_text_plugin(simple_placeholder)
child_plugin_1 = self._add_child_plugin(
text_plugin,
plugin_type='LinkPlugin',
)
text_plugin = self.add_plugin_to_text(text_plugin, child_plugin_1)
child_plugin_2 = self._add_child_plugin(
text_plugin,
plugin_type='LinkPlugin',
)
text_plugin = self.add_plugin_to_text(text_plugin, child_plugin_2)
# create a page translation to copy plugins to
translation = create_title(
'fr',
'test-page-fr',
simple_page,
slug='test-page-fr',
)
self.assertEqual(CMSPlugin.objects.filter(language='en').count(), 3)
self.assertEqual(CMSPlugin.objects.filter(language=translation.language).count(), 0)
data = {
'source_placeholder_id': simple_placeholder.pk,
'target_placeholder_id': simple_placeholder.pk,
'target_language': translation.language,
'source_language': 'en',
}
endpoint = self.get_admin_url(Page, 'copy_plugins')
endpoint += '?' + urlencode({'cms_path': '/en/'})
with self.login_user_context(self.user):
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(CMSPlugin.objects.filter(language='en').count(), 3)
self.assertEqual(CMSPlugin.objects.filter(language=translation.language).count(), 3)
plugins = list(CMSPlugin.objects.all())
new_plugin = plugins[3].get_plugin_instance()[0]
idlist = sorted(plugin_tags_to_id_list(new_plugin.body))
expected = sorted([plugins[4].pk, plugins[5].pk])
self.assertEqual(idlist, expected)
def test_copy_plugin_callback(self):
simple_page = create_page('test page', 'page.html', 'en')
simple_placeholder = get_page_placeholders(simple_page, 'en').get(slot='content')
text_plugin_1 = self._add_text_plugin(simple_placeholder)
child_plugin_1_a = self._add_child_plugin(
text_plugin_1,
plugin_type='LinkPlugin',
)
text_plugin_1 = self.add_plugin_to_text(text_plugin_1, child_plugin_1_a)
child_plugin_1_b = self._add_child_plugin(
text_plugin_1,
plugin_type='LinkPlugin',
)
text_plugin_1 = self.add_plugin_to_text(text_plugin_1, child_plugin_1_b)
text_plugin_2 = copy.copy(text_plugin_1)
text_plugin_2.pk = None
text_plugin_2.save()
child_plugin_2_a = self._add_child_plugin(
text_plugin_2,
plugin_type='LinkPlugin',
)
child_plugin_2_b = self._add_child_plugin(
text_plugin_2,
plugin_type='LinkPlugin',
)
source_map = {
child_plugin_1_a.pk: child_plugin_2_a,
child_plugin_1_b.pk: child_plugin_2_b,
}
TextPlugin.do_post_copy(text_plugin_2, source_map)
text_plugin_2.refresh_from_db()
idlist = sorted(plugin_tags_to_id_list(text_plugin_2.body))
expected = sorted([child_plugin_2_a.pk, child_plugin_2_b.pk])
self.assertEqual(idlist, expected)
def test_plugin_tags_to_id_list(self):
pairs = (
('<cms-plugin id="1"></cms-plugin><cms-plugin id="2"></cms-plugin>', [1, 2]),
('<cms-plugin alt="<h1>markup</h1>" id="1"></cms-plugin><cms-plugin id="1"></cms-plugin>', [1, 1]),
)
for markup, expected in pairs:
self.assertEqual(plugin_tags_to_id_list(markup), expected)
def test_text_plugin_xss(self):
page = create_page('test page', 'page.html', 'en')
placeholder = get_page_placeholders(page, 'en').get(slot='content')
plugin = add_plugin(placeholder, 'TextPlugin', 'en', body='body')
endpoint = self.get_change_plugin_uri(plugin)
with self.login_user_context(self.user):
data = {
'body': (
'<div onload="do_evil_stuff();">divcontent</div><a href="javascript:do_evil_stuff();">acontent</a>'
),
}
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(self.reload(plugin).body, '<div>divcontent</div><a>acontent</a>')
@unittest.skipUnless(
HAS_DJANGOCMS_TRANSLATIONS and HAS_DJANGOCMS_TRANSFER,
'Optional dependencies for tests are not installed.',
)
class DjangoCMSTranslationsIntegrationTestCase(BaseTestCase):
def setUp(self):
super().setUp()
self.page = create_page('test page', 'page.html', 'en', published=True)
self.placeholder = get_page_placeholders(self.page, 'en').get(slot='content')
def _export_page(self):
return json.loads(export_page(self.page, 'en'))
def test_textfield_without_children(self):
raw_content = '<p>Please <a href="http://www.google.com">CLICK ON LINK1</a> to go to link1.</p>'
add_plugin(self.placeholder, 'TextPlugin', 'en', body=raw_content)
plugin = self._export_page()[0]['plugins'][0]
result, children_included_in_this_content = TextPlugin.get_translation_export_content('body', plugin['data'])
self.assertEqual(result, raw_content)
self.assertEqual(children_included_in_this_content, [])
result = TextPlugin.set_translation_import_content(result, plugin)
self.assertDictEqual(result, {})
def test_textfield_with_children(self):
parent = add_plugin(self.placeholder, 'TextPlugin', 'en', body='')
child1 = add_plugin(self.placeholder, 'DummyLinkPlugin', 'en', target=parent, label='CLICK ON LINK1')
parent_body = (
'<p>Please <cms-plugin alt="Dummy Link Plugin - dummy link object "'
'title="Dummy Link Plugin - dummy link object" id="{}"></cms-plugin> to go to link1.</p>'
).format(child1.pk)
parent.body = parent_body
parent.save()
plugin = self._export_page()[0]['plugins'][0]
result, children_included_in_this_content = TextPlugin.get_translation_export_content('body', plugin['data'])
expected = (
parent_body
.replace('></cms-plugin>', '>CLICK ON LINK1</cms-plugin>', 1)
)
self.assertEqual(result, expected)
self.assertEqual(children_included_in_this_content, [child1.pk])
result = TextPlugin.set_translation_import_content(result, plugin)
self.assertDictEqual(result, {child1.pk: 'CLICK ON LINK1'})
def test_textfield_with_multiple_children(self):
parent = add_plugin(self.placeholder, 'TextPlugin', 'en', body='')
child1 = add_plugin(self.placeholder, 'DummyLinkPlugin', 'en', target=parent, label='CLICK ON LINK1')
child2 = add_plugin(self.placeholder, 'DummyLinkPlugin', 'en', target=parent, label='CLICK ON LINK2')
parent_body = (
'<p>Please <cms-plugin alt="Dummy Link Plugin - dummy link object "'
'title="Dummy Link Plugin - dummy link object" id="{}"></cms-plugin> to go to link1 '
'or <cms-plugin alt="Dummy Link Plugin - dummy link object "'
'title="Dummy Link Plugin - dummy link object" id="{}"></cms-plugin> to go to link2.</p>'
).format(child1.pk, child2.pk)
parent.body = parent_body
parent.save()
plugin = self._export_page()[0]['plugins'][0]
result, children_included_in_this_content = TextPlugin.get_translation_export_content('body', plugin['data'])
expected = (
parent_body
.replace('></cms-plugin>', '>CLICK ON LINK1</cms-plugin>', 1)
.replace('></cms-plugin>', '>CLICK ON LINK2</cms-plugin>', 1)
)
self.assertEqual(result, expected)
self.assertEqual(children_included_in_this_content, [child1.pk, child2.pk])
result = TextPlugin.set_translation_import_content(result, plugin)
self.assertDictEqual(result, {child1.pk: 'CLICK ON LINK1', child2.pk: 'CLICK ON LINK2'})
def test_textfield_with_multiple_children_one_deleted(self):
parent = add_plugin(self.placeholder, 'TextPlugin', 'en', body='')
child1 = add_plugin(self.placeholder, 'DummyLinkPlugin', 'en', target=parent, label='CLICK ON LINK1')
child2 = add_plugin(self.placeholder, 'DummyLinkPlugin', 'en', target=parent, label='CLICK ON LINK2')
parent_body = (
'<p>Please <cms-plugin alt="Dummy Link Plugin - dummy link object "'
'title="Dummy Link Plugin - dummy link object" id="{}"></cms-plugin> to go to link1 '
'or <cms-plugin alt="Dummy Link Plugin - dummy link object "'
'title="Dummy Link Plugin - dummy link object" id="{}"></cms-plugin> to go to link2.</p>'
).format(child1.pk, child2.pk)
parent.body = parent_body
parent.save()
plugin = self._export_page()[0]['plugins'][0]
child1.delete()
result, children_included_in_this_content = TextPlugin.get_translation_export_content('body', plugin['data'])
expected = (
'<p>Please to go to link1 '
'or <cms-plugin alt="Dummy Link Plugin - dummy link object "'
'title="Dummy Link Plugin - dummy link object" id="{}">CLICK ON LINK2</cms-plugin> to go to link2.</p>'
).format(child2.pk)
self.assertEqual(result, expected)
self.assertEqual(children_included_in_this_content, [child2.pk])
result = TextPlugin.set_translation_import_content(result, plugin)
self.assertDictEqual(result, {child2.pk: 'CLICK ON LINK2'})
def test_textfield_with_untranslatable_children(self):
parent = add_plugin(self.placeholder, 'TextPlugin', 'en', body='')
child1 = add_plugin(self.placeholder, 'DummySpacerPlugin', 'en', target=parent)
parent_body = (
'<p>This is cool <cms-plugin alt="Dummy Spacer Plugin - dummy spacer object "'
'title="Dummy Spacer Plugin - dummy spacer object" id="{}"></cms-plugin> this is nice</p>'
).format(child1.pk)
parent.body = parent_body
parent.save()
plugin = self._export_page()[0]['plugins'][0]
result, children_included_in_this_content = TextPlugin.get_translation_export_content('body', plugin['data'])
expected = (
parent_body
)
self.assertEqual(result, expected)
self.assertEqual(children_included_in_this_content, [child1.pk])
result = TextPlugin.set_translation_import_content(result, plugin)
self.assertDictEqual(result, {child1.pk: ''})
|
the-stack_0_16350
|
from typing import List
import inspect
from functools import partial, wraps
from json import JSONEncoder
class JsonSerializable(JSONEncoder):
_klasses: List[type] = []
def __init__(self, kls):
super().__init__()
self.__kls = kls
self._klasses.append(kls)
def get_json_members(self):
return inspect.getmembers(
self.__kls, lambda o: isinstance(o, JsonProperty)
)
def scan_properties(self, o):
for name, property in self.get_json_members():
value = getattr(o, name)
if value.__class__ in self._klasses:
value = value.default()
elif isinstance(value, (list, tuple)):
value = [
v.default() if v.__class__ in self._klasses else v
for v in value
]
yield name, value
def default(self, o):
if isinstance(o, self.__kls):
return dict(self.scan_properties(o))
return super().default(o)
def __call__(self, *args, **kwargs):
@wraps(self.__kls)
def wrapped(cls):
cls.__json__ = True
instance = cls(*args, **kwargs)
# setattr(inspect, 'default', partial(self.default, instance))
setattr(instance, 'default', partial(self.default, instance))
return instance
return wrapped(self.__kls)
class JsonProperty(property):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
json_property = JsonProperty
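

# Hedged usage sketch (not part of the original module): the decorator wraps a
# class so that calling the wrapped name returns an instance carrying a bound
# ``default`` method which serialises its JsonProperty members to a plain dict.
# The ``Point`` class and its properties below are illustrative assumptions.
if __name__ == '__main__':
    import json

    @JsonSerializable
    class Point:
        def __init__(self, x, y):
            self._x = x
            self._y = y

        x = json_property(lambda self: self._x)
        y = json_property(lambda self: self._y)

    p = Point(3, 4)                 # calling the wrapped class builds an instance
    print(p.default())              # expected: {'x': 3, 'y': 4}
    print(json.dumps(p.default()))  # the resulting dict serialises normally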
|
the-stack_0_16351
|
matriz = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
somap = 0
maior = 0
for l in range(0, 3):
for a in range(0, 3):
        matriz[l][a] = int(input(f'Enter a value for [{l}, {a}]: '))
print('-='*30)
for r in range(0, 3):
for i in range(0, 3):
print(f'[{matriz[r][i]:^5}]', end='')
'''
if matriz[r][i] % 2 == 0:
somap += e
'''
print()
print('-='*30)
for d in matriz:
for e in d:
if e % 2 == 0:
somap += e
print(f'The sum of the even values is {somap}')
somat = matriz[0][2] + matriz[1][2] + matriz[2][2]
'''
for k in range(0, 3):
somat += matriz[k][2]
'''
print(f'The sum of the values in the third column is {somat}.')
for f in matriz[1]:
if maior == 0 or f > maior:
maior = f
print(f'The largest value in the second row is {maior}')
|
the-stack_0_16353
|
from typing import List, Optional


class Heap:
    def __init__(self, A: Optional[List[int]] = None) -> None:
        """
        A heap represents a nearly-complete binary tree that maintains a heap property between parents and children.

        Let ~ represent an inequality, then the heap property maintains that
            A[i] ~ A[child[i]]
        and
            A[parent[i]] ~ A[i].
        Note: usually heaps are in-place and operations have constant space complexity.
        """
        if A is None:
            A = []
        # Array containing the elements of the heap.
        self.A = A
        # Length of the complete array.
        self.length = len(A)
def compare(self, i:int, j:int) -> bool:
"""
Takes values and performs a comparison based on relationship ~.
:param i: First value.
:type i: int
:param j: Second value.
:type j: int
:return: True if the relationship is held, False otherwise.
:rtype: bool
"""
pass
def left(self, i) -> int:
"""
Takes a node index and returns the left child.
:param i: Node index.
:type i: int
:return: Index of the left node.
:rtype: int
"""
assert i >= 0
return 2*i
def right(self, i) -> int:
"""
Takes a node index and returns the right child.
:param i: Node index.
:type i: int
:return: Index of the right node.
:rtype: int
"""
assert i >= 0
return 2*i + 1
def swap(self, i:int, j:int) -> None:
"""
Swaps two elements in the heap if the indices are valid.
:param i: Position of first element.
:type i: int
:param j: Position of second element.
:type j: int
:return: None.
"""
        if 0 <= i < self.length and 0 <= j < self.length:
            A = self.A
            A[i], A[j] = A[j], A[i]
def heapify(self, i:int) -> None:
"""
Heapify takes an index and ensures the heap property for node i if the left and right children are heaps.
:param i: Index of a node to heapify.
:type i: int
:rtype: None
"""
assert 0 <= i < self.length
A = self.A
l = self.left(i)
r = self.right(i)
        if r < self.length:
# Case 1: all indices are valid.
if not self.compare(A[i], A[l]) and self.compare(A[l], A[r]):
# Case 1a: left ~ parent and left ~ right
self.swap(i, l)
self.heapify(l)
elif not self.compare(A[i], A[r]):
# Case 1b: right ~ parent and right ~ left
self.swap(i, r)
self.heapify(r)
# Case 1c: parent ~ left and parent ~ right, so the heap property is maintained.
        elif l < self.length:
# Case 2: the right index is not valid, but all others are.
if not self.compare(A[i], A[l]):
self.swap(i, l)
                # Right child index was out of range, so the heapify recursion ends here.
# Case 3: parent is a leaf node.
def make_heap(A: List[int]) -> Heap:
    """
    Constructs a Heap from an unsorted array of elements.

    Takes O(n*lg(n)) time complexity and O(n) space.
    :param A: An array of values to add to the heap.
    :return: A Heap with values from the array.
    """
    heap = Heap(A)
    # Walk from the last internal node down to the root; the original
    # range(len(A) // 2, 0) was empty because it lacked a negative step.
    for i in range(len(A) // 2, 0, -1):
        heap.heapify(i)
return heap
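

# Hedged example (not part of the original module): a concrete min-heap obtained
# by implementing ``compare`` with <=. The build loop mirrors the child index
# formulas 2*i and 2*i+1 used by ``left``/``right`` above, so index 0 is left as
# an unused slot and index 1 acts as the root. The input values are illustrative.
if __name__ == '__main__':
    class MinHeap(Heap):
        def compare(self, i: int, j: int) -> bool:
            # Heap property for a min-heap: parent value <= child value.
            return i <= j

    h = MinHeap([0, 9, 4, 7, 1, 3])
    for k in range(h.length // 2, 0, -1):
        h.heapify(k)
    print(h.A)  # heap-ordered over indices 1..n-1, e.g. [0, 1, 3, 7, 4, 9]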
|
the-stack_0_16354
|
"""This is a test to test the paraview proxy manager API."""
from paraview import servermanager
import sys
servermanager.Connect()
sources = servermanager.sources.__dict__
for source in sources:
try:
sys.stderr.write('Creating %s...'%(source))
s = sources[source]()
s.UpdateVTKObjects()
sys.stderr.write('ok\n')
except:
sys.stderr.write('failed\n')
raise RuntimeError('ERROR: Failed to create %s'%(source))
|
the-stack_0_16356
|
#!/usr/bin/env python
from distutils.core import setup
LONG_DESCRIPTION = \
'''
This program is a basic python conversion of Mick Watson's Ideel.
It reads one or more input FASTA files and for each file it will use
prodigal for rapid annotation, then run diamond blast, then compare the
query length to hit length.
It was built with the help of 'Bionitio'
'''
setup(
name='pydeel',
version='0.2.0.0',
author='Alistair Legione',
author_email='[email protected]',
packages=['pydeel'],
package_dir={'pydeel': 'pydeel'},
entry_points={
'console_scripts': ['pydeel = pydeel.pydeel:main']
},
url='https://github.com/alegione/pydeel',
license='LICENSE',
description=('Assembly completion by annotation assessment'),
long_description=(LONG_DESCRIPTION),
install_requires=["argparse", "pandas", "altair", "seaborn", "selenium", "datetime", "Bio"],
)
|
the-stack_0_16360
|
import sys
import toml
import nltk
import logging
from typing import List
from pathlib import Path
logging.basicConfig(
format="%(asctime)s (PID %(process)d) [%(levelname)s] %(filename)s:%(lineno)d %(message)s",
level=logging.INFO,
handlers=[logging.StreamHandler(sys.stdout)],
)
BASE_DIR = Path(__file__).parent.parent.absolute()
with open(BASE_DIR / "pyproject.toml", "r", encoding="utf-8") as f:
CONFIG = toml.load(f)
TOKENS: List[str] = []
if not (BASE_DIR / "tokens.txt").exists():
logging.error("No tokens.txt file found. Please create one.")
else:
with open(BASE_DIR / "tokens.txt") as f:
TOKENS = f.read().strip().split("\n")
nltk.download("wordnet")
nltk.download("omw-1.4")
|
the-stack_0_16362
|
# vim: expandtab:ts=4:sw=4
from __future__ import absolute_import
import numpy as np
from scipy.optimize import linear_sum_assignment
from . import kalman_filter
INFTY_COST = 1e+5
def min_cost_matching(
distance_metric, max_distance, tracks, detections, track_indices=None,
detection_indices=None):
"""Solve linear assignment problem.
Parameters
----------
distance_metric : Callable[List[Track], List[Detection], List[int], List[int]) -> ndarray
The distance metric is given a list of tracks and detections as well as
a list of N track indices and M detection indices. The metric should
return the NxM dimensional cost matrix, where element (i, j) is the
association cost between the i-th track in the given track indices and
the j-th detection in the given detection_indices.
max_distance : float
Gating threshold. Associations with cost larger than this value are
disregarded.
tracks : List[track.Track]
A list of predicted tracks at the current time step.
detections : List[detection.Detection]
A list of detections at the current time step.
track_indices : List[int]
List of track indices that maps rows in `cost_matrix` to tracks in
`tracks` (see description above).
detection_indices : List[int]
List of detection indices that maps columns in `cost_matrix` to
detections in `detections` (see description above).
Returns
-------
(List[(int, int)], List[int], List[int])
Returns a tuple with the following three entries:
* A list of matched track and detection indices.
* A list of unmatched track indices.
* A list of unmatched detection indices.
"""
if track_indices is None:
track_indices = np.arange(len(tracks))
if detection_indices is None:
detection_indices = np.arange(len(detections))
if len(detection_indices) == 0 or len(track_indices) == 0:
return [], track_indices, detection_indices # Nothing to match.
cost_matrix = distance_metric(
tracks, detections, track_indices, detection_indices)
cost_matrix[cost_matrix > max_distance] = max_distance + 1e-5
indices = linear_sum_assignment(cost_matrix)
indices = np.asarray(indices)
indices = np.transpose(indices)
matches, unmatched_tracks, unmatched_detections = [], [], []
for col, detection_idx in enumerate(detection_indices):
if col not in indices[:, 1]:
unmatched_detections.append(detection_idx)
for row, track_idx in enumerate(track_indices):
if row not in indices[:, 0]:
unmatched_tracks.append(track_idx)
for row, col in indices:
track_idx = track_indices[row]
detection_idx = detection_indices[col]
if cost_matrix[row, col] > max_distance:
unmatched_tracks.append(track_idx)
unmatched_detections.append(detection_idx)
else:
matches.append((track_idx, detection_idx))
return matches, unmatched_tracks, unmatched_detections
def matching_cascade(
distance_metric, max_distance, cascade_depth, tracks, detections,
track_indices=None, detection_indices=None):
"""Run matching cascade.
Parameters
----------
distance_metric : Callable[List[Track], List[Detection], List[int], List[int]) -> ndarray
The distance metric is given a list of tracks and detections as well as
a list of N track indices and M detection indices. The metric should
return the NxM dimensional cost matrix, where element (i, j) is the
association cost between the i-th track in the given track indices and
the j-th detection in the given detection indices.
    max_distance : float
        Gating threshold. Associations with cost larger than this value are
        disregarded.
    cascade_depth : int
        The cascade depth, should be set to the maximum track age.
tracks : List[track.Track]
A list of predicted tracks at the current time step.
detections : List[detection.Detection]
A list of detections at the current time step.
track_indices : Optional[List[int]]
List of track indices that maps rows in `cost_matrix` to tracks in
`tracks` (see description above). Defaults to all tracks.
detection_indices : Optional[List[int]]
List of detection indices that maps columns in `cost_matrix` to
detections in `detections` (see description above). Defaults to all
detections.
Returns
-------
(List[(int, int)], List[int], List[int])
Returns a tuple with the following three entries:
* A list of matched track and detection indices.
* A list of unmatched track indices.
* A list of unmatched detection indices.
"""
if track_indices is None:
track_indices = list(range(len(tracks)))
if detection_indices is None:
detection_indices = list(range(len(detections)))
unmatched_detections = detection_indices
matches = []
for level in range(cascade_depth):
if len(unmatched_detections) == 0: # No detections left
break
track_indices_l = [
k for k in track_indices
if tracks[k].time_since_update == 1 + level
]
if len(track_indices_l) == 0: # Nothing to match at this level
continue
matches_l, _, unmatched_detections = \
min_cost_matching(
distance_metric, max_distance, tracks, detections,
track_indices_l, unmatched_detections)
matches += matches_l
unmatched_tracks = list(set(track_indices) - set(k for k, _ in matches))
return matches, unmatched_tracks, unmatched_detections
def gate_cost_matrix(
kf, cost_matrix, tracks, detections, track_indices, detection_indices,
gated_cost=INFTY_COST, only_position=False):
"""Invalidate infeasible entries in cost matrix based on the state
distributions obtained by Kalman filtering.
얻은 매트릭스 정보를 기반으로 무시할 개체를 선별
Parameters
----------
kf : The Kalman filter.
cost_matrix : ndarray
The NxM dimensional cost matrix, where N is the number of track indices
and M is the number of detection indices, such that entry (i, j) is the
association cost between `tracks[track_indices[i]]` and
`detections[detection_indices[j]]`.
tracks : List[track.Track]
A list of predicted tracks at the current time step.
detections : List[detection.Detection]
A list of detections at the current time step.
track_indices : List[int]
List of track indices that maps rows in `cost_matrix` to tracks in
`tracks` (see description above).
detection_indices : List[int]
List of detection indices that maps columns in `cost_matrix` to
detections in `detections` (see description above).
gated_cost : Optional[float]
Entries in the cost matrix corresponding to infeasible associations are
set this value. Defaults to a very large value.
only_position : Optional[bool]
If True, only the x, y position of the state distribution is considered
during gating. Defaults to False.
Returns
-------
ndarray
Returns the modified cost matrix.
"""
gating_dim = 2 if only_position else 4
gating_threshold = kalman_filter.chi2inv95[gating_dim]
measurements = np.asarray(
[detections[i].to_xyah() for i in detection_indices])
for row, track_idx in enumerate(track_indices):
track = tracks[track_idx]
gating_distance = kf.gating_distance(
track.mean, track.covariance, measurements, only_position)
cost_matrix[row, gating_distance > gating_threshold] = gated_cost
return cost_matrix
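

# Hedged illustration (not part of the original module): ``min_cost_matching``
# only forwards ``tracks`` and ``detections`` to the metric, so plain scalar
# positions are enough for a toy demonstration. The metric, positions and
# threshold below are illustrative assumptions, not values from the tracker.
def _toy_metric(tracks, detections, track_indices, detection_indices):
    # Absolute distance between 1-D "track" and "detection" positions.
    return np.array([[abs(tracks[t] - detections[d]) for d in detection_indices]
                     for t in track_indices], dtype=np.float64)


def _toy_matching_demo():
    # Track 0 pairs with detection 0 (cost 0.5 <= 2.0); every other candidate
    # pair exceeds the gating threshold, so track 1 and detection 1 remain
    # unmatched. Numerically the result is ([(0, 0)], [1], [1]).
    return min_cost_matching(
        _toy_metric, max_distance=2.0, tracks=[0.0, 10.0], detections=[0.5, 25.0])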
|
the-stack_0_16363
|
import sys
import argparse
from svtools.external_cmd import ExternalCmd
class BedpeSort(ExternalCmd):
def __init__(self):
super(BedpeSort, self).__init__('bedpesort', 'bin/bedpesort')
def description():
return 'sort a BEDPE file'
def epilog():
return 'To read in stdin and output to a file, use /dev/stdin or - as the first positional argument.'
def add_arguments_to_parser(parser):
parser.add_argument('input', metavar='<BEDPE file>', nargs='?', help='BEDPE file to sort')
parser.add_argument('output', metavar='<output file>', nargs='?', help='output file to write to')
parser.set_defaults(entry_point=run_from_args)
def command_parser():
parser = argparse.ArgumentParser(description=description())
add_arguments_to_parser(parser)
return parser
def run_from_args(args):
opts = list()
if args.input:
opts.append(args.input)
if args.output:
opts.append(args.output)
sort_cmd_runner = BedpeSort()
sort_cmd_runner.run_cmd_with_options(opts)
if __name__ == "__main__":
parser = command_parser()
args = parser.parse_args()
sys.exit(args.entry_point(args))
|
the-stack_0_16364
|
import asyncio
from weakref import ref
from decimal import Decimal
import re
import threading
import traceback, sys
from typing import TYPE_CHECKING, List
from kivy.app import App
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.compat import string_types
from kivy.properties import (ObjectProperty, DictProperty, NumericProperty,
ListProperty, StringProperty)
from kivy.uix.recycleview import RecycleView
from kivy.uix.label import Label
from kivy.uix.behaviors import ToggleButtonBehavior
from kivy.uix.image import Image
from kivy.lang import Builder
from kivy.factory import Factory
from kivy.utils import platform
from electrum.bitcoin import TYPE_ADDRESS
from electrum.util import profiler, parse_URI, format_time, InvalidPassword, NotEnoughFunds, Fiat
from electrum.util import PR_TYPE_ONCHAIN, PR_TYPE_LN
from electrum import bitcoin, constants
from electrum.transaction import TxOutput, Transaction, tx_from_str
from electrum.util import send_exception_to_crash_reporter, parse_URI, InvalidBitcoinURI
from electrum.util import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED, PR_INFLIGHT, TxMinedInfo, get_request_status, pr_expiration_values
from electrum.plugin import run_hook
from electrum.wallet import InternalAddressCorruption
from electrum import simple_config
from electrum.lnaddr import lndecode
from electrum.lnutil import RECEIVED, SENT, PaymentFailure
from .dialogs.question import Question
from .dialogs.lightning_open_channel import LightningOpenChannelDialog
from electrum.gui.kivy.i18n import _
if TYPE_CHECKING:
from electrum.gui.kivy.main_window import ElectrumWindow
class HistoryRecycleView(RecycleView):
pass
class RequestRecycleView(RecycleView):
pass
class PaymentRecycleView(RecycleView):
pass
class CScreen(Factory.Screen):
__events__ = ('on_activate', 'on_deactivate', 'on_enter', 'on_leave')
action_view = ObjectProperty(None)
loaded = False
kvname = None
app = App.get_running_app() # type: ElectrumWindow
def _change_action_view(self):
app = App.get_running_app()
action_bar = app.root.manager.current_screen.ids.action_bar
_action_view = self.action_view
if (not _action_view) or _action_view.parent:
return
action_bar.clear_widgets()
action_bar.add_widget(_action_view)
def on_enter(self):
        # FIXME: use a proper event, don't rely on the screen's animation time
Clock.schedule_once(lambda dt: self.dispatch('on_activate'), .25)
pass
def update(self):
pass
@profiler
def load_screen(self):
self.screen = Builder.load_file('electrum/gui/kivy/uix/ui_screens/' + self.kvname + '.kv')
self.add_widget(self.screen)
self.loaded = True
self.update()
setattr(self.app, self.kvname + '_screen', self)
def on_activate(self):
if self.kvname and not self.loaded:
self.load_screen()
#Clock.schedule_once(lambda dt: self._change_action_view())
def on_leave(self):
self.dispatch('on_deactivate')
def on_deactivate(self):
pass
# note: this list needs to be kept in sync with another in qt
TX_ICONS = [
"unconfirmed",
"close",
"unconfirmed",
"close",
"clock1",
"clock2",
"clock3",
"clock4",
"clock5",
"confirmed",
]
class HistoryScreen(CScreen):
tab = ObjectProperty(None)
kvname = 'history'
cards = {}
def __init__(self, **kwargs):
self.ra_dialog = None
super(HistoryScreen, self).__init__(**kwargs)
def show_item(self, obj):
key = obj.key
tx = self.app.wallet.db.get_transaction(key)
if not tx:
return
self.app.tx_dialog(tx)
def get_card(self, tx_item): #tx_hash, tx_mined_status, value, balance):
is_lightning = tx_item.get('lightning', False)
timestamp = tx_item['timestamp']
key = tx_item.get('txid') or tx_item['payment_hash']
if is_lightning:
status = 0
txpos = tx_item['txpos']
status_str = 'unconfirmed' if timestamp is None else format_time(int(timestamp))
icon = "atlas://electrum/gui/kivy/theming/light/lightning"
message = tx_item['label']
fee_msat = tx_item['fee_msat']
fee = int(fee_msat/1000) if fee_msat else None
fee_text = '' if fee is None else 'fee: %d sat'%fee
else:
tx_hash = tx_item['txid']
conf = tx_item['confirmations']
txpos = tx_item['txpos_in_block'] or 0
height = tx_item['height']
tx_mined_info = TxMinedInfo(height=tx_item['height'],
conf=tx_item['confirmations'],
timestamp=tx_item['timestamp'])
status, status_str = self.app.wallet.get_tx_status(tx_hash, tx_mined_info)
icon = "atlas://electrum/gui/kivy/theming/light/" + TX_ICONS[status]
message = tx_item['label'] or tx_hash
fee = tx_item['fee_sat']
fee_text = '' if fee is None else 'fee: %d sat'%fee
ri = {}
ri['screen'] = self
ri['key'] = key
ri['icon'] = icon
ri['date'] = status_str
ri['message'] = message
ri['fee_text'] = fee_text
value = tx_item['value'].value
if value is not None:
ri['is_mine'] = value <= 0
ri['amount'] = self.app.format_amount(value, is_diff = True)
if 'fiat_value' in tx_item:
ri['quote_text'] = str(tx_item['fiat_value'])
return ri
def update(self, see_all=False):
wallet = self.app.wallet
if wallet is None:
return
history = sorted(wallet.get_full_history(self.app.fx).values(), key=lambda x: x.get('timestamp') or float('inf'), reverse=True)
history_card = self.screen.ids.history_container
history_card.data = [self.get_card(item) for item in history]
class SendScreen(CScreen):
kvname = 'send'
payment_request = None
payment_request_queued = None
parsed_URI = None
def set_URI(self, text):
if not self.app.wallet:
self.payment_request_queued = text
return
try:
uri = parse_URI(text, self.app.on_pr, loop=self.app.asyncio_loop)
except InvalidBitcoinURI as e:
self.app.show_info(_("Error parsing URI") + f":\n{e}")
return
self.parsed_URI = uri
amount = uri.get('amount')
self.screen.address = uri.get('address', '')
self.screen.message = uri.get('message', '')
self.screen.amount = self.app.format_amount_and_units(amount) if amount else ''
self.payment_request = None
self.screen.is_lightning = False
def set_ln_invoice(self, invoice):
try:
lnaddr = lndecode(invoice, expected_hrp=constants.net.SEGWIT_HRP)
except Exception as e:
self.app.show_info(invoice + _(" is not a valid Lightning invoice: ") + repr(e)) # repr because str(Exception()) == ''
return
self.screen.address = invoice
self.screen.message = dict(lnaddr.tags).get('d', None)
self.screen.amount = self.app.format_amount_and_units(lnaddr.amount * bitcoin.COIN) if lnaddr.amount else ''
self.payment_request = None
self.screen.is_lightning = True
def update(self):
if not self.loaded:
return
if self.app.wallet and self.payment_request_queued:
self.set_URI(self.payment_request_queued)
self.payment_request_queued = None
_list = self.app.wallet.get_invoices()
_list = [x for x in _list if x and x.get('status') != PR_PAID or x.get('rhash') in self.app.wallet.lnworker.logs]
payments_container = self.screen.ids.payments_container
payments_container.data = [self.get_card(item) for item in _list]
def show_item(self, obj):
self.app.show_invoice(obj.is_lightning, obj.key)
def get_card(self, item):
invoice_type = item['type']
status, status_str = get_request_status(item) # convert to str
if invoice_type == PR_TYPE_LN:
key = item['rhash']
log = self.app.wallet.lnworker.logs.get(key)
if item['status'] == PR_INFLIGHT and log:
status_str += '... (%d)'%len(log)
elif invoice_type == PR_TYPE_ONCHAIN:
key = item['id']
else:
raise Exception('unknown invoice type')
return {
'is_lightning': invoice_type == PR_TYPE_LN,
'is_bip70': 'bip70' in item,
'screen': self,
'status': status,
'status_str': status_str,
'key': key,
'memo': item['message'],
'amount': self.app.format_amount_and_units(item['amount'] or 0),
}
def do_clear(self):
self.screen.amount = ''
self.screen.message = ''
self.screen.address = ''
self.payment_request = None
self.screen.locked = False
self.parsed_URI = None
def set_request(self, pr):
self.screen.address = pr.get_requestor()
amount = pr.get_amount()
self.screen.amount = self.app.format_amount_and_units(amount) if amount else ''
self.screen.message = pr.get_memo()
self.screen.locked = True
self.payment_request = pr
def do_paste(self):
data = self.app._clipboard.paste().strip()
if not data:
self.app.show_info(_("Clipboard is empty"))
return
# try to decode as transaction
try:
raw_tx = tx_from_str(data)
tx = Transaction(raw_tx)
tx.deserialize()
except:
tx = None
if tx:
self.app.tx_dialog(tx)
return
lower = data.lower()
if lower.startswith('lightning:ln'):
lower = lower[10:]
# try to decode as URI/address
if lower.startswith('ln'):
self.set_ln_invoice(lower)
else:
self.set_URI(data)
def read_invoice(self):
address = str(self.screen.address)
if not address:
self.app.show_error(_('Recipient not specified.') + ' ' + _('Please scan a Bitcoin address or a payment request'))
return
if not self.screen.amount:
self.app.show_error(_('Please enter an amount'))
return
try:
amount = self.app.get_amount(self.screen.amount)
except:
self.app.show_error(_('Invalid amount') + ':\n' + self.screen.amount)
return
message = self.screen.message
if self.screen.is_lightning:
return self.app.wallet.lnworker.parse_bech32_invoice(address)
else:
if not bitcoin.is_address(address):
self.app.show_error(_('Invalid Bitcoin Address') + ':\n' + address)
return
outputs = [TxOutput(TYPE_ADDRESS, address, amount)]
return self.app.wallet.create_invoice(outputs, message, self.payment_request, self.parsed_URI)
def do_save(self):
invoice = self.read_invoice()
if not invoice:
return
self.app.wallet.save_invoice(invoice)
self.do_clear()
self.update()
def do_pay(self):
invoice = self.read_invoice()
if not invoice:
return
self.app.wallet.save_invoice(invoice)
self.do_clear()
self.update()
self.do_pay_invoice(invoice)
def do_pay_invoice(self, invoice):
if invoice['type'] == PR_TYPE_LN:
self._do_pay_lightning(invoice)
return
elif invoice['type'] == PR_TYPE_ONCHAIN:
do_pay = lambda rbf: self._do_pay_onchain(invoice, rbf)
if self.app.electrum_config.get('use_rbf'):
d = Question(_('Should this transaction be replaceable?'), do_pay)
d.open()
else:
do_pay(False)
else:
raise Exception('unknown invoice type')
def _do_pay_lightning(self, invoice):
attempts = 10
threading.Thread(target=self.app.wallet.lnworker.pay, args=(invoice['invoice'], invoice['amount'], attempts)).start()
def _do_pay_onchain(self, invoice, rbf):
# make unsigned transaction
outputs = invoice['outputs'] # type: List[TxOutput]
amount = sum(map(lambda x: x.value, outputs))
coins = self.app.wallet.get_spendable_coins(None)
try:
tx = self.app.wallet.make_unsigned_transaction(coins, outputs, None)
except NotEnoughFunds:
self.app.show_error(_("Not enough funds"))
return
except Exception as e:
traceback.print_exc(file=sys.stdout)
self.app.show_error(repr(e))
return
if rbf:
tx.set_rbf(True)
fee = tx.get_fee()
msg = [
_("Amount to be sent") + ": " + self.app.format_amount_and_units(amount),
_("Mining fee") + ": " + self.app.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.app.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append(_("Additional fees") + ": " + self.app.format_amount_and_units(x_fee_amount))
feerate_warning = simple_config.FEERATE_WARNING_HIGH_FEE
if fee > feerate_warning * tx.estimated_size() / 1000:
msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
msg.append(_("Enter your PIN code to proceed"))
self.app.protected('\n'.join(msg), self.send_tx, (tx, invoice))
def send_tx(self, tx, invoice, password):
if self.app.wallet.has_password() and password is None:
return
def on_success(tx):
if tx.is_complete():
self.app.broadcast(tx, invoice)
else:
self.app.tx_dialog(tx)
def on_failure(error):
self.app.show_error(error)
if self.app.wallet.can_sign(tx):
self.app.show_info("Signing...")
self.app.sign_tx(tx, password, on_success, on_failure)
else:
self.app.tx_dialog(tx)
class ReceiveScreen(CScreen):
kvname = 'receive'
def __init__(self, **kwargs):
super(ReceiveScreen, self).__init__(**kwargs)
Clock.schedule_interval(lambda dt: self.update(), 5)
def expiry(self):
return self.app.electrum_config.get('request_expiry', 3600) # 1 hour
def clear(self):
self.screen.address = ''
self.screen.amount = ''
self.screen.message = ''
self.screen.lnaddr = ''
def set_address(self, addr):
self.screen.address = addr
def on_address(self, addr):
req = self.app.wallet.get_request(addr)
self.screen.status = ''
if req:
self.screen.message = req.get('memo', '')
amount = req.get('amount')
self.screen.amount = self.app.format_amount_and_units(amount) if amount else ''
status = req.get('status', PR_UNKNOWN)
self.screen.status = _('Payment received') if status == PR_PAID else ''
def get_URI(self):
from electrum.util import create_bip21_uri
amount = self.screen.amount
if amount:
a, u = self.screen.amount.split()
assert u == self.app.base_unit
amount = Decimal(a) * pow(10, self.app.decimal_point())
return create_bip21_uri(self.screen.address, amount, self.screen.message)
def do_copy(self):
uri = self.get_URI()
self.app._clipboard.copy(uri)
self.app.show_info(_('Request copied to clipboard'))
def new_request(self, lightning):
amount = self.screen.amount
amount = self.app.get_amount(amount) if amount else 0
message = self.screen.message
if lightning:
key = self.app.wallet.lnworker.add_request(amount, message, self.expiry())
else:
addr = self.screen.address or self.app.wallet.get_unused_address()
if not addr:
self.app.show_info(_('No address available. Please remove some of your pending requests.'))
return
self.screen.address = addr
req = self.app.wallet.make_payment_request(addr, amount, message, self.expiry())
self.app.wallet.add_payment_request(req)
key = addr
self.clear()
self.update()
self.app.show_request(lightning, key)
def get_card(self, req):
is_lightning = req.get('type') == PR_TYPE_LN
if not is_lightning:
address = req['address']
key = address
else:
key = req['rhash']
address = req['invoice']
amount = req.get('amount')
description = req.get('memo', '')
status, status_str = get_request_status(req)
ci = {}
ci['screen'] = self
ci['address'] = address
ci['is_lightning'] = is_lightning
ci['key'] = key
ci['amount'] = self.app.format_amount_and_units(amount) if amount else ''
ci['memo'] = description
ci['status'] = status_str
ci['is_expired'] = status == PR_EXPIRED
return ci
def update(self):
if not self.loaded:
return
_list = self.app.wallet.get_sorted_requests()
requests_container = self.screen.ids.requests_container
requests_container.data = [self.get_card(item) for item in _list if item.get('status') != PR_PAID]
def show_item(self, obj):
self.app.show_request(obj.is_lightning, obj.key)
def expiration_dialog(self, obj):
from .dialogs.choice_dialog import ChoiceDialog
def callback(c):
self.app.electrum_config.set_key('request_expiry', c)
d = ChoiceDialog(_('Expiration date'), pr_expiration_values, self.expiry(), callback)
d.open()
def clear_requests_dialog(self):
expired = [req for req in self.app.wallet.get_sorted_requests() if req['status'] == PR_EXPIRED]
if len(expired) == 0:
return
def callback(c):
if c:
for req in expired:
is_lightning = req.get('lightning', False)
key = req['rhash'] if is_lightning else req['address']
self.app.wallet.delete_request(key)
self.update()
d = Question(_('Delete expired requests?'), callback)
d.open()
class TabbedCarousel(Factory.TabbedPanel):
'''Custom TabbedPanel using a carousel used in the Main Screen
'''
carousel = ObjectProperty(None)
def animate_tab_to_center(self, value):
scrlv = self._tab_strip.parent
if not scrlv:
return
idx = self.tab_list.index(value)
n = len(self.tab_list)
if idx in [0, 1]:
scroll_x = 1
elif idx in [n-1, n-2]:
scroll_x = 0
else:
scroll_x = 1. * (n - idx - 1) / (n - 1)
mation = Factory.Animation(scroll_x=scroll_x, d=.25)
mation.cancel_all(scrlv)
mation.start(scrlv)
def on_current_tab(self, instance, value):
self.animate_tab_to_center(value)
def on_index(self, instance, value):
current_slide = instance.current_slide
if not hasattr(current_slide, 'tab'):
return
tab = current_slide.tab
ct = self.current_tab
try:
if ct.text != tab.text:
carousel = self.carousel
carousel.slides[ct.slide].dispatch('on_leave')
self.switch_to(tab)
carousel.slides[tab.slide].dispatch('on_enter')
except AttributeError:
current_slide.dispatch('on_enter')
def switch_to(self, header):
# we have to replace the functionality of the original switch_to
if not header:
return
if not hasattr(header, 'slide'):
header.content = self.carousel
super(TabbedCarousel, self).switch_to(header)
try:
tab = self.tab_list[-1]
except IndexError:
return
self._current_tab = tab
tab.state = 'down'
return
carousel = self.carousel
self.current_tab.state = "normal"
header.state = 'down'
self._current_tab = header
# set the carousel to load the appropriate slide
# saved in the screen attribute of the tab head
slide = carousel.slides[header.slide]
if carousel.current_slide != slide:
carousel.current_slide.dispatch('on_leave')
carousel.load_slide(slide)
slide.dispatch('on_enter')
def add_widget(self, widget, index=0):
if isinstance(widget, Factory.CScreen):
self.carousel.add_widget(widget)
return
super(TabbedCarousel, self).add_widget(widget, index=index)
|
the-stack_0_16365
|
import os
import re
regex = list()
dir_path = os.path.dirname(os.path.realpath(__file__))
f = open(dir_path + '/../regex.txt')
lines = f.readlines()
for line in lines:
    if len(line) > 10:
        # line[1:-2] drops the first character and the last two (including the trailing newline)
        regex.append(re.compile('^' + line[1:-2] + '$'))
class Pincode:
@staticmethod
def validate(code):
for r in regex:
            if r.match(code) is not None:
return True
return False
|
the-stack_0_16367
|
import custom_paths
from pathlib import Path
import utils
import shutil
from typing import *
# This file contains some utility functions to modify/rename/remove saved results.
# It can be used for example if the names of some experiment results should be changed.
def rename_alg(exp_name: str, old_name: str, new_name: str):
print(f'Renaming alg "{old_name}" to "{new_name}" for {exp_name} experiments')
results_path = Path(custom_paths.get_results_path()) / exp_name
for task_path in results_path.iterdir():
if utils.existsDir(task_path / old_name):
shutil.move(task_path / old_name, task_path / new_name)
def remove_alg(exp_name: str, alg_name: str):
print(f'Removing alg "{alg_name}" for {exp_name} experiments')
results_path = Path(custom_paths.get_results_path()) / exp_name
for task_path in results_path.iterdir():
if utils.existsDir(task_path / alg_name):
shutil.rmtree(task_path / alg_name)
def replace_in_alg_name(exp_name: str, old_name: str, new_name: str):
print(f'Replacing "{old_name}" with "{new_name}" in alg names for {exp_name} experiments')
results_path = Path(custom_paths.get_results_path()) / exp_name
for task_path in results_path.iterdir():
for alg_path in task_path.iterdir():
alg_name = str(alg_path.name)
new_alg_name = alg_name.replace(old_name, new_name)
if alg_name != new_alg_name:
shutil.move(task_path / alg_name, task_path / new_alg_name)
def process_results(exp_name: str, f: Callable):
    print(f'Applying function to results for {exp_name} experiments')
results_path = Path(custom_paths.get_results_path()) / exp_name
for task_path in results_path.iterdir():
for alg_path in task_path.iterdir():
for split_path in alg_path.iterdir():
file_path = split_path / 'results.json'
if utils.existsFile(file_path):
results = utils.deserialize(file_path, use_json=True)
results = f(results)
utils.serialize(file_path, results, use_json=True)
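

# Hedged illustration (not part of the original utilities): a callback of the
# shape process_results expects; the 'postprocessed' key is an assumed example.
def _mark_as_postprocessed(results: dict) -> dict:
    results.setdefault('postprocessed', True)
    return results

# Example call (left commented out because process_results rewrites result files on disk):
# process_results('my_experiment', _mark_as_postprocessed)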
if __name__ == '__main__':
pass
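    # Example invocations (sketch): the experiment and algorithm names below
    # are hypothetical placeholders, not results that necessarily exist on disk.
    # rename_alg('my_exp', old_name='NN_old', new_name='NN_new')
    # remove_alg('my_exp', alg_name='NN_broken')
    # process_results('my_exp', lambda results: results)  # identity pass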
|
the-stack_0_16368
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""BERT classification or regression finetuning runner in TF 2.x."""
import functools
import json
import math
import os
# Import libraries
from absl import app
from absl import flags
from absl import logging
import gin
import tensorflow as tf
from official.common import distribute_utils
from official.modeling import performance
from official.nlp import optimization
from official.nlp.bert import bert_models
from official.nlp.bert import common_flags
from official.nlp.bert import configs as bert_configs
from official.nlp.bert import input_pipeline
from official.nlp.bert import model_saving_utils
from official.utils.misc import keras_utils
flags.DEFINE_enum(
'mode', 'train_and_eval', ['train_and_eval', 'export_only', 'predict'],
'One of {"train_and_eval", "export_only", "predict"}. `train_and_eval`: '
'trains the model and evaluates in the meantime. '
'`export_only`: will take the latest checkpoint inside '
'model_dir and export a `SavedModel`. `predict`: takes a checkpoint and '
'restores the model to output predictions on the test set.')
flags.DEFINE_string('train_data_path', None,
'Path to training data for BERT classifier.')
flags.DEFINE_string('eval_data_path', None,
'Path to evaluation data for BERT classifier.')
flags.DEFINE_string(
'input_meta_data_path', None,
'Path to file that contains meta data about input '
'to be used for training and evaluation.')
flags.DEFINE_integer('train_data_size', None, 'Number of training samples '
'to use. If None, uses the full train data. '
'(default: None).')
flags.DEFINE_string('predict_checkpoint_path', None,
'Path to the checkpoint for predictions.')
flags.DEFINE_integer(
'num_eval_per_epoch', 1,
'Number of evaluations per epoch. The purpose of this flag is to provide '
'more granular evaluation scores and checkpoints. For example, if original '
'data has N samples and num_eval_per_epoch is n, then each epoch will be '
'evaluated every N/n samples.')
flags.DEFINE_integer('train_batch_size', 32, 'Batch size for training.')
flags.DEFINE_integer('eval_batch_size', 32, 'Batch size for evaluation.')
common_flags.define_common_bert_flags()
FLAGS = flags.FLAGS
LABEL_TYPES_MAP = {'int': tf.int64, 'float': tf.float32}
def get_loss_fn(num_classes):
"""Gets the classification loss function."""
def classification_loss_fn(labels, logits):
"""Classification loss."""
labels = tf.squeeze(labels)
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(
tf.cast(labels, dtype=tf.int32), depth=num_classes, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(
tf.cast(one_hot_labels, dtype=tf.float32) * log_probs, axis=-1)
return tf.reduce_mean(per_example_loss)
return classification_loss_fn
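def _example_classification_loss():
  """Sketch (not called by the runner): exercises the loss returned by
  get_loss_fn on dummy tensors. Shapes and values are illustrative only."""
  loss_fn = get_loss_fn(num_classes=3)
  logits = tf.constant([[2.0, 0.5, -1.0], [0.1, 0.2, 3.0]])
  labels = tf.constant([[0], [2]], dtype=tf.int32)
  return loss_fn(labels, logits)  # scalar: mean cross-entropy over the batch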
def get_dataset_fn(input_file_pattern,
max_seq_length,
global_batch_size,
is_training,
label_type=tf.int64,
include_sample_weights=False,
num_samples=None):
"""Gets a closure to create a dataset."""
def _dataset_fn(ctx=None):
"""Returns tf.data.Dataset for distributed BERT pretraining."""
batch_size = ctx.get_per_replica_batch_size(
global_batch_size) if ctx else global_batch_size
dataset = input_pipeline.create_classifier_dataset(
tf.io.gfile.glob(input_file_pattern),
max_seq_length,
batch_size,
is_training=is_training,
input_pipeline_context=ctx,
label_type=label_type,
include_sample_weights=include_sample_weights,
num_samples=num_samples)
return dataset
return _dataset_fn
def run_bert_classifier(strategy,
bert_config,
input_meta_data,
model_dir,
epochs,
steps_per_epoch,
steps_per_loop,
eval_steps,
warmup_steps,
initial_lr,
init_checkpoint,
train_input_fn,
eval_input_fn,
training_callbacks=True,
custom_callbacks=None,
custom_metrics=None):
"""Run BERT classifier training using low-level API."""
max_seq_length = input_meta_data['max_seq_length']
num_classes = input_meta_data.get('num_labels', 1)
is_regression = num_classes == 1
def _get_classifier_model():
"""Gets a classifier model."""
classifier_model, core_model = (
bert_models.classifier_model(
bert_config,
num_classes,
max_seq_length,
hub_module_url=FLAGS.hub_module_url,
hub_module_trainable=FLAGS.hub_module_trainable))
optimizer = optimization.create_optimizer(initial_lr,
steps_per_epoch * epochs,
warmup_steps, FLAGS.end_lr,
FLAGS.optimizer_type)
classifier_model.optimizer = performance.configure_optimizer(
optimizer,
use_float16=common_flags.use_float16(),
use_graph_rewrite=common_flags.use_graph_rewrite(),
use_experimental_api=False)
return classifier_model, core_model
  # tf.keras.losses objects accept optional sample_weight arguments (e.g. coming
  # from the dataset) to compute a weighted loss, which is used for the
  # regression tasks. The classification tasks, which use the custom
  # get_loss_fn, do not accept sample weights.
loss_fn = (tf.keras.losses.MeanSquaredError() if is_regression
else get_loss_fn(num_classes))
# Defines evaluation metrics function, which will create metrics in the
# correct device and strategy scope.
if custom_metrics:
metric_fn = custom_metrics
elif is_regression:
metric_fn = functools.partial(
tf.keras.metrics.MeanSquaredError,
'mean_squared_error',
dtype=tf.float32)
else:
metric_fn = functools.partial(
tf.keras.metrics.SparseCategoricalAccuracy,
'accuracy',
dtype=tf.float32)
# Start training using Keras compile/fit API.
logging.info('Training using TF 2.x Keras compile/fit API with '
'distribution strategy.')
return run_keras_compile_fit(
model_dir,
strategy,
_get_classifier_model,
train_input_fn,
eval_input_fn,
loss_fn,
metric_fn,
init_checkpoint,
epochs,
steps_per_epoch,
steps_per_loop,
eval_steps,
training_callbacks=training_callbacks,
custom_callbacks=custom_callbacks)
def run_keras_compile_fit(model_dir,
strategy,
model_fn,
train_input_fn,
eval_input_fn,
loss_fn,
metric_fn,
init_checkpoint,
epochs,
steps_per_epoch,
steps_per_loop,
eval_steps,
training_callbacks=True,
custom_callbacks=None):
"""Runs BERT classifier model using Keras compile/fit API."""
with strategy.scope():
training_dataset = train_input_fn()
evaluation_dataset = eval_input_fn() if eval_input_fn else None
bert_model, sub_model = model_fn()
optimizer = bert_model.optimizer
if init_checkpoint:
checkpoint = tf.train.Checkpoint(model=sub_model, encoder=sub_model)
checkpoint.read(init_checkpoint).assert_existing_objects_matched()
if not isinstance(metric_fn, (list, tuple)):
metric_fn = [metric_fn]
bert_model.compile(
optimizer=optimizer,
loss=loss_fn,
metrics=[fn() for fn in metric_fn],
steps_per_execution=steps_per_loop)
summary_dir = os.path.join(model_dir, 'summaries')
summary_callback = tf.keras.callbacks.TensorBoard(summary_dir)
checkpoint = tf.train.Checkpoint(model=bert_model, optimizer=optimizer)
checkpoint_manager = tf.train.CheckpointManager(
checkpoint,
directory=model_dir,
max_to_keep=None,
step_counter=optimizer.iterations,
checkpoint_interval=0)
checkpoint_callback = keras_utils.SimpleCheckpoint(checkpoint_manager)
if training_callbacks:
if custom_callbacks is not None:
custom_callbacks += [summary_callback, checkpoint_callback]
else:
custom_callbacks = [summary_callback, checkpoint_callback]
history = bert_model.fit(
x=training_dataset,
validation_data=evaluation_dataset,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
validation_steps=eval_steps,
callbacks=custom_callbacks)
stats = {'total_training_steps': steps_per_epoch * epochs}
if 'loss' in history.history:
stats['train_loss'] = history.history['loss'][-1]
if 'val_accuracy' in history.history:
stats['eval_metrics'] = history.history['val_accuracy'][-1]
return bert_model, stats
def get_predictions_and_labels(strategy,
trained_model,
eval_input_fn,
is_regression=False,
return_probs=False):
"""Obtains predictions of trained model on evaluation data.
  Note that the list of labels is returned along with the predictions because
  the order changes when distributing the dataset over TPU pods.
Args:
strategy: Distribution strategy.
trained_model: Trained model with preloaded weights.
eval_input_fn: Input function for evaluation data.
is_regression: Whether it is a regression task.
return_probs: Whether to return probabilities of classes.
Returns:
predictions: List of predictions.
labels: List of gold labels corresponding to predictions.
"""
@tf.function
def test_step(iterator):
"""Computes predictions on distributed devices."""
def _test_step_fn(inputs):
"""Replicated predictions."""
inputs, labels = inputs
logits = trained_model(inputs, training=False)
if not is_regression:
probabilities = tf.nn.softmax(logits)
return probabilities, labels
else:
return logits, labels
outputs, labels = strategy.run(_test_step_fn, args=(next(iterator),))
# outputs: current batch logits as a tuple of shard logits
outputs = tf.nest.map_structure(strategy.experimental_local_results,
outputs)
labels = tf.nest.map_structure(strategy.experimental_local_results, labels)
return outputs, labels
def _run_evaluation(test_iterator):
"""Runs evaluation steps."""
preds, golds = list(), list()
try:
with tf.experimental.async_scope():
while True:
probabilities, labels = test_step(test_iterator)
for cur_probs, cur_labels in zip(probabilities, labels):
if return_probs:
preds.extend(cur_probs.numpy().tolist())
else:
preds.extend(tf.math.argmax(cur_probs, axis=1).numpy())
golds.extend(cur_labels.numpy().tolist())
except (StopIteration, tf.errors.OutOfRangeError):
tf.experimental.async_clear_error()
return preds, golds
test_iter = iter(strategy.distribute_datasets_from_function(eval_input_fn))
predictions, labels = _run_evaluation(test_iter)
return predictions, labels
def export_classifier(model_export_path, input_meta_data, bert_config,
model_dir):
"""Exports a trained model as a `SavedModel` for inference.
Args:
model_export_path: a string specifying the path to the SavedModel directory.
input_meta_data: dictionary containing meta data about input and model.
bert_config: Bert configuration file to define core bert layers.
model_dir: The directory where the model weights and training/evaluation
summaries are stored.
  Raises:
    ValueError: If the export path or the model directory is an empty string
      or None.
"""
if not model_export_path:
raise ValueError('Export path is not specified: %s' % model_export_path)
  if not model_dir:
    raise ValueError('Model directory is not specified: %s' % model_dir)
# Export uses float32 for now, even if training uses mixed precision.
tf.keras.mixed_precision.set_global_policy('float32')
classifier_model = bert_models.classifier_model(
bert_config,
input_meta_data.get('num_labels', 1),
hub_module_url=FLAGS.hub_module_url,
hub_module_trainable=False)[0]
model_saving_utils.export_bert_model(
model_export_path, model=classifier_model, checkpoint_dir=model_dir)
def run_bert(strategy,
input_meta_data,
model_config,
train_input_fn=None,
eval_input_fn=None,
init_checkpoint=None,
custom_callbacks=None,
custom_metrics=None):
"""Run BERT training."""
# Enables XLA in Session Config. Should not be set for TPU.
keras_utils.set_session_config(FLAGS.enable_xla)
performance.set_mixed_precision_policy(common_flags.dtype(),
use_experimental_api=False)
epochs = FLAGS.num_train_epochs * FLAGS.num_eval_per_epoch
train_data_size = (
input_meta_data['train_data_size'] // FLAGS.num_eval_per_epoch)
if FLAGS.train_data_size:
train_data_size = min(train_data_size, FLAGS.train_data_size)
logging.info('Updated train_data_size: %s', train_data_size)
steps_per_epoch = int(train_data_size / FLAGS.train_batch_size)
warmup_steps = int(epochs * train_data_size * 0.1 / FLAGS.train_batch_size)
eval_steps = int(
math.ceil(input_meta_data['eval_data_size'] / FLAGS.eval_batch_size))
if not strategy:
raise ValueError('Distribution strategy has not been specified.')
if not custom_callbacks:
custom_callbacks = []
if FLAGS.log_steps:
custom_callbacks.append(
keras_utils.TimeHistory(
batch_size=FLAGS.train_batch_size,
log_steps=FLAGS.log_steps,
logdir=FLAGS.model_dir))
trained_model, _ = run_bert_classifier(
strategy,
model_config,
input_meta_data,
FLAGS.model_dir,
epochs,
steps_per_epoch,
FLAGS.steps_per_loop,
eval_steps,
warmup_steps,
FLAGS.learning_rate,
init_checkpoint or FLAGS.init_checkpoint,
train_input_fn,
eval_input_fn,
custom_callbacks=custom_callbacks,
custom_metrics=custom_metrics)
if FLAGS.model_export_path:
model_saving_utils.export_bert_model(
FLAGS.model_export_path, model=trained_model)
return trained_model
def custom_main(custom_callbacks=None, custom_metrics=None):
"""Run classification or regression.
Args:
    custom_callbacks: list of tf.keras.callbacks.Callback passed to the training loop.
custom_metrics: list of metrics passed to the training loop.
"""
gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_param)
with tf.io.gfile.GFile(FLAGS.input_meta_data_path, 'rb') as reader:
input_meta_data = json.loads(reader.read().decode('utf-8'))
label_type = LABEL_TYPES_MAP[input_meta_data.get('label_type', 'int')]
include_sample_weights = input_meta_data.get('has_sample_weights', False)
if not FLAGS.model_dir:
FLAGS.model_dir = '/tmp/bert20/'
bert_config = bert_configs.BertConfig.from_json_file(FLAGS.bert_config_file)
if FLAGS.mode == 'export_only':
export_classifier(FLAGS.model_export_path, input_meta_data, bert_config,
FLAGS.model_dir)
return
strategy = distribute_utils.get_distribution_strategy(
distribution_strategy=FLAGS.distribution_strategy,
num_gpus=FLAGS.num_gpus,
tpu_address=FLAGS.tpu)
eval_input_fn = get_dataset_fn(
FLAGS.eval_data_path,
input_meta_data['max_seq_length'],
FLAGS.eval_batch_size,
is_training=False,
label_type=label_type,
include_sample_weights=include_sample_weights)
if FLAGS.mode == 'predict':
num_labels = input_meta_data.get('num_labels', 1)
with strategy.scope():
classifier_model = bert_models.classifier_model(
bert_config, num_labels)[0]
checkpoint = tf.train.Checkpoint(model=classifier_model)
latest_checkpoint_file = (
FLAGS.predict_checkpoint_path or
tf.train.latest_checkpoint(FLAGS.model_dir))
assert latest_checkpoint_file
logging.info('Checkpoint file %s found and restoring from '
'checkpoint', latest_checkpoint_file)
checkpoint.restore(
latest_checkpoint_file).assert_existing_objects_matched()
preds, _ = get_predictions_and_labels(
strategy,
classifier_model,
eval_input_fn,
is_regression=(num_labels == 1),
return_probs=True)
output_predict_file = os.path.join(FLAGS.model_dir, 'test_results.tsv')
with tf.io.gfile.GFile(output_predict_file, 'w') as writer:
logging.info('***** Predict results *****')
for probabilities in preds:
output_line = '\t'.join(
str(class_probability)
for class_probability in probabilities) + '\n'
writer.write(output_line)
return
if FLAGS.mode != 'train_and_eval':
raise ValueError('Unsupported mode is specified: %s' % FLAGS.mode)
train_input_fn = get_dataset_fn(
FLAGS.train_data_path,
input_meta_data['max_seq_length'],
FLAGS.train_batch_size,
is_training=True,
label_type=label_type,
include_sample_weights=include_sample_weights,
num_samples=FLAGS.train_data_size)
run_bert(
strategy,
input_meta_data,
bert_config,
train_input_fn,
eval_input_fn,
custom_callbacks=custom_callbacks,
custom_metrics=custom_metrics)
def main(_):
custom_main(custom_callbacks=None, custom_metrics=None)
if __name__ == '__main__':
flags.mark_flag_as_required('bert_config_file')
flags.mark_flag_as_required('input_meta_data_path')
flags.mark_flag_as_required('model_dir')
app.run(main)
|
the-stack_0_16370
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # S_FlexProbBootstrap [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=S_FlexProbBootstrap&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=ExerFPspec).
# ## Prepare the environment
# +
import os
import os.path as path
import sys
sys.path.append(path.abspath('../../functions-legacy'))
from collections import namedtuple
from numpy import arange, array, zeros, diff, log
from numpy import min as npmin, max as npmax
from numpy.random import choice
from scipy.io import loadmat
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure, bar, xlim, ylim, scatter, ylabel, \
xlabel, title, xticks, yticks
import matplotlib.dates as mdates
plt.style.use('seaborn')
from CONFIG import GLOBAL_DB, TEMPORARY_DB
from ARPM_utils import save_plot, struct_to_dict, date_mtop
from HistogramFP import HistogramFP
from EffectiveScenarios import EffectiveScenarios
from Stats import Stats
from ColorCodedFP import ColorCodedFP
# -
# ## Upload database
# +
try:
db = loadmat(os.path.join(GLOBAL_DB, 'db_Stocks'), squeeze_me=True)
except FileNotFoundError:
db = loadmat(os.path.join(TEMPORARY_DB, 'db_Stocks'), squeeze_me=True)
SPX = struct_to_dict(db['SPX'])
# -
# ## Compute the realized time series of the S&P 500 log-returns
# +
SPX_ = SPX.Price_close
date = SPX.Date
epsi = diff(log(SPX_))
t_ = len(epsi)
epsi = epsi.reshape(1,-1)
date = date[1:]
# -
# ## FLEXIBLE PROBABILITIES FROM BOOTSTRAP
# +
k_ = 252 # size of subsamples
q_ = 5 # number of subsamples (and frames)
prob_bs = zeros((q_, t_))
ens = zeros((1, q_))
typ = namedtuple('type','Entropy')
typ.Entropy = 'Exp'
for q in range(q_):
r = choice(arange(t_), size=k_, replace=False)
prob_bs[q, r] = 1 / k_
ens[0,q] = EffectiveScenarios(prob_bs[[q],:], typ)
# -
# ## HFP histogram and statistics
# +
q_ = prob_bs.shape[0]
option = namedtuple('option', 'n_bins')
option.n_bins = 10*log(epsi.shape[1])
p, x = {}, {}
for q in range(q_):
p[q], x[q] = HistogramFP(epsi, prob_bs[[q],:], option)
mu, sdev, VaR, CVaR, skewness, kurtosis = Stats(epsi, prob_bs)
# -
# ## Figure
date_tick = arange(99, t_-1, 680)
date_dt = array([date_mtop(i) for i in date])
myFmt = mdates.DateFormatter('%d-%b-%Y')
# ## Plots for the first two bootstrap subsamples (q = 0 and q = 1)
for q in range(2):
figure()
# FP profile
plt.subplot2grid((3, 3), (0, 0), colspan=2)
plt.gca().set_facecolor('white')
bar(date_dt, prob_bs[q, :], facecolor=[0.5, 0.5, 0.5], edgecolor=[0.5, 0.5, 0.5])
xlim([min(date_dt), max(date_dt)])
xticks(date_dt[date_tick])
plt.gca().xaxis.set_major_formatter(myFmt)
ylim([0, 1.1 * npmax(prob_bs[q, :])])
yticks([])
title('FLEXIBLE PROBABILITIES FROM BOOTSTRAP')
ylabel('probability')
TEXT = 'Effective Num.Scenarios = % 3.0f' % ens[0, q]
plt.text(min(date_dt), 1.05 * npmax(prob_bs[q, :]), TEXT, horizontalalignment='left')
# scatter colormap and colors
CM, C = ColorCodedFP(prob_bs[[q], :], 10 ** -20, npmax(prob_bs[:5, :]), arange(0, 0.95, 0.05), 0, 1, [1, 0])
# Time series of S&P500 log-rets
ax = plt.subplot2grid((3, 3), (1, 0), colspan=2, rowspan=2)
scatter(date_dt, epsi, 15, c=C, marker='.', cmap=CM)
xlim([min(date_dt), max(date_dt)])
xticks(date_dt[date_tick])
plt.gca().xaxis.set_major_formatter(myFmt)
ax.set_facecolor('white')
ylim([1.1 * npmin(epsi), 1.1 * npmax(epsi)])
ylabel('returns')
title('S&P')
# HFP histogram
plt.subplot2grid((3, 3), (1, 2), rowspan=2)
plt.gca().set_facecolor('white')
plt.barh(x[q][:-1], p[q][0], height=x[q][1] - x[q][0], facecolor=[0.7, 0.7, 0.7], edgecolor=[0.5, 0.5, 0.5])
xlim([0, 1.05 * npmax(p[q])])
xticks([])
yticks([]), ylim([1.1 * npmin(epsi), 1.1 * npmax(epsi)])
xlabel('probability')
plt.tight_layout();
# statistics
TEXT = 'Mean % 3.3f \nSdev %3.3f \nVaR %3.3f \nCVaR %3.3f \nSkew %3.3f \nKurt %3.3f' % (
mu[q], sdev[q], VaR[q], CVaR[q], skewness[q], kurtosis[q])
plt.text(0.5 * npmax(p[q]), 0.08, TEXT, horizontalalignment='left', verticalalignment='bottom');
# save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1])
|
the-stack_0_16375
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from dotenv import load_dotenv
import os
# init SQLAlchemy so we can use it later in our models
db = SQLAlchemy()
def create_app():
app = Flask(__name__)
# with app.app_context():
basedir = os.path.abspath(os.path.dirname(__file__))
load_dotenv(os.path.join(basedir, '.env'))
app.config['SERVER_NAME'] = 'local.docker:5000'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SECRET_KEY'] = os.environ.get('SECRET_KEY') or 'no_secret_key_set'
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('SQLALCHEMY_DATABASE_URI') or \
'sqlite:///' + os.path.join(basedir, 'db/threatnote.db')
app.config['OTX_API_KEY'] = os.environ.get('OTX_API_KEY')
app.config['SHODAN_API_KEY'] = os.environ.get('SHODAN_API_KEY')
app.config['RISKIQ_USERNAME'] = os.environ.get('RISKIQ_USERNAME')
app.config['RISKIQ_KEY'] = os.environ.get('RISKIQ_KEY')
app.config['GREYNOISE_API_KEY'] = os.environ.get('GREYNOISE_API_KEY')
app.config['EMAILREP_API_KEY'] = os.environ.get('EMAILREP_API_KEY')
app.config['VT_API_KEY'] = os.environ.get('VT_API_KEY')
app.config['MISP_API_KEY'] = os.environ.get('MISP_API_KEY')
app.config['MISP_URL'] = os.environ.get('MISP_URL')
app.config['HIBP_API_KEY'] = os.environ.get('HIBP_API_KEY')
db.init_app(app)
login_manager = LoginManager()
login_manager.login_view = 'auth.login'
login_manager.init_app(app)
from models import User
@login_manager.user_loader
def load_user(user_id):
# since the user_id is just the primary key of our user table, use it in the query for the user
return User.query.get(int(user_id))
# blueprint for auth routes in our app
from auth import auth as auth_blueprint
app.register_blueprint(auth_blueprint)
# blueprint for non-auth parts of app
from main import main as main_blueprint
app.register_blueprint(main_blueprint)
return app
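# Sketch of how this factory is typically used (e.g. from a small run script
# or a WSGI entrypoint); the host/port values below are illustrative only.
if __name__ == '__main__':
    app = create_app()
    app.run(host='0.0.0.0', port=5000, debug=True)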
|
the-stack_0_16379
|
# Imports
from os import path
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
def _weights_init(m):
classname = m.__class__.__name__
if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight)
class LambdaLayer(nn.Module):
def __init__(self, lambd):
super(LambdaLayer, self).__init__()
self.lambd = lambd
def forward(self, x):
return self.lambd(x)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1, option="A"):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(
in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False
)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(
planes, planes, kernel_size=3, stride=1, padding=1, bias=False
)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != planes:
if option == "A":
"""
                For CIFAR10, the ResNet paper uses option A (zero-padding shortcut).
"""
self.shortcut = LambdaLayer(
lambda x: F.pad(
x[:, :, ::2, ::2],
(0, 0, 0, 0, planes // 4, planes // 4),
"constant",
0,
)
)
elif option == "B":
self.shortcut = nn.Sequential(
nn.Conv2d(
in_planes,
self.expansion * planes,
kernel_size=1,
stride=stride,
bias=False,
),
nn.BatchNorm2d(self.expansion * planes),
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class BBN_ResNet_Cifar(nn.Module):
"""ResNet32 from the "BBN: Bilateral-Branch Network with Cumulative Learning for Long-Tailed Visual Recognition (CVPR 2020)" """
def __init__(self, block, num_blocks):
"""Initialize
#FIXME
Args:
block ([type]): [description]
num_blocks ([type]): [description]
"""
super(BBN_ResNet_Cifar, self).__init__()
self.in_planes = 16
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 64, num_blocks[2] - 1, stride=2)
self.cb_block = block(self.in_planes, self.in_planes, stride=1)
self.rb_block = block(self.in_planes, self.in_planes, stride=1)
self.apply(_weights_init)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
def load_model(self, pretrain_dir):
"""Load a pre-trained model
Args:
pretrain_dir (str): path of pretrained model
"""
print(f"Loading Backbone pretrain model from {pretrain_dir}......")
model_dict = self.state_dict()
pretrain_dict = torch.load(pretrain_dir)["state_dict_best"]["feat_model"]
new_dict = OrderedDict()
# Removing FC and Classifier layers
for k, v in pretrain_dict.items():
if k.startswith("module"):
k = k[7:]
if "fc" not in k and "classifier" not in k:
new_dict[k] = v
model_dict.update(new_dict)
self.load_state_dict(model_dict)
print("Backbone model has been loaded......")
def _make_layer(self, block, planes, num_blocks, stride, add_flag=True):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x, **kwargs):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
if "feature_cb" in kwargs:
out = self.cb_block(out)
return out
elif "feature_rb" in kwargs:
out = self.rb_block(out)
return out
out1 = self.cb_block(out)
out2 = self.rb_block(out)
out = torch.cat((out1, out2), dim=1)
out = self.avgpool(out)
out = out.view(out.shape[0], -1)
return out
def create_model(pretrain=False, pretrain_dir=None, *args):
"""Initialize/load the model
Args:
pretrain (bool, optional): Use pre-trained model?. Defaults to False.
pretrain_dir (str, optional): Directory of the pre-trained model. Defaults to None.
Returns:
class: Model
"""
print("Loading ResNet 32 Feature Model.")
resnet32 = BBN_ResNet_Cifar(BasicBlock, [5, 5, 5])
if pretrain:
if path.exists(pretrain_dir):
print("===> Load Pretrain Initialization for ResNet32")
resnet32.load_model(pretrain_dir=pretrain_dir)
else:
raise Exception(f"Pretrain path doesn't exist!!-{pretrain_dir}")
else:
print("===> Train backbone from the scratch")
return resnet32
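def _example_forward_pass():
    """Sketch (not called anywhere): builds the backbone from scratch and runs
    a dummy CIFAR-sized batch through it. Batch size and shapes below are
    illustrative assumptions."""
    model = create_model(pretrain=False)
    dummy = torch.randn(2, 3, 32, 32)  # (batch, channels, height, width)
    features = model(dummy)  # concatenated cb/rb features of shape (2, 128)
    return features.shape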
|
the-stack_0_16380
|
# Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define API Jobs."""
import copy
import threading
from six.moves import http_client
import google.api_core.future.polling
from google.cloud import exceptions
from google.cloud.exceptions import NotFound
from google.cloud._helpers import _datetime_from_microseconds
from google.cloud.bigquery.dataset import DatasetReference
from google.cloud.bigquery.external_config import ExternalConfig
from google.cloud.bigquery.query import _query_param_from_api_repr
from google.cloud.bigquery.query import ArrayQueryParameter
from google.cloud.bigquery.query import ScalarQueryParameter
from google.cloud.bigquery.query import StructQueryParameter
from google.cloud.bigquery.query import UDFResource
from google.cloud.bigquery.schema import SchemaField
from google.cloud.bigquery.table import EncryptionConfiguration
from google.cloud.bigquery.table import TableReference
from google.cloud.bigquery import _helpers
from google.cloud.bigquery._helpers import DEFAULT_RETRY
from google.cloud.bigquery._helpers import _int_or_none
_DONE_STATE = 'DONE'
_STOPPED_REASON = 'stopped'
_TIMEOUT_BUFFER_SECS = 0.1
_ERROR_REASON_TO_EXCEPTION = {
'accessDenied': http_client.FORBIDDEN,
'backendError': http_client.INTERNAL_SERVER_ERROR,
'billingNotEnabled': http_client.FORBIDDEN,
'billingTierLimitExceeded': http_client.BAD_REQUEST,
'blocked': http_client.FORBIDDEN,
'duplicate': http_client.CONFLICT,
'internalError': http_client.INTERNAL_SERVER_ERROR,
'invalid': http_client.BAD_REQUEST,
'invalidQuery': http_client.BAD_REQUEST,
'notFound': http_client.NOT_FOUND,
'notImplemented': http_client.NOT_IMPLEMENTED,
'quotaExceeded': http_client.FORBIDDEN,
'rateLimitExceeded': http_client.FORBIDDEN,
'resourceInUse': http_client.BAD_REQUEST,
'resourcesExceeded': http_client.BAD_REQUEST,
'responseTooLarge': http_client.FORBIDDEN,
'stopped': http_client.OK,
'tableUnavailable': http_client.BAD_REQUEST,
}
def _error_result_to_exception(error_result):
"""Maps BigQuery error reasons to an exception.
The reasons and their matching HTTP status codes are documented on
the `troubleshooting errors`_ page.
.. _troubleshooting errors: https://cloud.google.com/bigquery\
/troubleshooting-errors
:type error_result: Mapping[str, str]
:param error_result: The error result from BigQuery.
:rtype google.cloud.exceptions.GoogleCloudError:
:returns: The mapped exception.
"""
reason = error_result.get('reason')
status_code = _ERROR_REASON_TO_EXCEPTION.get(
reason, http_client.INTERNAL_SERVER_ERROR)
return exceptions.from_http_status(
status_code, error_result.get('message', ''), errors=[error_result])
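def _example_error_mapping():
    """Sketch (not called by the library): shows how a BigQuery error payload
    is mapped to an exception. The error_result dict below is a made-up
    example; 'accessDenied' maps to HTTP 403 (a Forbidden-style exception)."""
    return _error_result_to_exception(
        {'reason': 'accessDenied', 'message': 'Permission denied'})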
class Compression(object):
"""The compression type to use for exported files.
Possible values include `GZIP`, `DEFLATE`, `SNAPPY`, and `NONE`. The
default value is `NONE`. `DEFLATE` and `SNAPPY` are only supported for
Avro.
"""
GZIP = 'GZIP'
DEFLATE = 'DEFLATE'
SNAPPY = 'SNAPPY'
NONE = 'NONE'
class CreateDisposition(object):
"""Specifies whether the job is allowed to create new tables.
The following values are supported:
`CREATE_IF_NEEDED`: If the table does not exist, BigQuery creates
the table.
`CREATE_NEVER`: The table must already exist. If it does not,
a 'notFound' error is returned in the job result.
The default value is `CREATE_IF_NEEDED`.
Creation, truncation and append actions occur as one atomic update
upon job completion.
"""
CREATE_IF_NEEDED = 'CREATE_IF_NEEDED'
CREATE_NEVER = 'CREATE_NEVER'
class DestinationFormat(object):
"""The exported file format.
Possible values include `CSV`, `NEWLINE_DELIMITED_JSON` and `AVRO`.
The default value is `CSV`. Tables with nested or repeated fields
cannot be exported as CSV.
"""
CSV = 'CSV'
NEWLINE_DELIMITED_JSON = 'NEWLINE_DELIMITED_JSON'
AVRO = 'AVRO'
class Encoding(object):
"""The character encoding of the data. The supported values
are `UTF_8` corresponding to `'UTF-8'` or `ISO_8859_1` corresponding to
    `'ISO-8859-1'`. The default value is `UTF_8`.
BigQuery decodes the data after the raw, binary data has been
split using the values of the quote and fieldDelimiter properties.
"""
UTF_8 = 'UTF-8'
    ISO_8859_1 = 'ISO-8859-1'
class QueryPriority(object):
"""Specifies a priority for the query.
Possible values include `INTERACTIVE` and `BATCH`. The default value
is `INTERACTIVE`.
"""
INTERACTIVE = 'INTERACTIVE'
BATCH = 'BATCH'
class SourceFormat(object):
"""The format of the data files.
For CSV files, specify `CSV`. For datastore backups, specify
`DATASTORE_BACKUP`. For newline-delimited json, specify
`NEWLINE_DELIMITED_JSON`. For Avro, specify `AVRO`. For Parquet, specify
`PARQUET`. The default value is `CSV`.
"""
CSV = 'CSV'
DATASTORE_BACKUP = 'DATASTORE_BACKUP'
NEWLINE_DELIMITED_JSON = 'NEWLINE_DELIMITED_JSON'
AVRO = 'AVRO'
PARQUET = 'PARQUET'
class WriteDisposition(object):
"""Specifies the action that occurs if destination table already exists.
The following values are supported:
`WRITE_TRUNCATE`: If the table already exists, BigQuery overwrites the
table data.
`WRITE_APPEND`: If the table already exists, BigQuery appends the data
to the table.
`WRITE_EMPTY`: If the table already exists and contains data, a 'duplicate'
error is returned in the job result.
The default value is `WRITE_APPEND`.
Each action is atomic and only occurs if BigQuery is able to complete
the job successfully. Creation, truncation and append actions occur as one
atomic update upon job completion.
"""
WRITE_APPEND = 'WRITE_APPEND'
WRITE_TRUNCATE = 'WRITE_TRUNCATE'
WRITE_EMPTY = 'WRITE_EMPTY'
class _AsyncJob(google.api_core.future.polling.PollingFuture):
"""Base class for asynchronous jobs.
:type job_id: str
:param job_id: the job's ID in the project associated with the client.
:type client: :class:`google.cloud.bigquery.client.Client`
:param client: A client which holds credentials and project configuration.
"""
def __init__(self, job_id, client):
super(_AsyncJob, self).__init__()
self.job_id = job_id
self._client = client
self._properties = {}
self._result_set = False
self._completion_lock = threading.Lock()
@property
def project(self):
"""Project bound to the job.
:rtype: str
:returns: the project (derived from the client).
"""
return self._client.project
def _require_client(self, client):
"""Check client or verify over-ride.
:type client: :class:`~google.cloud.bigquery.client.Client` or
``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current dataset.
:rtype: :class:`google.cloud.bigquery.client.Client`
:returns: The client passed in or the currently bound client.
"""
if client is None:
client = self._client
return client
@property
def job_type(self):
"""Type of job
:rtype: str
:returns: one of 'load', 'copy', 'extract', 'query'
"""
return self._JOB_TYPE
@property
def path(self):
"""URL path for the job's APIs.
:rtype: str
:returns: the path based on project and job ID.
"""
return '/projects/%s/jobs/%s' % (self.project, self.job_id)
@property
def etag(self):
"""ETag for the job resource.
:rtype: str, or ``NoneType``
:returns: the ETag (None until set from the server).
"""
return self._properties.get('etag')
@property
def self_link(self):
"""URL for the job resource.
:rtype: str, or ``NoneType``
:returns: the URL (None until set from the server).
"""
return self._properties.get('selfLink')
@property
def user_email(self):
"""E-mail address of user who submitted the job.
:rtype: str, or ``NoneType``
        :returns: the e-mail address (None until set from the server).
"""
return self._properties.get('user_email')
@property
def created(self):
"""Datetime at which the job was created.
:rtype: ``datetime.datetime``, or ``NoneType``
:returns: the creation time (None until set from the server).
"""
statistics = self._properties.get('statistics')
if statistics is not None:
millis = statistics.get('creationTime')
if millis is not None:
return _datetime_from_microseconds(millis * 1000.0)
@property
def started(self):
"""Datetime at which the job was started.
:rtype: ``datetime.datetime``, or ``NoneType``
:returns: the start time (None until set from the server).
"""
statistics = self._properties.get('statistics')
if statistics is not None:
millis = statistics.get('startTime')
if millis is not None:
return _datetime_from_microseconds(millis * 1000.0)
@property
def ended(self):
"""Datetime at which the job finished.
:rtype: ``datetime.datetime``, or ``NoneType``
:returns: the end time (None until set from the server).
"""
statistics = self._properties.get('statistics')
if statistics is not None:
millis = statistics.get('endTime')
if millis is not None:
return _datetime_from_microseconds(millis * 1000.0)
def _job_statistics(self):
"""Helper for job-type specific statistics-based properties."""
statistics = self._properties.get('statistics', {})
return statistics.get(self._JOB_TYPE, {})
@property
def error_result(self):
"""Error information about the job as a whole.
:rtype: mapping, or ``NoneType``
:returns: the error information (None until set from the server).
"""
status = self._properties.get('status')
if status is not None:
return status.get('errorResult')
@property
def errors(self):
"""Information about individual errors generated by the job.
:rtype: list of mappings, or ``NoneType``
:returns: the error information (None until set from the server).
"""
status = self._properties.get('status')
if status is not None:
return status.get('errors')
@property
def state(self):
"""Status of the job.
:rtype: str, or ``NoneType``
:returns: the state (None until set from the server).
"""
status = self._properties.get('status')
if status is not None:
return status.get('state')
def _scrub_local_properties(self, cleaned):
"""Helper: handle subclass properties in cleaned."""
pass
def _copy_configuration_properties(self, configuration):
"""Helper: assign subclass configuration properties in cleaned."""
raise NotImplementedError("Abstract")
def _set_properties(self, api_response):
"""Update properties from resource in body of ``api_response``
:type api_response: dict
:param api_response: response returned from an API call
"""
cleaned = api_response.copy()
self._scrub_local_properties(cleaned)
statistics = cleaned.get('statistics', {})
if 'creationTime' in statistics:
statistics['creationTime'] = float(statistics['creationTime'])
if 'startTime' in statistics:
statistics['startTime'] = float(statistics['startTime'])
if 'endTime' in statistics:
statistics['endTime'] = float(statistics['endTime'])
self._properties.clear()
self._properties.update(cleaned)
self._copy_configuration_properties(cleaned['configuration'])
# For Future interface
self._set_future_result()
@classmethod
def _get_resource_config(cls, resource):
"""Helper for :meth:`from_api_repr`
:type resource: dict
:param resource: resource for the job
:rtype: dict
:returns: tuple (string, dict), where the first element is the
job ID and the second contains job-specific configuration.
:raises: :class:`KeyError` if the resource has no identifier, or
is missing the appropriate configuration.
"""
if ('jobReference' not in resource or
'jobId' not in resource['jobReference']):
raise KeyError('Resource lacks required identity information: '
'["jobReference"]["jobId"]')
job_id = resource['jobReference']['jobId']
if ('configuration' not in resource or
cls._JOB_TYPE not in resource['configuration']):
raise KeyError('Resource lacks required configuration: '
'["configuration"]["%s"]' % cls._JOB_TYPE)
return job_id, resource['configuration']
def _begin(self, client=None, retry=DEFAULT_RETRY):
"""API call: begin the job via a POST request
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/insert
:type client: :class:`~google.cloud.bigquery.client.Client` or
``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current dataset.
:type retry: :class:`google.api_core.retry.Retry`
:param retry: (Optional) How to retry the RPC.
:raises: :exc:`ValueError` if the job has already begin.
"""
if self.state is not None:
raise ValueError("Job already begun.")
client = self._require_client(client)
path = '/projects/%s/jobs' % (self.project,)
# jobs.insert is idempotent because we ensure that every new
# job has an ID.
api_response = client._call_api(
retry,
method='POST', path=path, data=self._build_resource())
self._set_properties(api_response)
def exists(self, client=None, retry=DEFAULT_RETRY):
"""API call: test for the existence of the job via a GET request
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/get
:type client: :class:`~google.cloud.bigquery.client.Client` or
``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current dataset.
:type retry: :class:`google.api_core.retry.Retry`
:param retry: (Optional) How to retry the RPC.
:rtype: bool
:returns: Boolean indicating existence of the job.
"""
client = self._require_client(client)
try:
client._call_api(retry,
method='GET', path=self.path,
query_params={'fields': 'id'})
except NotFound:
return False
else:
return True
def reload(self, client=None, retry=DEFAULT_RETRY):
"""API call: refresh job properties via a GET request.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/get
:type client: :class:`~google.cloud.bigquery.client.Client` or
``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current dataset.
:type retry: :class:`google.api_core.retry.Retry`
:param retry: (Optional) How to retry the RPC.
"""
client = self._require_client(client)
api_response = client._call_api(retry, method='GET', path=self.path)
self._set_properties(api_response)
def cancel(self, client=None):
"""API call: cancel job via a POST request
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/cancel
:type client: :class:`~google.cloud.bigquery.client.Client` or
``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current dataset.
:rtype: bool
:returns: Boolean indicating that the cancel request was sent.
"""
client = self._require_client(client)
api_response = client._connection.api_request(
method='POST', path='%s/cancel' % (self.path,))
self._set_properties(api_response['job'])
# The Future interface requires that we return True if the *attempt*
# to cancel was successful.
return True
# The following methods implement the PollingFuture interface. Note that
# the methods above are from the pre-Future interface and are left for
# compatibility. The only "overloaded" method is :meth:`cancel`, which
# satisfies both interfaces.
def _set_future_result(self):
"""Set the result or exception from the job if it is complete."""
# This must be done in a lock to prevent the polling thread
# and main thread from both executing the completion logic
# at the same time.
with self._completion_lock:
# If the operation isn't complete or if the result has already been
# set, do not call set_result/set_exception again.
# Note: self._result_set is set to True in set_result and
# set_exception, in case those methods are invoked directly.
if self.state != _DONE_STATE or self._result_set:
return
if self.error_result is not None:
exception = _error_result_to_exception(self.error_result)
self.set_exception(exception)
else:
self.set_result(self)
def done(self, retry=DEFAULT_RETRY):
"""Refresh the job and checks if it is complete.
:type retry: :class:`google.api_core.retry.Retry`
:param retry: (Optional) How to retry the RPC.
:rtype: bool
:returns: True if the job is complete, False otherwise.
"""
        # Do not refresh if the state is already done, as the job will not
        # change once complete.
if self.state != _DONE_STATE:
self.reload(retry=retry)
return self.state == _DONE_STATE
def result(self, timeout=None):
"""Start the job and wait for it to complete and get the result.
:type timeout: float
:param timeout:
How long (in seconds) to wait for job to complete before raising
a :class:`concurrent.futures.TimeoutError`.
:rtype: _AsyncJob
:returns: This instance.
:raises:
:class:`~google.cloud.exceptions.GoogleCloudError` if the job
failed or :class:`concurrent.futures.TimeoutError` if the job did
not complete in the given timeout.
"""
if self.state is None:
self._begin()
# TODO: modify PollingFuture so it can pass a retry argument to done().
return super(_AsyncJob, self).result(timeout=timeout)
def cancelled(self):
"""Check if the job has been cancelled.
        The API does not expose a direct "cancelled" flag, so this checks
        whether the job's error result reports the 'stopped' reason. This
        method exists to satisfy the interface for
        :class:`google.api_core.future.Future`.
        :rtype: bool
        :returns: True if the job's error result indicates it was stopped.
"""
return (self.error_result is not None
and self.error_result.get('reason') == _STOPPED_REASON)
class _JobConfig(object):
"""Abstract base class for job configuration objects.
Arguments:
job_type (str): The key to use for the job configuration.
"""
def __init__(self, job_type):
self._job_type = job_type
self._properties = {job_type: {}}
def _get_sub_prop(self, key, default=None):
"""Get a value in the ``self._properties[self._job_type]`` dictionary.
Most job properties are inside the dictionary related to the job type
(e.g. 'copy', 'extract', 'load', 'query'). Use this method to access
those properties::
self._get_sub_prop('destinationTable')
        This is equivalent to using the ``_helpers.get_sub_prop`` function::
            _helpers.get_sub_prop(
self._properties, ['query', 'destinationTable'])
Arguments:
key (str):
Key for the value to get in the
``self._properties[self._job_type]`` dictionary.
default (object):
(Optional) Default value to return if the key is not found.
Defaults to ``None``.
Returns:
object: The value if present or the default.
"""
return _helpers.get_sub_prop(
self._properties, [self._job_type, key], default=default)
def _set_sub_prop(self, key, value):
"""Set a value in the ``self._properties[self._job_type]`` dictionary.
Most job properties are inside the dictionary related to the job type
(e.g. 'copy', 'extract', 'load', 'query'). Use this method to set
those properties::
self._set_sub_prop('useLegacySql', False)
        This is equivalent to using the ``_helpers.set_sub_prop`` function::
            _helpers.set_sub_prop(
self._properties, ['query', 'useLegacySql'], False)
Arguments:
key (str):
Key to set in the ``self._properties[self._job_type]``
dictionary.
value (object): Value to set.
"""
_helpers.set_sub_prop(self._properties, [self._job_type, key], value)
def to_api_repr(self):
"""Build an API representation of the job config.
:rtype: dict
:returns: A dictionary in the format used by the BigQuery API.
"""
return copy.deepcopy(self._properties)
@classmethod
def from_api_repr(cls, resource):
"""Factory: construct a job configuration given its API representation
:type resource: dict
:param resource:
An extract job configuration in the same representation as is
returned from the API.
:rtype: :class:`google.cloud.bigquery.job._JobConfig`
:returns: Configuration parsed from ``resource``.
"""
config = cls()
config._properties = copy.deepcopy(resource)
return config
class LoadJobConfig(_JobConfig):
"""Configuration options for load jobs.
All properties in this class are optional. Values which are ``None`` ->
server defaults.
"""
def __init__(self):
super(LoadJobConfig, self).__init__('load')
@property
def allow_jagged_rows(self):
"""bool: Allow missing trailing optional columns (CSV only).
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.allowJaggedRows
"""
return self._get_sub_prop('allowJaggedRows')
@allow_jagged_rows.setter
def allow_jagged_rows(self, value):
self._set_sub_prop('allowJaggedRows', value)
@property
def allow_quoted_newlines(self):
"""bool: Allow quoted data containing newline characters (CSV only).
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.allowQuotedNewlines
"""
return self._get_sub_prop('allowQuotedNewlines')
@allow_quoted_newlines.setter
def allow_quoted_newlines(self, value):
self._set_sub_prop('allowQuotedNewlines', value)
@property
def autodetect(self):
"""bool: Automatically infer the schema from a sample of the data.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.autodetect
"""
return self._get_sub_prop('autodetect')
@autodetect.setter
def autodetect(self, value):
self._set_sub_prop('autodetect', value)
@property
def create_disposition(self):
"""google.cloud.bigquery.job.CreateDisposition: Specifies behavior
for creating tables.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.createDisposition
"""
return self._get_sub_prop('createDisposition')
@create_disposition.setter
def create_disposition(self, value):
self._set_sub_prop('createDisposition', value)
@property
def encoding(self):
"""google.cloud.bigquery.job.Encoding: The character encoding of the
data.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.encoding
"""
return self._get_sub_prop('encoding')
@encoding.setter
def encoding(self, value):
self._set_sub_prop('encoding', value)
@property
def field_delimiter(self):
"""str: The separator for fields in a CSV file.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.fieldDelimiter
"""
return self._get_sub_prop('fieldDelimiter')
@field_delimiter.setter
def field_delimiter(self, value):
self._set_sub_prop('fieldDelimiter', value)
@property
def ignore_unknown_values(self):
"""bool: Ignore extra values not represented in the table schema.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.ignoreUnknownValues
"""
return self._get_sub_prop('ignoreUnknownValues')
@ignore_unknown_values.setter
def ignore_unknown_values(self, value):
self._set_sub_prop('ignoreUnknownValues', value)
@property
def max_bad_records(self):
"""int: Number of invalid rows to ignore.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.maxBadRecords
"""
return self._get_sub_prop('maxBadRecords')
@max_bad_records.setter
def max_bad_records(self, value):
self._set_sub_prop('maxBadRecords', value)
@property
def null_marker(self):
"""str: Represents a null value (CSV only).
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.nullMarker
"""
return self._get_sub_prop('nullMarker')
@null_marker.setter
def null_marker(self, value):
self._set_sub_prop('nullMarker', value)
@property
def quote_character(self):
"""str: Character used to quote data sections (CSV only).
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.quote
"""
return self._get_sub_prop('quote')
@quote_character.setter
def quote_character(self, value):
self._set_sub_prop('quote', value)
@property
def skip_leading_rows(self):
"""int: Number of rows to skip when reading data (CSV only).
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.skipLeadingRows
"""
return _int_or_none(self._get_sub_prop('skipLeadingRows'))
@skip_leading_rows.setter
def skip_leading_rows(self, value):
self._set_sub_prop('skipLeadingRows', str(value))
@property
def source_format(self):
"""google.cloud.bigquery.job.SourceFormat: File format of the data.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.sourceFormat
"""
return self._get_sub_prop('sourceFormat')
@source_format.setter
def source_format(self, value):
self._set_sub_prop('sourceFormat', value)
@property
def write_disposition(self):
"""google.cloud.bigquery.job.WriteDisposition: Action that occurs if
the destination table already exists.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.writeDisposition
"""
return self._get_sub_prop('writeDisposition')
@write_disposition.setter
def write_disposition(self, value):
self._set_sub_prop('writeDisposition', value)
@property
def schema(self):
"""List[google.cloud.bigquery.schema.SchemaField]: Schema of the
destination table.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schema
"""
schema = _helpers.get_sub_prop(
self._properties, ['load', 'schema', 'fields'])
if schema is None:
return
return [SchemaField.from_api_repr(field) for field in schema]
@schema.setter
def schema(self, value):
if not all(hasattr(field, 'to_api_repr') for field in value):
raise ValueError('Schema items must be fields')
_helpers.set_sub_prop(
self._properties,
['load', 'schema', 'fields'],
[field.to_api_repr() for field in value])
@property
def destination_encryption_configuration(self):
"""google.cloud.bigquery.table.EncryptionConfiguration: Custom
encryption configuration for the destination table.
Custom encryption configuration (e.g., Cloud KMS keys) or ``None``
if using default encryption.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.destinationEncryptionConfiguration
"""
prop = self._get_sub_prop('destinationEncryptionConfiguration')
if prop is not None:
prop = EncryptionConfiguration.from_api_repr(prop)
return prop
@destination_encryption_configuration.setter
def destination_encryption_configuration(self, value):
api_repr = value
if value is not None:
api_repr = value.to_api_repr()
self._set_sub_prop('destinationEncryptionConfiguration', api_repr)
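def _example_load_config():
    """Sketch (not called by the library): a typical CSV load configuration
    built from the properties above; the concrete values are illustrative."""
    config = LoadJobConfig()
    config.source_format = SourceFormat.CSV
    config.skip_leading_rows = 1
    config.autodetect = True
    config.write_disposition = WriteDisposition.WRITE_TRUNCATE
    return config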
class LoadJob(_AsyncJob):
"""Asynchronous job for loading data into a table.
Can load from Google Cloud Storage URIs or from a file.
:type job_id: str
:param job_id: the job's ID
:type source_uris: sequence of string or ``NoneType``
:param source_uris:
URIs of one or more data files to be loaded. See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.sourceUris
for supported URI formats. Pass None for jobs that load from a file.
:type destination: :class:`google.cloud.bigquery.table.TableReference`
:param destination: reference to table into which data is to be loaded.
:type client: :class:`google.cloud.bigquery.client.Client`
:param client: A client which holds credentials and project configuration
for the dataset (which requires a project).
"""
_JOB_TYPE = 'load'
def __init__(self, job_id, source_uris, destination, client,
job_config=None):
super(LoadJob, self).__init__(job_id, client)
if job_config is None:
job_config = LoadJobConfig()
self.source_uris = source_uris
self.destination = destination
self._configuration = job_config
@property
def allow_jagged_rows(self):
"""See
:attr:`google.cloud.bigquery.job.LoadJobConfig.allow_jagged_rows`.
"""
return self._configuration.allow_jagged_rows
@property
def allow_quoted_newlines(self):
"""See
:attr:`google.cloud.bigquery.job.LoadJobConfig.allow_quoted_newlines`.
"""
return self._configuration.allow_quoted_newlines
@property
def autodetect(self):
"""See
:attr:`google.cloud.bigquery.job.LoadJobConfig.autodetect`.
"""
return self._configuration.autodetect
@property
def create_disposition(self):
"""See
:attr:`google.cloud.bigquery.job.LoadJobConfig.create_disposition`.
"""
return self._configuration.create_disposition
@property
def encoding(self):
"""See
:attr:`google.cloud.bigquery.job.LoadJobConfig.encoding`.
"""
return self._configuration.encoding
@property
def field_delimiter(self):
"""See
:attr:`google.cloud.bigquery.job.LoadJobConfig.field_delimiter`.
"""
return self._configuration.field_delimiter
@property
def ignore_unknown_values(self):
"""See
:attr:`google.cloud.bigquery.job.LoadJobConfig.ignore_unknown_values`.
"""
return self._configuration.ignore_unknown_values
@property
def max_bad_records(self):
"""See
:attr:`google.cloud.bigquery.job.LoadJobConfig.max_bad_records`.
"""
return self._configuration.max_bad_records
@property
def null_marker(self):
"""See
:attr:`google.cloud.bigquery.job.LoadJobConfig.null_marker`.
"""
return self._configuration.null_marker
@property
def quote_character(self):
"""See
:attr:`google.cloud.bigquery.job.LoadJobConfig.quote_character`.
"""
return self._configuration.quote_character
@property
def skip_leading_rows(self):
"""See
:attr:`google.cloud.bigquery.job.LoadJobConfig.skip_leading_rows`.
"""
return self._configuration.skip_leading_rows
@property
def source_format(self):
"""See
:attr:`google.cloud.bigquery.job.LoadJobConfig.source_format`.
"""
return self._configuration.source_format
@property
def write_disposition(self):
"""See
:attr:`google.cloud.bigquery.job.LoadJobConfig.write_disposition`.
"""
return self._configuration.write_disposition
@property
def schema(self):
"""See
:attr:`google.cloud.bigquery.job.LoadJobConfig.schema`.
"""
return self._configuration.schema
@property
def destination_encryption_configuration(self):
"""google.cloud.bigquery.table.EncryptionConfiguration: Custom
encryption configuration for the destination table.
Custom encryption configuration (e.g., Cloud KMS keys)
or ``None`` if using default encryption.
See
:attr:`google.cloud.bigquery.job.LoadJobConfig.destination_encryption_configuration`.
"""
return self._configuration.destination_encryption_configuration
@property
def input_file_bytes(self):
"""Count of bytes loaded from source files.
:rtype: int, or ``NoneType``
:returns: the count (None until set from the server).
:raises: ValueError for invalid value types.
"""
statistics = self._properties.get('statistics')
if statistics is not None:
return int(statistics['load']['inputFileBytes'])
@property
def input_files(self):
"""Count of source files.
:rtype: int, or ``NoneType``
:returns: the count (None until set from the server).
"""
statistics = self._properties.get('statistics')
if statistics is not None:
return int(statistics['load']['inputFiles'])
@property
def output_bytes(self):
"""Count of bytes saved to destination table.
:rtype: int, or ``NoneType``
:returns: the count (None until set from the server).
"""
statistics = self._properties.get('statistics')
if statistics is not None:
return int(statistics['load']['outputBytes'])
@property
def output_rows(self):
"""Count of rows saved to destination table.
:rtype: int, or ``NoneType``
:returns: the count (None until set from the server).
"""
statistics = self._properties.get('statistics')
if statistics is not None:
return int(statistics['load']['outputRows'])
def _build_resource(self):
"""Generate a resource for :meth:`begin`."""
configuration = self._configuration.to_api_repr()
if self.source_uris is not None:
_helpers.set_sub_prop(
configuration, ['load', 'sourceUris'], self.source_uris)
_helpers.set_sub_prop(
configuration,
['load', 'destinationTable'],
self.destination.to_api_repr())
return {
'jobReference': {
'projectId': self.project,
'jobId': self.job_id,
},
'configuration': configuration,
}
def _copy_configuration_properties(self, configuration):
"""Helper: assign subclass configuration properties in cleaned."""
self._configuration._properties = copy.deepcopy(configuration)
@classmethod
def from_api_repr(cls, resource, client):
"""Factory: construct a job given its API representation
.. note::
This method assumes that the project found in the resource matches
the client's project.
:type resource: dict
:param resource: dataset job representation returned from the API
:type client: :class:`google.cloud.bigquery.client.Client`
:param client: Client which holds credentials and project
configuration for the dataset.
:rtype: :class:`google.cloud.bigquery.job.LoadJob`
:returns: Job parsed from ``resource``.
"""
job_id, config_resource = cls._get_resource_config(resource)
config = LoadJobConfig.from_api_repr(config_resource)
dest_config = _helpers.get_sub_prop(
config_resource, ['load', 'destinationTable'])
ds_ref = DatasetReference(dest_config['projectId'],
dest_config['datasetId'],)
destination = TableReference(ds_ref, dest_config['tableId'])
# sourceUris will be absent if this is a file upload.
source_uris = _helpers.get_sub_prop(
config_resource, ['load', 'sourceUris'])
job = cls(job_id, source_uris, destination, client, config)
job._set_properties(resource)
return job
class CopyJobConfig(_JobConfig):
"""Configuration options for copy jobs.
All properties in this class are optional. Values which are ``None`` use
the server defaults.
"""
def __init__(self):
super(CopyJobConfig, self).__init__('copy')
@property
def create_disposition(self):
"""google.cloud.bigquery.job.CreateDisposition: Specifies behavior
for creating tables.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.copy.createDisposition
"""
return self._get_sub_prop('createDisposition')
@create_disposition.setter
def create_disposition(self, value):
self._set_sub_prop('createDisposition', value)
@property
def write_disposition(self):
"""google.cloud.bigquery.job.WriteDisposition: Action that occurs if
the destination table already exists.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.copy.writeDisposition
"""
return self._get_sub_prop('writeDisposition')
@write_disposition.setter
def write_disposition(self, value):
self._set_sub_prop('writeDisposition', value)
@property
def destination_encryption_configuration(self):
"""google.cloud.bigquery.table.EncryptionConfiguration: Custom
encryption configuration for the destination table.
Custom encryption configuration (e.g., Cloud KMS keys) or ``None``
if using default encryption.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.copy.destinationEncryptionConfiguration
"""
prop = self._get_sub_prop('destinationEncryptionConfiguration')
if prop is not None:
prop = EncryptionConfiguration.from_api_repr(prop)
return prop
@destination_encryption_configuration.setter
def destination_encryption_configuration(self, value):
api_repr = value
if value is not None:
api_repr = value.to_api_repr()
self._set_sub_prop('destinationEncryptionConfiguration', api_repr)
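# --- Illustrative sketch (not part of the original module) ---
# A hedged example of how CopyJobConfig above might be used together with a
# client's copy_table helper. The client construction, dataset and table IDs
# below are placeholder assumptions, not values taken from this module.
def _example_copy_job_config():
    """Sketch: copy a table with an explicit write disposition (illustrative only)."""
    from google.cloud import bigquery  # assumed to be installed alongside this module

    client = bigquery.Client()  # hypothetical client; project taken from the environment
    config = CopyJobConfig()
    config.write_disposition = 'WRITE_TRUNCATE'  # overwrite the destination if it exists
    dataset_ref = client.dataset('my_dataset')        # placeholder dataset ID
    source = dataset_ref.table('source_table')        # placeholder table IDs
    destination = dataset_ref.table('destination_table')
    job = client.copy_table(source, destination, job_config=config)
    return job.result()  # block until the copy job finishes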
class CopyJob(_AsyncJob):
"""Asynchronous job: copy data into a table from other tables.
:type job_id: str
:param job_id: the job's ID, within the project belonging to ``client``.
:type sources: list of :class:`google.cloud.bigquery.table.TableReference`
:param sources: Tables from which data is to be copied.
:type destination: :class:`google.cloud.bigquery.table.TableReference`
:param destination: Table into which data is to be copied.
:type client: :class:`google.cloud.bigquery.client.Client`
:param client: A client which holds credentials and project configuration
for the dataset (which requires a project).
:type job_config: :class:`~google.cloud.bigquery.job.CopyJobConfig`
:param job_config:
(Optional) Extra configuration options for the copy job.
"""
_JOB_TYPE = 'copy'
def __init__(self, job_id, sources, destination, client, job_config=None):
super(CopyJob, self).__init__(job_id, client)
if job_config is None:
job_config = CopyJobConfig()
self.destination = destination
self.sources = sources
self._configuration = job_config
@property
def create_disposition(self):
"""See
:attr:`google.cloud.bigquery.job.CopyJobConfig.create_disposition`.
"""
return self._configuration.create_disposition
@property
def write_disposition(self):
"""See
:attr:`google.cloud.bigquery.job.CopyJobConfig.write_disposition`.
"""
return self._configuration.write_disposition
@property
def destination_encryption_configuration(self):
"""google.cloud.bigquery.table.EncryptionConfiguration: Custom
encryption configuration for the destination table.
Custom encryption configuration (e.g., Cloud KMS keys) or ``None``
if using default encryption.
See
:attr:`google.cloud.bigquery.job.CopyJobConfig.destination_encryption_configuration`.
"""
return self._configuration.destination_encryption_configuration
def _build_resource(self):
"""Generate a resource for :meth:`begin`."""
source_refs = [{
'projectId': table.project,
'datasetId': table.dataset_id,
'tableId': table.table_id,
} for table in self.sources]
configuration = self._configuration.to_api_repr()
_helpers.set_sub_prop(
configuration, ['copy', 'sourceTables'], source_refs)
_helpers.set_sub_prop(
configuration,
['copy', 'destinationTable'],
{
'projectId': self.destination.project,
'datasetId': self.destination.dataset_id,
'tableId': self.destination.table_id,
})
return {
'jobReference': {
'projectId': self.project,
'jobId': self.job_id,
},
'configuration': configuration,
}
def _copy_configuration_properties(self, configuration):
"""Helper: assign subclass configuration properties in cleaned."""
self._configuration._properties = copy.deepcopy(configuration)
@classmethod
def from_api_repr(cls, resource, client):
"""Factory: construct a job given its API representation
.. note::
This method assumes that the project found in the resource matches
the client's project.
:type resource: dict
:param resource: dataset job representation returned from the API
:type client: :class:`google.cloud.bigquery.client.Client`
:param client: Client which holds credentials and project
configuration for the dataset.
:rtype: :class:`google.cloud.bigquery.job.CopyJob`
:returns: Job parsed from ``resource``.
"""
job_id, config_resource = cls._get_resource_config(resource)
config = CopyJobConfig.from_api_repr(config_resource)
# Copy required fields to the job.
copy_resource = config_resource['copy']
destination = TableReference.from_api_repr(
copy_resource['destinationTable'])
sources = []
source_configs = copy_resource.get('sourceTables')
if source_configs is None:
single = copy_resource.get('sourceTable')
if single is None:
raise KeyError(
"Resource missing 'sourceTables' / 'sourceTable'")
source_configs = [single]
for source_config in source_configs:
table_ref = TableReference.from_api_repr(source_config)
sources.append(table_ref)
job = cls(
job_id, sources, destination, client=client, job_config=config)
job._set_properties(resource)
return job
class ExtractJobConfig(_JobConfig):
"""Configuration options for extract jobs.
All properties in this class are optional. Values which are ``None`` use
the server defaults.
"""
def __init__(self):
super(ExtractJobConfig, self).__init__('extract')
@property
def compression(self):
"""google.cloud.bigquery.job.Compression: Compression type to use for
exported files.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.extract.compression
"""
return self._get_sub_prop('compression')
@compression.setter
def compression(self, value):
self._set_sub_prop('compression', value)
@property
def destination_format(self):
"""google.cloud.bigquery.job.DestinationFormat: Exported file format.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.extract.destinationFormat
"""
return self._get_sub_prop('destinationFormat')
@destination_format.setter
def destination_format(self, value):
self._set_sub_prop('destinationFormat', value)
@property
def field_delimiter(self):
"""str: Delimiter to use between fields in the exported data.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.extract.fieldDelimiter
"""
return self._get_sub_prop('fieldDelimiter')
@field_delimiter.setter
def field_delimiter(self, value):
self._set_sub_prop('fieldDelimiter', value)
@property
def print_header(self):
"""bool: Print a header row in the exported data.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.extract.printHeader
"""
return self._get_sub_prop('printHeader')
@print_header.setter
def print_header(self, value):
self._set_sub_prop('printHeader', value)
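# --- Illustrative sketch (not part of the original module) ---
# A hedged example of how ExtractJobConfig above might be used to export a
# table to Cloud Storage as gzipped CSV. The client, dataset, table and
# bucket names are placeholder assumptions for illustration only.
def _example_extract_job_config():
    """Sketch: export a table as compressed CSV (illustrative only)."""
    from google.cloud import bigquery  # assumed to be installed alongside this module

    client = bigquery.Client()
    config = ExtractJobConfig()
    config.destination_format = 'CSV'
    config.compression = 'GZIP'
    config.print_header = True
    source = client.dataset('my_dataset').table('my_table')   # placeholder IDs
    destination_uri = 'gs://my-bucket/my_table-*.csv.gz'       # placeholder URI
    job = client.extract_table(source, destination_uri, job_config=config)
    job.result()  # wait for the export to complete
    return job.destination_uri_file_counts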
class ExtractJob(_AsyncJob):
"""Asynchronous job: extract data from a table into Cloud Storage.
:type job_id: str
:param job_id: the job's ID
:type source: :class:`google.cloud.bigquery.table.TableReference`
:param source: Table from which data is to be extracted.
:type destination_uris: list of string
:param destination_uris:
URIs describing where the extracted data will be written in Cloud
Storage, using the format ``gs://<bucket_name>/<object_name_or_glob>``.
:type client: :class:`google.cloud.bigquery.client.Client`
:param client:
A client which holds credentials and project configuration.
:type job_config: :class:`~google.cloud.bigquery.job.ExtractJobConfig`
:param job_config:
(Optional) Extra configuration options for the extract job.
"""
_JOB_TYPE = 'extract'
def __init__(
self, job_id, source, destination_uris, client, job_config=None):
super(ExtractJob, self).__init__(job_id, client)
if job_config is None:
job_config = ExtractJobConfig()
self.source = source
self.destination_uris = destination_uris
self._configuration = job_config
@property
def compression(self):
"""See
:attr:`google.cloud.bigquery.job.ExtractJobConfig.compression`.
"""
return self._configuration.compression
@property
def destination_format(self):
"""See
:attr:`google.cloud.bigquery.job.ExtractJobConfig.destination_format`.
"""
return self._configuration.destination_format
@property
def field_delimiter(self):
"""See
:attr:`google.cloud.bigquery.job.ExtractJobConfig.field_delimiter`.
"""
return self._configuration.field_delimiter
@property
def print_header(self):
"""See
:attr:`google.cloud.bigquery.job.ExtractJobConfig.print_header`.
"""
return self._configuration.print_header
@property
def destination_uri_file_counts(self):
"""Return file counts from job statistics, if present.
See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.extract.destinationUriFileCounts
:rtype: int or None
:returns: count of files written to the destination URI(s), or None if
job is not yet complete.
"""
result = self._job_statistics().get('destinationUriFileCounts')
if result is not None:
result = int(result)
return result
def _build_resource(self):
"""Generate a resource for :meth:`begin`."""
source_ref = {
'projectId': self.source.project,
'datasetId': self.source.dataset_id,
'tableId': self.source.table_id,
}
configuration = self._configuration.to_api_repr()
_helpers.set_sub_prop(
configuration, ['extract', 'sourceTable'], source_ref)
_helpers.set_sub_prop(
configuration,
['extract', 'destinationUris'],
self.destination_uris)
resource = {
'jobReference': {
'projectId': self.project,
'jobId': self.job_id,
},
'configuration': configuration,
}
return resource
def _copy_configuration_properties(self, configuration):
"""Helper: assign subclass configuration properties in cleaned."""
self._configuration._properties = copy.deepcopy(configuration)
@classmethod
def from_api_repr(cls, resource, client):
"""Factory: construct a job given its API representation
.. note::
This method assumes that the project found in the resource matches
the client's project.
:type resource: dict
:param resource: dataset job representation returned from the API
:type client: :class:`google.cloud.bigquery.client.Client`
:param client: Client which holds credentials and project
configuration for the dataset.
:rtype: :class:`google.cloud.bigquery.job.ExtractJob`
:returns: Job parsed from ``resource``.
"""
job_id, config_resource = cls._get_resource_config(resource)
config = ExtractJobConfig.from_api_repr(config_resource)
source_config = _helpers.get_sub_prop(
config_resource, ['extract', 'sourceTable'])
dataset = DatasetReference(
source_config['projectId'], source_config['datasetId'])
source = dataset.table(source_config['tableId'])
destination_uris = _helpers.get_sub_prop(
config_resource, ['extract', 'destinationUris'])
job = cls(
job_id, source, destination_uris, client=client, job_config=config)
job._set_properties(resource)
return job
def _from_api_repr_query_parameters(resource):
return [
_query_param_from_api_repr(mapping)
for mapping in resource
]
def _to_api_repr_query_parameters(value):
return [
query_parameter.to_api_repr()
for query_parameter in value
]
def _from_api_repr_udf_resources(resource):
udf_resources = []
for udf_mapping in resource:
for udf_type, udf_value in udf_mapping.items():
udf_resources.append(UDFResource(udf_type, udf_value))
return udf_resources
def _to_api_repr_udf_resources(value):
return [
{udf_resource.udf_type: udf_resource.value}
for udf_resource in value
]
def _from_api_repr_table_defs(resource):
return {k: ExternalConfig.from_api_repr(v) for k, v in resource.items()}
def _to_api_repr_table_defs(value):
return {k: ExternalConfig.to_api_repr(v) for k, v in value.items()}
class QueryJobConfig(_JobConfig):
"""Configuration options for query jobs.
All properties in this class are optional. Values which are ``None`` use
the server defaults.
"""
def __init__(self):
super(QueryJobConfig, self).__init__('query')
@property
def destination_encryption_configuration(self):
"""google.cloud.bigquery.table.EncryptionConfiguration: Custom
encryption configuration for the destination table.
Custom encryption configuration (e.g., Cloud KMS keys) or ``None``
if using default encryption.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.destinationEncryptionConfiguration
"""
prop = self._get_sub_prop('destinationEncryptionConfiguration')
if prop is not None:
prop = EncryptionConfiguration.from_api_repr(prop)
return prop
@destination_encryption_configuration.setter
def destination_encryption_configuration(self, value):
api_repr = value
if value is not None:
api_repr = value.to_api_repr()
self._set_sub_prop('destinationEncryptionConfiguration', api_repr)
@property
def allow_large_results(self):
"""bool: Allow large query results tables (legacy SQL, only)
See
https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.allowLargeResults
"""
return self._get_sub_prop('allowLargeResults')
@allow_large_results.setter
def allow_large_results(self, value):
self._set_sub_prop('allowLargeResults', value)
@property
def create_disposition(self):
"""google.cloud.bigquery.job.CreateDisposition: Specifies behavior
for creating tables.
See
https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.createDisposition
"""
return self._get_sub_prop('createDisposition')
@create_disposition.setter
def create_disposition(self, value):
self._set_sub_prop('createDisposition', value)
@property
def default_dataset(self):
"""google.cloud.bigquery.dataset.DatasetReference: the default dataset
to use for unqualified table names in the query or ``None`` if not set.
See
https://g.co/cloud/bigquery/docs/reference/v2/jobs#configuration.query.defaultDataset
"""
prop = self._get_sub_prop('defaultDataset')
if prop is not None:
prop = DatasetReference.from_api_repr(prop)
return prop
@default_dataset.setter
def default_dataset(self, value):
resource = None
if value is not None:
resource = value.to_api_repr()
self._set_sub_prop('defaultDataset', resource)
@property
def destination(self):
"""google.cloud.bigquery.table.TableReference: table where results are
written or ``None`` if not set.
See
https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.destinationTable
"""
prop = self._get_sub_prop('destinationTable')
if prop is not None:
prop = TableReference.from_api_repr(prop)
return prop
@destination.setter
def destination(self, value):
resource = None
if value is not None:
resource = value.to_api_repr()
self._set_sub_prop('destinationTable', resource)
@property
def dry_run(self):
"""bool: ``True`` if this query should be a dry run to estimate costs.
See
https://g.co/cloud/bigquery/docs/reference/v2/jobs#configuration.dryRun
"""
return self._properties.get('dryRun')
@dry_run.setter
def dry_run(self, value):
self._properties['dryRun'] = value
@property
def flatten_results(self):
"""bool: Flatten nested/repeated fields in results. (Legacy SQL only)
See
https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.flattenResults
"""
return self._get_sub_prop('flattenResults')
@flatten_results.setter
def flatten_results(self, value):
self._set_sub_prop('flattenResults', value)
@property
def maximum_billing_tier(self):
"""int: Deprecated. Changes the billing tier to allow high-compute
queries.
See
https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.maximumBillingTier
"""
return self._get_sub_prop('maximumBillingTier')
@maximum_billing_tier.setter
def maximum_billing_tier(self, value):
self._set_sub_prop('maximumBillingTier', value)
@property
def maximum_bytes_billed(self):
"""int: Maximum bytes to be billed for this job or ``None`` if not set.
See
https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.maximumBytesBilled
"""
return _int_or_none(self._get_sub_prop('maximumBytesBilled'))
@maximum_bytes_billed.setter
def maximum_bytes_billed(self, value):
self._set_sub_prop('maximumBytesBilled', str(value))
@property
def priority(self):
"""google.cloud.bigquery.job.QueryPriority: Priority of the query.
See
https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.priority
"""
return self._get_sub_prop('priority')
@priority.setter
def priority(self, value):
self._set_sub_prop('priority', value)
@property
def query_parameters(self):
"""List[Union[google.cloud.bigquery.query.ArrayQueryParameter, \
google.cloud.bigquery.query.ScalarQueryParameter, \
google.cloud.bigquery.query.StructQueryParameter]]: list of parameters
for parameterized query (empty by default)
See:
https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.queryParameters
"""
prop = self._get_sub_prop('queryParameters', default=[])
return _from_api_repr_query_parameters(prop)
@query_parameters.setter
def query_parameters(self, values):
self._set_sub_prop(
'queryParameters', _to_api_repr_query_parameters(values))
@property
def udf_resources(self):
"""List[google.cloud.bigquery.query.UDFResource]: user
defined function resources (empty by default)
See:
https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.userDefinedFunctionResources
"""
prop = self._get_sub_prop('userDefinedFunctionResources', default=[])
return _from_api_repr_udf_resources(prop)
@udf_resources.setter
def udf_resources(self, values):
self._set_sub_prop(
'userDefinedFunctionResources',
_to_api_repr_udf_resources(values))
@property
def use_legacy_sql(self):
"""bool: Use legacy SQL syntax.
See
https://g.co/cloud/bigquery/docs/reference/v2/jobs#configuration.query.useLegacySql
"""
return self._get_sub_prop('useLegacySql')
@use_legacy_sql.setter
def use_legacy_sql(self, value):
self._set_sub_prop('useLegacySql', value)
@property
def use_query_cache(self):
"""bool: Look for the query result in the cache.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.useQueryCache
"""
return self._get_sub_prop('useQueryCache')
@use_query_cache.setter
def use_query_cache(self, value):
self._set_sub_prop('useQueryCache', value)
@property
def write_disposition(self):
"""google.cloud.bigquery.job.WriteDisposition: Action that occurs if
the destination table already exists.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.writeDisposition
"""
return self._get_sub_prop('writeDisposition')
@write_disposition.setter
def write_disposition(self, value):
self._set_sub_prop('writeDisposition', value)
@property
def table_definitions(self):
"""Dict[str, google.cloud.bigquery.external_config.ExternalConfig]:
Definitions for external tables or ``None`` if not set.
See
https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.tableDefinitions
"""
prop = self._get_sub_prop('tableDefinitions')
if prop is not None:
prop = _from_api_repr_table_defs(prop)
return prop
@table_definitions.setter
def table_definitions(self, values):
self._set_sub_prop(
'tableDefinitions', _to_api_repr_table_defs(values))
def to_api_repr(self):
"""Build an API representation of the query job config.
Returns:
dict: A dictionary in the format used by the BigQuery API.
"""
resource = copy.deepcopy(self._properties)
# Query parameters have an additional property associated with them
# to indicate if the query is using named or positional parameters.
query_parameters = resource['query'].get('queryParameters')
if query_parameters:
if query_parameters[0].get('name') is None:
resource['query']['parameterMode'] = 'POSITIONAL'
else:
resource['query']['parameterMode'] = 'NAMED'
return resource
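# --- Illustrative sketch (not part of the original module) ---
# to_api_repr() above infers 'parameterMode' from the first query parameter:
# a parameter created without a name is treated as positional, otherwise as
# named. The demonstration below assumes ScalarQueryParameter as imported at
# the top of this module; the parameter names and values are placeholders.
def _example_query_parameter_mode():
    """Sketch: named vs. positional parameterMode (illustrative only)."""
    named = QueryJobConfig()
    named.query_parameters = [ScalarQueryParameter('min_age', 'INT64', 18)]
    assert named.to_api_repr()['query']['parameterMode'] == 'NAMED'

    positional = QueryJobConfig()
    positional.query_parameters = [ScalarQueryParameter(None, 'INT64', 18)]
    assert positional.to_api_repr()['query']['parameterMode'] == 'POSITIONAL'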
class QueryJob(_AsyncJob):
"""Asynchronous job: query tables.
:type job_id: str
:param job_id: the job's ID, within the project belonging to ``client``.
:type query: str
:param query: SQL query string
:type client: :class:`google.cloud.bigquery.client.Client`
:param client: A client which holds credentials and project configuration
for the dataset (which requires a project).
:type job_config: :class:`~google.cloud.bigquery.job.QueryJobConfig`
:param job_config:
(Optional) Extra configuration options for the query job.
"""
_JOB_TYPE = 'query'
_UDF_KEY = 'userDefinedFunctionResources'
def __init__(self, job_id, query, client, job_config=None):
super(QueryJob, self).__init__(job_id, client)
if job_config is None:
job_config = QueryJobConfig()
if job_config.use_legacy_sql is None:
job_config.use_legacy_sql = False
self.query = query
self._configuration = job_config
self._query_results = None
self._done_timeout = None
@property
def allow_large_results(self):
"""See
:attr:`google.cloud.bigquery.job.QueryJobConfig.allow_large_results`.
"""
return self._configuration.allow_large_results
@property
def create_disposition(self):
"""See
:attr:`google.cloud.bigquery.job.QueryJobConfig.create_disposition`.
"""
return self._configuration.create_disposition
@property
def default_dataset(self):
"""See
:attr:`google.cloud.bigquery.job.QueryJobConfig.default_dataset`.
"""
return self._configuration.default_dataset
@property
def destination(self):
"""See
:attr:`google.cloud.bigquery.job.QueryJobConfig.destination`.
"""
return self._configuration.destination
@property
def destination_encryption_configuration(self):
"""google.cloud.bigquery.table.EncryptionConfiguration: Custom
encryption configuration for the destination table.
Custom encryption configuration (e.g., Cloud KMS keys) or ``None``
if using default encryption.
See
:attr:`google.cloud.bigquery.job.QueryJobConfig.destination_encryption_configuration`.
"""
return self._configuration.destination_encryption_configuration
@property
def dry_run(self):
"""See
:attr:`google.cloud.bigquery.job.QueryJobConfig.dry_run`.
"""
return self._configuration.dry_run
@property
def flatten_results(self):
"""See
:attr:`google.cloud.bigquery.job.QueryJobConfig.flatten_results`.
"""
return self._configuration.flatten_results
@property
def priority(self):
"""See
:attr:`google.cloud.bigquery.job.QueryJobConfig.priority`.
"""
return self._configuration.priority
@property
def query_parameters(self):
"""See
:attr:`google.cloud.bigquery.job.QueryJobConfig.query_parameters`.
"""
return self._configuration.query_parameters
@property
def udf_resources(self):
"""See
:attr:`google.cloud.bigquery.job.QueryJobConfig.udf_resources`.
"""
return self._configuration.udf_resources
@property
def use_legacy_sql(self):
"""See
:attr:`google.cloud.bigquery.job.QueryJobConfig.use_legacy_sql`.
"""
return self._configuration.use_legacy_sql
@property
def use_query_cache(self):
"""See
:attr:`google.cloud.bigquery.job.QueryJobConfig.use_query_cache`.
"""
return self._configuration.use_query_cache
@property
def write_disposition(self):
"""See
:attr:`google.cloud.bigquery.job.QueryJobConfig.write_disposition`.
"""
return self._configuration.write_disposition
@property
def maximum_billing_tier(self):
"""See
:attr:`google.cloud.bigquery.job.QueryJobConfig.maximum_billing_tier`.
"""
return self._configuration.maximum_billing_tier
@property
def maximum_bytes_billed(self):
"""See
:attr:`google.cloud.bigquery.job.QueryJobConfig.maximum_bytes_billed`.
"""
return self._configuration.maximum_bytes_billed
@property
def table_definitions(self):
"""See
:attr:`google.cloud.bigquery.job.QueryJobConfig.table_definitions`.
"""
return self._configuration.table_definitions
def _build_resource(self):
"""Generate a resource for :meth:`begin`."""
configuration = self._configuration.to_api_repr()
resource = {
'jobReference': {
'projectId': self.project,
'jobId': self.job_id,
},
'configuration': configuration,
}
configuration['query']['query'] = self.query
return resource
def _copy_configuration_properties(self, configuration):
"""Helper: assign subclass configuration properties in cleaned."""
self._configuration._properties = copy.deepcopy(configuration)
self.query = _helpers.get_sub_prop(configuration, ['query', 'query'])
@classmethod
def from_api_repr(cls, resource, client):
"""Factory: construct a job given its API representation
:type resource: dict
:param resource: dataset job representation returned from the API
:type client: :class:`google.cloud.bigquery.client.Client`
:param client: Client which holds credentials and project
configuration for the dataset.
:rtype: :class:`google.cloud.bigquery.job.QueryJob`
:returns: Job parsed from ``resource``.
"""
job_id, config = cls._get_resource_config(resource)
query = config['query']['query']
job = cls(job_id, query, client=client)
job._set_properties(resource)
return job
@property
def query_plan(self):
"""Return query plan from job statistics, if present.
See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.query.queryPlan
:rtype: list of :class:`QueryPlanEntry`
:returns: mappings describing the query plan, or an empty list
if the query has not yet completed.
"""
plan_entries = self._job_statistics().get('queryPlan', ())
return [QueryPlanEntry.from_api_repr(entry) for entry in plan_entries]
@property
def total_bytes_processed(self):
"""Return total bytes processed from job statistics, if present.
See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.query.totalBytesProcessed
:rtype: int or None
:returns: total bytes processed by the job, or None if job is not
yet complete.
"""
result = self._job_statistics().get('totalBytesProcessed')
if result is not None:
result = int(result)
return result
@property
def total_bytes_billed(self):
"""Return total bytes billed from job statistics, if present.
See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.query.totalBytesBilled
:rtype: int or None
:returns: total bytes billed for the job, or None if job is not
yet complete.
"""
result = self._job_statistics().get('totalBytesBilled')
if result is not None:
result = int(result)
return result
@property
def billing_tier(self):
"""Return billing tier from job statistics, if present.
See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.query.billingTier
:rtype: int or None
:returns: billing tier used by the job, or None if job is not
yet complete.
"""
return self._job_statistics().get('billingTier')
@property
def cache_hit(self):
"""Return whether or not query results were served from cache.
See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.query.cacheHit
:rtype: bool or None
:returns: whether the query results were returned from cache, or None
if job is not yet complete.
"""
return self._job_statistics().get('cacheHit')
@property
def num_dml_affected_rows(self):
"""Return the number of DML rows affected by the job.
See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.query.numDmlAffectedRows
:rtype: int or None
:returns: number of DML rows affected by the job, or None if job is not
yet complete.
"""
result = self._job_statistics().get('numDmlAffectedRows')
if result is not None:
result = int(result)
return result
@property
def statement_type(self):
"""Return statement type from job statistics, if present.
See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.query.statementType
:rtype: str or None
:returns: type of statement used by the job, or None if job is not
yet complete.
"""
return self._job_statistics().get('statementType')
@property
def referenced_tables(self):
"""Return referenced tables from job statistics, if present.
See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.query.referencedTables
:rtype: list of :class:`~google.cloud.bigquery.table.TableReference`
:returns: table references used by the query, or an empty list
if the query has not yet completed.
"""
tables = []
datasets_by_project_name = {}
for table in self._job_statistics().get('referencedTables', ()):
t_project = table['projectId']
ds_id = table['datasetId']
t_dataset = datasets_by_project_name.get((t_project, ds_id))
if t_dataset is None:
t_dataset = DatasetReference(t_project, ds_id)
datasets_by_project_name[(t_project, ds_id)] = t_dataset
t_name = table['tableId']
tables.append(t_dataset.table(t_name))
return tables
@property
def undeclared_query_parameters(self):
"""Return undeclared query parameters from job statistics, if present.
See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.query.undeclaredQueryParameters
:rtype:
list of
:class:`~google.cloud.bigquery.ArrayQueryParameter`,
:class:`~google.cloud.bigquery.ScalarQueryParameter`, or
:class:`~google.cloud.bigquery.StructQueryParameter`
:returns: undeclared parameters, or an empty list if the query has
not yet completed.
"""
parameters = []
undeclared = self._job_statistics().get(
'undeclaredQueryParameters', ())
for parameter in undeclared:
p_type = parameter['parameterType']
if 'arrayType' in p_type:
klass = ArrayQueryParameter
elif 'structTypes' in p_type:
klass = StructQueryParameter
else:
klass = ScalarQueryParameter
parameters.append(klass.from_api_repr(parameter))
return parameters
def done(self, retry=DEFAULT_RETRY):
"""Refresh the job and checks if it is complete.
:rtype: bool
:returns: True if the job is complete, False otherwise.
"""
# Since the API to getQueryResults can hang up to the timeout value
# (default of 10 seconds), set the timeout parameter to ensure that
# the timeout from the futures API is respected. See:
# https://github.com/GoogleCloudPlatform/google-cloud-python/issues/4135
timeout_ms = None
if self._done_timeout is not None:
# Subtract a buffer for context switching, network latency, etc.
timeout = self._done_timeout - _TIMEOUT_BUFFER_SECS
timeout = max(min(timeout, 10), 0)
self._done_timeout -= timeout
self._done_timeout = max(0, self._done_timeout)
timeout_ms = int(timeout * 1000)
# Do not refresh if the state is already done, as the job will not
# change once complete.
if self.state != _DONE_STATE:
self._query_results = self._client._get_query_results(
self.job_id, retry,
project=self.project, timeout_ms=timeout_ms)
# Only reload the job once we know the query is complete.
# This will ensure that fields such as the destination table are
# correctly populated.
if self._query_results.complete:
self.reload(retry=retry)
return self.state == _DONE_STATE
def _blocking_poll(self, timeout=None):
self._done_timeout = timeout
super(QueryJob, self)._blocking_poll(timeout=timeout)
def result(self, timeout=None, retry=DEFAULT_RETRY):
"""Start the job and wait for it to complete and get the result.
:type timeout: float
:param timeout:
How long (in seconds) to wait for job to complete before raising
a :class:`concurrent.futures.TimeoutError`.
:type retry: :class:`google.api_core.retry.Retry`
:param retry: (Optional) How to retry the call that retrieves rows.
:rtype: :class:`~google.cloud.bigquery.table.RowIterator`
:returns:
Iterator of row data :class:`~google.cloud.bigquery.table.Row`-s.
During each page, the iterator will have the ``total_rows``
attribute set, which counts the total number of rows **in the
result set** (this is distinct from the total number of rows in
the current page: ``iterator.page.num_items``).
:raises:
:class:`~google.cloud.exceptions.GoogleCloudError` if the job
failed or :class:`concurrent.futures.TimeoutError` if the job did
not complete in the given timeout.
"""
super(QueryJob, self).result(timeout=timeout)
# Return an iterator instead of returning the job.
if not self._query_results:
self._query_results = self._client._get_query_results(
self.job_id, retry, project=self.project)
schema = self._query_results.schema
dest_table = self.destination
return self._client.list_rows(dest_table, selected_fields=schema,
retry=retry)
def to_dataframe(self):
"""Return a pandas DataFrame from a QueryJob
Returns:
A :class:`~pandas.DataFrame` populated with row data and column
headers from the query results. The column headers are derived
from the destination table's schema.
Raises:
ValueError: If the `pandas` library cannot be imported.
"""
return self.result().to_dataframe()
def __iter__(self):
return iter(self.result())
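# --- Illustrative sketch (not part of the original module) ---
# QueryJob.result() returns a row iterator over the destination table, and
# __iter__ above simply delegates to it. The usage below is a hedged example;
# the client construction and the SQL text are placeholder assumptions.
def _example_query_job_usage():
    """Sketch: run a standard-SQL query and read its rows (illustrative only)."""
    from google.cloud import bigquery  # assumed to be installed alongside this module

    client = bigquery.Client()
    config = QueryJobConfig()
    config.use_query_cache = True
    job = client.query('SELECT 1 AS x', job_config=config)  # placeholder SQL
    for row in job:  # iterating the job iterates its result rows
        print(row['x'])
    return job.total_bytes_processed  # populated once the job has finished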
class QueryPlanEntryStep(object):
"""Map a single step in a query plan entry.
:type kind: str
:param kind: step type
:type substeps: list of str
:param substeps: names of substeps
"""
def __init__(self, kind, substeps):
self.kind = kind
self.substeps = list(substeps)
@classmethod
def from_api_repr(cls, resource):
"""Factory: construct instance from the JSON repr.
:type resource: dict
:param resource: JSON representation of the entry
:rtype: :class:`QueryPlanEntryStep`
:return: new instance built from the resource
"""
return cls(
kind=resource.get('kind'),
substeps=resource.get('substeps', ()),
)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self.kind == other.kind and self.substeps == other.substeps
class QueryPlanEntry(object):
"""Map a single entry in a query plan.
:type name: str
:param name: name of the entry
:type entry_id: int
:param entry_id: ID of the entry
:type wait_ratio_avg: float
:param wait_ratio_avg: average wait ratio
:type wait_ratio_max: float
:param wait_ratio_max: maximum wait ratio
:type read_ratio_avg: float
:param read_ratio_avg: average read ratio
:type read_ratio_max: float
:param read_ratio_max: maximum read ratio
:type compute_ratio_avg: float
:param compute_ratio_avg: average compute ratio
:type compute_ratio_max: float
:param compute_ratio_max: maximum compute ratio
:type write_ratio_avg: float
:param write_ratio_avg: average write ratio
:type write_ratio_max: float
:param write_ratio_max: maximum write ratio
:type records_read: int
:param records_read: number of records read
:type records_written: int
:param records_written: number of records written
:type status: str
:param status: entry status
:type steps: List(QueryPlanEntryStep)
:param steps: steps in the entry
"""
def __init__(self,
name,
entry_id,
wait_ratio_avg,
wait_ratio_max,
read_ratio_avg,
read_ratio_max,
compute_ratio_avg,
compute_ratio_max,
write_ratio_avg,
write_ratio_max,
records_read,
records_written,
status,
steps):
self.name = name
self.entry_id = entry_id
self.wait_ratio_avg = wait_ratio_avg
self.wait_ratio_max = wait_ratio_max
self.read_ratio_avg = read_ratio_avg
self.read_ratio_max = read_ratio_max
self.compute_ratio_avg = compute_ratio_avg
self.compute_ratio_max = compute_ratio_max
self.write_ratio_avg = write_ratio_avg
self.write_ratio_max = write_ratio_max
self.records_read = records_read
self.records_written = records_written
self.status = status
self.steps = steps
@classmethod
def from_api_repr(cls, resource):
"""Factory: construct instance from the JSON repr.
:type resource: dict
:param resource: JSON representation of the entry
:rtype: :class:`QueryPlanEntry`
:return: new instance built from the resource
"""
records_read = resource.get('recordsRead')
if records_read is not None:
records_read = int(records_read)
records_written = resource.get('recordsWritten')
if records_written is not None:
records_written = int(records_written)
return cls(
name=resource.get('name'),
entry_id=resource.get('id'),
wait_ratio_avg=resource.get('waitRatioAvg'),
wait_ratio_max=resource.get('waitRatioMax'),
read_ratio_avg=resource.get('readRatioAvg'),
read_ratio_max=resource.get('readRatioMax'),
compute_ratio_avg=resource.get('computeRatioAvg'),
compute_ratio_max=resource.get('computeRatioMax'),
write_ratio_avg=resource.get('writeRatioAvg'),
write_ratio_max=resource.get('writeRatioMax'),
records_read=records_read,
records_written=records_written,
status=resource.get('status'),
steps=[QueryPlanEntryStep.from_api_repr(step)
for step in resource.get('steps', ())],
)
|
the-stack_0_16381
|
from h2o.estimators import H2ODeepLearningEstimator, H2OGradientBoostingEstimator, H2OGeneralizedLinearEstimator, \
H2ONaiveBayesEstimator, H2ORandomForestEstimator
from sklearn.base import BaseEstimator
import h2o
import pandas as pd
class H2ODecorator(BaseEstimator):
def __init__(self, est_type, est_params, nthreads=-1, mem_max='2G', target_type=None):
# Using H2O estimator classes directly does not work correctly, hence a string-to-class mapping is used
est_map = {
'dl': H2ODeepLearningEstimator,
'gbm': H2OGradientBoostingEstimator,
'glm': H2OGeneralizedLinearEstimator,
'nb': H2ONaiveBayesEstimator,
'rf': H2ORandomForestEstimator,
}
self.est_type = est_type
self.est_params = est_params
self.est = est_map[est_type](**est_params)
self.nthreads = nthreads
self.mem_max = mem_max
self.cluster_ready = False
self.target_type = target_type
def _init_h2o(self):
if self.cluster_ready:
return
h2o.init(nthreads=self.nthreads, max_mem_size=self.mem_max)
self.cluster_ready = True
def fit(self, X, y):
self._init_h2o()
features = h2o.H2OFrame(python_obj=X)
if isinstance(y, pd.Series):
_y = y.values
else:
_y = y
if self.target_type is not None:
target_type = [self.target_type]
else:
target_type = None
target = h2o.H2OFrame(python_obj=_y, column_types=target_type)
self.est.fit(features, target)
return self
def predict(self, X):
self._init_h2o()
features = h2o.H2OFrame(python_obj=X)
pred_df = self.est.predict(features).as_data_frame()
if 'predict' in pred_df.columns:
return pred_df['predict']
else:
return pred_df.iloc[:, 0]
def predict_proba(self, X):
self._init_h2o()
features = h2o.H2OFrame(python_obj=X)
pred_df = self.est.predict(features).as_data_frame()
if 'predict' in pred_df.columns:
return pred_df.drop('predict', axis=1).values
else:
return pred_df.drop(pred_df.columns[0], axis=1).values
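# --- Illustrative usage sketch (not part of the original module) ---
# Because H2ODecorator exposes the sklearn estimator interface, it can be
# handed to sklearn utilities such as cross_val_score. The toy dataset and
# GBM hyper-parameters below are assumptions for illustration only.
def _example_h2o_decorator():
    """Sketch: cross-validate a wrapped H2O GBM classifier (illustrative only)."""
    from sklearn.datasets import load_iris
    from sklearn.model_selection import cross_val_score

    X, y = load_iris(return_X_y=True)
    est = H2ODecorator('gbm', {'ntrees': 20}, nthreads=2, mem_max='1G',
                       target_type='enum')  # 'enum' asks H2O for classification
    # Each fold reuses the locally started H2O cluster (see _init_h2o above).
    return cross_val_score(est, X, y, cv=3, scoring='accuracy')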
|
the-stack_0_16382
|
#! /usr/bin/env python
# example patch call:
# ./extract_patches.py -subset 202 -slices 64 -dim 64
#### ---- Imports & Dependencies ---- ####
import sys
import os
import argparse
from configparser import ConfigParser
import pathlib
from glob import glob
from random import shuffle
import SimpleITK as sitk # pip install SimpleITK
from tqdm import tqdm # pip install tqdm
import h5py
import pandas as pd
import numpy as np
from scipy.spatial import distance
#### ---- Argparse Utility ---- ####
parser = argparse.ArgumentParser(description='Modify the patch extractor script',add_help=True)
parser.add_argument('-hdf5',
action="store_true",
dest="hdf5",
default=True,
help='Save processed data to hdf5')
parser.add_argument('-hu_norm',
action="store_true",
dest="hu_norm",
default=False,
help='Normalize Patch to -1000 - 400 HU')
parser.add_argument('-slices',
type=int,
action="store",
dest="slices",
default=1,
help='Num of tensor slices > 0, default = 1')
parser.add_argument('-dim',
action="store",
dest="dim",
type=int,
default=64,
help='Dimension of the patch, default = 64')
parser.add_argument('-remote',
action="store_true",
dest="remote",
default=False,
help='Use if running script remote e.g. AWS')
requiredNamed = parser.add_argument_group('required named arguments')
requiredNamed.add_argument('-subset',
action="store",
dest="subset",
type=lambda s: ['subset'+str(x)+'/' for x in s.split(',')], #pass a list of nums to arg
required=True,
help='subset dir name or number(s) e.g. 0,1,2')
args = parser.parse_args()
#### ---- ConfigParse Utility ---- ####
config = ConfigParser()
config.read('extract_patches_config.ini') #local just for now (need if - else for AWS)
'''
Example extract_patches_config.ini file:
[local]
LUNA_PATH = /Users/keil/datasets/LUNA16/
CSV_PATH = /Users/keil/datasets/LUNA16/csv-files/
IMG_PATH = /Users/keil/datasets/LUNA16/patches/
[remote]
# - when we move to AWS
'''
#### ---- Global Vars ---- ####
LUNA_PATH = config.get('local', 'LUNA_PATH')
CSV_PATH = config.get('local', 'CSV_PATH')
IMG_PATH = config.get('local', 'IMG_PATH')
SUBSET = args.subset
SAVE_HDF5 = args.hdf5
HU_NORM = args.hu_norm
PATCH_DIM = args.dim
NUM_SLICES = args.slices
CHANNELS = 1
PATCH_WIDTH = PATCH_DIM/2
PATCH_HEIGHT = PATCH_DIM/2
PATCH_DEPTH = NUM_SLICES/2
# WORK_REMOTE = args.remote #add later w/ AWS
#TODO add this to config file for csv file name
DF_NODE = pd.read_csv(CSV_PATH + "candidates_V2.csv")
DF_NODE1 = pd.read_csv(CSV_PATH + "candidates_with_annotations.csv")
# DF_NODE = pd.read_csv(CSV_PATH + "candidates_with_annotations.csv")
FILE_LIST = []
SUBSET_LIST = []
for unique_set in SUBSET:
mhd_files = glob("{}{}/*.mhd".format(LUNA_PATH, unique_set))
FILE_LIST.extend(mhd_files) #add subset of .mhd files
subset_num = unique_set.strip('subset/') #extracting out subset number
for elements in mhd_files: #making sure we match each globbed mhd file to a subset num
SUBSET_LIST.append(int(subset_num)) #pass this list later to write subset num to HDF5
#### ---- Helper Functions ---- ####
def normalizePlanes(npzarray):
"""
Normalize pixel depth into Hounsfield units (HU) between -1000 and 400 HU.
All other HU values are masked. Then pixel values are normalized to the range 0-1.
"""
maxHU, minHU = 400., -1000.
npzarray = (npzarray - minHU) / (maxHU - minHU)
npzarray[npzarray>1] = 1.
npzarray[npzarray<0] = 0.
return npzarray
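# Illustrative check (not part of the original pipeline): with the window used
# above, -1000 HU maps to 0.0 and 400 HU maps to 1.0, while values outside the
# window are clipped, e.g. normalizePlanes(np.array([-1200., -300., 600.]))
# gives approximately [0.0, 0.5, 1.0].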
def normalize_img(img):
"""
Sets the MHD image to be approximately 1.0 mm voxel size
https://itk.org/ITKExamples/src/Filtering/ImageGrid/ResampleAnImage/Documentation.html
"""
# Number of pixels you want for x,y,z dimensions
new_x_size = int(img.GetSpacing()[0]*img.GetWidth())
new_y_size = int(img.GetSpacing()[1]*img.GetHeight())
new_z_size = int(img.GetSpacing()[2]*img.GetDepth())
new_size = [new_x_size, new_y_size, new_z_size]
new_spacing = [1,1,1] # New spacing to be 1.0 x 1.0 x 1.0 mm voxel size
interpolator_type = sitk.sitkBSpline #sitkLinear using BSpline over Linear
return sitk.Resample(img, np.array(new_size, dtype='uint32').tolist(),
sitk.Transform(),
interpolator_type,
img.GetOrigin(),
new_spacing,
img.GetDirection(),
0.0,
img.GetPixelIDValue())
def make_bbox(center,width,height,depth,origin,class_id):
"""
Returns a 3d (numpy tensor) bounding box from the CT scan.
2d in the case where PATCH_DEPTH = 1
"""
# left = np.max([0, np.abs(center[0] - origin[0]) - PATCH_WIDTH]).astype(int)
left = np.max([0, center[0] - PATCH_WIDTH]).astype(int)
right = np.min([width, center[0] + PATCH_WIDTH]).astype(int)
# left = int((np.abs(center[0] - origin[0])) - PATCH_WIDTH) #DEBUG
# right = int((np.abs(center[0] - origin[0])) + PATCH_WIDTH) #DEBUG
down = np.max([0, center[1] - PATCH_HEIGHT]).astype(int)
up = np.min([height, center[1] + PATCH_HEIGHT]).astype(int)
top = np.min([depth, center[2] + PATCH_DEPTH]).astype(int)
bottom = np.max([0, center[2] - PATCH_DEPTH]).astype(int)
bbox = [[down, up], [left, right], [bottom, top]] #(back,abdomen - left side, right side - feet, head)
# If bbox has a origin - center - PATCH_DIM/2 that results in a 0, (rarely the case)
# ensure that the bbox dims are all [PATCH_DIM x PATCH_DIM x PATCH_DIM]
if bbox[0][0] == 0:
bbox[0][1] = PATCH_DIM
elif bbox[1][0] == 0:
bbox[1][1] = PATCH_DIM
elif bbox[2][0] == 0:
bbox[2][1] = NUM_SLICES # change to --slice dim
return bbox
def downsample_class_0(df):
"""
Returns a pd.DataFrame where class 0s that collide with class 1s
have been flagged based on a distance measurement threshold.
Threshold = PATCH_DIM/2
The flag will be written to HDF5 and let the user know not to train on these class 0s
"""
empty_col = [0 for x in range(len(df))]
idx_to_flag = []
df.reset_index(inplace=True)
if 1 in df['class'].tolist(): #check series ID for a positive nodule
df_class_1 = df[df["class"] == 1].copy(deep=True)
ones_coords = df_class_1[["coordX", "coordY", "coordZ"]].values
for idx, row in df.iterrows():
#check for a class 1
if row['class'] == 1:
continue
#set vars for calculation
zero_coord = (row['coordX'],row['coordY'],row['coordZ'])
for one_coord in ones_coords:
dst = distance.euclidean(zero_coord,one_coord)
if dst <= PATCH_DIM/2: #follow this heuristic for downsampling class 0
idx_to_flag.append(idx)
else:
df = df.assign(no_train = empty_col)
return df
idx_to_flag = list(set(idx_to_flag))
downsample_col = []
for idx, i in enumerate(empty_col):
if idx in idx_to_flag:
downsample_col.append(1)
else:
downsample_col.append(0)
df = df.assign(no_train = downsample_col)
return df
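# --- Illustrative sketch (not part of the original pipeline) ---
# downsample_class_0() flags class-0 candidates whose centre lies within
# PATCH_DIM/2 mm of any class-1 candidate of the same scan. The toy frame
# below is an assumption used only to show the no_train column it adds.
def _demo_downsample_flagging():
    """Sketch: class-0 rows near a class-1 row get no_train == 1 (illustrative only)."""
    toy = pd.DataFrame({
        'coordX': [0.0, 5.0, 500.0],
        'coordY': [0.0, 5.0, 500.0],
        'coordZ': [0.0, 5.0, 500.0],
        'class': [1, 0, 0],
    })
    flagged = downsample_class_0(toy)
    # With PATCH_DIM = 64, the second row (~8.7 mm away) is flagged while the
    # distant third row keeps no_train == 0.
    return flagged[['class', 'no_train']]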
def write_to_hdf5(dset_and_data,first_patch=False):
"""Accept zipped hdf5 dataset obj and numpy data, write data to dataset"""
dset = dset_and_data[0] #hdf5 dataset obj
data = dset_and_data[1] #1D numpy hdf5 writable data
if first_patch == True:
dset[:] = data #set the whole, empty, hdf5 dset = data
return
row = dset.shape[0] # Count current dataset rows
dset.resize(row+1, axis=0) # Add new row
dset[row, :] = data # Insert data into new row
return
#### ---- Process CT Scans and extract Patches (the pipeline) ---- ####
def main():
"""
Create the hdf5 file + datasets, iterate through the folders' DICOM images,
normalize the images, create mini patches and write them to the hdf5 file system
"""
with h5py.File(LUNA_PATH + str(PATCH_DIM) + 'x' + str(PATCH_DIM) + 'x' + str(NUM_SLICES) + '-patch-withdiam.hdf5', 'w') as HDF5:
# Datasets for 3d patch tensors & class_id/x,y,z coords
total_patch_dim = PATCH_DIM * PATCH_DIM * NUM_SLICES
patch_dset = HDF5.create_dataset('input', (1,total_patch_dim), maxshape=(None,total_patch_dim)) #patches = inputs
class_dset = HDF5.create_dataset('output', (1,1), maxshape=(None,1), dtype=int) #classes = outputs
notrain_dset = HDF5.create_dataset('notrain', (1,1), maxshape=(None,1), dtype=int) # test holdout
centroid_dset = HDF5.create_dataset('centroid', (1,3), maxshape=(None,3), dtype=float)
uuid_dset = HDF5.create_dataset('uuid', (1,1), maxshape=(None,None), dtype=h5py.special_dtype(vlen=bytes))
subset_dset = HDF5.create_dataset('subsets', (1,1), maxshape=(None,1), dtype=int)
diameter_dset = HDF5.create_dataset('diameter_label', (1,1), maxshape=(None,1), dtype=int)
HDF5['input'].attrs['lshape'] = (PATCH_DIM, PATCH_DIM, NUM_SLICES, CHANNELS) # (Height, Width, Depth)
print("Successfully initiated the HDF5 file. Ready to recieve data!")
#### ---- Iterating through a CT scan ---- ####
counter = 0
scan_number = 1
first_patch = True # flag for saving first img to hdf5
for img_file, subset_id in tqdm(zip(FILE_LIST,SUBSET_LIST)):
print("Processing CT Scan: {}".format(scan_number))
base=os.path.basename(img_file) # Strip the filename out
seriesuid = os.path.splitext(base)[0] # Get the filename without the extension
mini_df = DF_NODE[DF_NODE["seriesuid"] == seriesuid]
#### ---- Downsampling Class 0s ---- ####
mini_df = downsample_class_0(mini_df)
mini_df1 = DF_NODE1[DF_NODE1["seriesuid"] == seriesuid]
mini_df1.fillna(-1, inplace=True)
# Load the CT scan (3D .mhd file)
# Numpy is z,y,x and SimpleITK is x,y,z -- (note the ordering of dimensions)
itk_img = sitk.ReadImage(img_file)
# Normalize the image spacing so that a voxel is 1x1x1 mm in dimension
itk_img = normalize_img(itk_img)
# SimpleITK keeps the origin and spacing information for the 3D image volume
img_array = sitk.GetArrayFromImage(itk_img) # indices are z,y,x (note the ordering of dimensions)
img_array = np.pad(img_array, int(PATCH_DIM), mode="constant", constant_values=-2000)#, constant_values=0) #0 padding 3d array for patch clipping issue
slice_z, height, width = img_array.shape
origin = np.array(itk_img.GetOrigin()) # x,y,z Origin in world coordinates (mm) - Not same as img_array
spacing = np.array(itk_img.GetSpacing()) # spacing of voxels in world coordinates (mm)
scan_number += 1
#### ---- Iterating through a CT scan's slices ---- ####
for candidate_idx, cur_row in mini_df.iterrows(): # Iterate through all candidates (in dataframe)
# This is the real world x,y,z coordinates of possible nodule (in mm)
class_id = cur_row["class"] #0 for false, 1 for true nodule
no_train = cur_row["no_train"]
candidate_x = cur_row["coordX"] + PATCH_DIM
candidate_y = cur_row["coordY"] + PATCH_DIM
candidate_z = cur_row["coordZ"] + PATCH_DIM
center = np.array([candidate_x, candidate_y, candidate_z]) # candidate center
voxel_center = np.rint(np.abs(center / spacing - origin)).astype(int) # candidate center in voxels
# If class_id is 1, we look up the nodule diameter. If there is no diameter, the
# candidate comes from the candidates_V2 file and we lack this info for the
# nodule, so we cannot make a mask for it. We set the diameter label to -1
# and set the no_train flag so the nodule is not used for training the U-Net.
if class_id == 0:
diameter_label = 0
else:
diameter_label = mini_df1[(mini_df1['coordX']==cur_row["coordX"]) & (mini_df1['coordY']==cur_row["coordY"]) & (mini_df1['coordZ']==cur_row["coordZ"])]['diameter_mm'].values
if len(diameter_label) == 0:
diameter_label = -1
no_train = 1
# put a counter here
#### ---- Generating the 2d/2.5d/3d Patch ---- ####
bbox = make_bbox(voxel_center, width, height, slice_z, origin, class_id) #return bounding box
patch = img_array[
bbox[2][0]:bbox[2][1],
bbox[0][0]:bbox[0][1],
bbox[1][0]:bbox[1][1]]
# DEBUG print(patch.shape) #uncomment to debug shape size being written
#### ---- Perform Hounsfield Normlization ---- ####
if HU_NORM:
patch = normalizePlanes(patch) #normalize patch to HU units
#### ---- Prepare Data for HDF5 insert ---- ####
patch = patch.ravel().reshape(1,-1) #flatten img to (1 x N)
if patch.shape[1] != total_patch_dim: # Catch any class 0 bbox issues and pass them
counter += 1
continue
#minor fix to subtract the PATCH_DIM from each centroid when saving to HDF5 to match candidates_V2.csv
centroid_data = np.array([candidate_x - PATCH_DIM,candidate_y - PATCH_DIM,candidate_z - PATCH_DIM]).ravel().reshape(1,-1)
seriesuid_str = np.string_(seriesuid) #set seriesuid str to numpy.bytes_ type
#### ---- Write Data to HDF5 insert ---- ####
hdf5_dsets = [patch_dset, class_dset, notrain_dset, uuid_dset, subset_dset, centroid_dset,diameter_dset]
hdf5_data = [patch, class_id, no_train, seriesuid_str, subset_id, centroid_data,diameter_label]
for dset_and_data in zip(hdf5_dsets,hdf5_data):
if first_patch == True:
write_to_hdf5(dset_and_data,first_patch=True)
else:
write_to_hdf5(dset_and_data)
first_patch = False
print("Did not write: " + str(counter) + " patches to HDF5")
print("All {} CT Scans Processed and Individual Patches written to HDF5!".format(scan_number))
print('\a')
if __name__ == '__main__':
main()
|
the-stack_0_16387
|
from argparse import ArgumentParser
from fmovies_api import Fmovies, FmoviesConfig
import os
import re
class Application():
def __init__(self):
self.config_data = {}
def setup_parser(self):
parser = ArgumentParser()
parser.add_argument("-u", "--base-url",
help="The base url for the website.")
parser.add_argument(
"--search-path", help="Absolute or relative url to search page. If relative, base-url must be supplied.")
parser.add_argument("-c", "--config-file",
help="Input file for the configuration.")
# Parsed options live on the Namespace returned by parse_args, not on the parser.
args = parser.parse_args()
self.data = FmoviesConfig.parse(args, type=(
"file" if args.config_file else "data"))
def main(self):
pass
if __name__ == "__main__":
app = Application()
app.setup_parser()
app.main()
|
the-stack_0_16389
|
"""
A backend to export DXF using a custom DXF renderer.
This allows saving of DXF figures.
Use as a matplotlib external backend:
import matplotlib
matplotlib.use('module://mpldxf.backend_dxf')
or register:
matplotlib.backend_bases.register_backend('dxf', FigureCanvasDxf)
Based on matplotlib.backends.backend_template.py.
Copyright (C) 2014 David M Kent
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import sys
import matplotlib
from matplotlib.backend_bases import (RendererBase, FigureCanvasBase,
GraphicsContextBase)
import ezdxf
from . import dxf_colors
# When packaged with py2exe ezdxf has issues finding its templates
# We tell it where to find them using this.
# Note we also need to make sure they get packaged by adding them to the
# configuration in setup.py
if hasattr(sys, 'frozen'):
ezdxf.options.template_dir = os.path.dirname(sys.executable)
def rgb_to_dxf(rgb_val):
"""Convert an RGB[A] colour to DXF colour index.
``rgb_val`` should be a tuple of values in range 0.0 - 1.0. Any
alpha value is ignored.
"""
if rgb_val is not None:
dxfcolor = dxf_colors.nearest_index([255.0 * val for val in rgb_val[:3]])
else:
dxfcolor = dxf_colors.BLACK
return dxfcolor
class RendererDxf(RendererBase):
"""
The renderer handles drawing/rendering operations.
Renders the drawing using the ``ezdxf`` package.
"""
def __init__(self, width, height, dpi, dxfversion):
RendererBase.__init__(self)
self.height = height
self.width = width
self.dpi = dpi
self.dxfversion = dxfversion
self._init_drawing()
def _init_drawing(self):
"""Create a drawing, set some global information and add
the layers we need.
"""
drawing = ezdxf.new(dxfversion=self.dxfversion)
modelspace = drawing.modelspace()
drawing.header['$EXTMIN'] = (0, 0, 0)
drawing.header['$EXTMAX'] = (self.width, self.height, 0)
self.drawing = drawing
self.modelspace = modelspace
def clear(self):
"""Reset the renderer."""
super(RendererDxf, self).clear()
self._init_drawing()
def draw_path(self, gc, path, transform, rgbFace=None):
"""Draw a path.
To do this we need to decide which DXF entity is most appropriate
for the path. We choose from lwpolylines or hatches.
"""
dxfcolor = rgb_to_dxf(rgbFace)
for vertices in path.to_polygons(transform=transform):
if rgbFace is not None and vertices.shape[0] > 2:
# we have a face color so we draw a filled polygon,
# in DXF this means a HATCH entity
hatch = self.modelspace.add_hatch(dxfcolor)
with hatch.edit_boundary() as editor:
editor.add_polyline_path(vertices)
else:
# A non-filled polygon or a line - use LWPOLYLINE entity
attrs = {
'color': dxfcolor,
}
self.modelspace.add_lwpolyline(vertices, attrs)
def draw_image(self, gc, x, y, im):
pass
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
fontsize = self.points_to_pixels(prop.get_size_in_points())
dxfcolor = rgb_to_dxf(gc.get_rgb())
text = self.modelspace.add_text(s.encode('ascii', 'ignore'), {
'height': fontsize,
'rotation': angle,
'color': dxfcolor,
})
halign = self._map_align(mtext.get_ha(), vert=False)
valign = self._map_align(mtext.get_va(), vert=True)
align = valign
if align:
align += '_'
align += halign
p1 = x, y
p2 = (x - 50, y)
text.set_pos(p1, p2=p2, align=align)
def _map_align(self, align, vert=False):
"""Translate a matplotlib text alignment to the ezdxf alignment."""
if align in ['right', 'center', 'left', 'top',
'bottom', 'middle']:
align = align.upper()
elif align == 'baseline':
align = ''
else:
raise NotImplementedError
if vert and align == 'CENTER':
align = 'MIDDLE'
return align
def flipy(self):
return False
def get_canvas_width_height(self):
return self.width, self.height
def new_gc(self):
return GraphicsContextBase()
def points_to_pixels(self, points):
return points / 72.0 * self.dpi
class FigureCanvasDxf(FigureCanvasBase):
"""
A canvas to use the renderer. This only implements enough of the
API to allow the export of DXF to file.
"""
#: The DXF version to use. This can be set to another version
#: supported by ezdxf if desired.
DXFVERSION = 'AC1015'
def get_dxf_renderer(self, cleared=False):
"""Get a renderer to use. Will create a new one if we don't
already have one or if the figure dimensions or resolution have
changed.
"""
l, b, w, h = self.figure.bbox.bounds
key = w, h, self.figure.dpi
try:
self._lastKey, self.dxf_renderer
except AttributeError:
need_new_renderer = True
else:
need_new_renderer = (self._lastKey != key)
if need_new_renderer:
self.dxf_renderer = RendererDxf(w, h, self.figure.dpi,
self.DXFVERSION)
self._lastKey = key
elif cleared:
self.dxf_renderer.clear()
return self.dxf_renderer
def draw(self):
"""
Draw the figure using the renderer
"""
renderer = self.get_dxf_renderer()
self.figure.draw(renderer)
return renderer.drawing
# Add DXF to the class-scope filetypes dictionary
filetypes = FigureCanvasBase.filetypes.copy()
filetypes['dxf'] = 'DXF'
def print_dxf(self, filename, *args, **kwargs):
"""
Write out a DXF file.
"""
drawing = self.draw()
drawing.saveas(filename)
def get_default_filetype(self):
return 'dxf'
########################################################################
#
# Now just provide the standard names that backend.__init__ is expecting
#
########################################################################
FigureCanvas = FigureCanvasDxf
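# Minimal usage sketch (assuming the same, older ezdxf API this backend was
# written against; the plotted data and output filename are illustrative):
if __name__ == "__main__":
    from matplotlib.figure import Figure
    fig = Figure(figsize=(4, 3), dpi=100)
    ax = fig.add_subplot(111)
    ax.plot([0, 1, 2], [0, 1, 0])
    # Attach the DXF canvas and write the figure straight to a DXF file.
    FigureCanvasDxf(fig).print_dxf("example.dxf")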
|
the-stack_0_16391
|
"""File for Azure Event Hub models."""
from __future__ import annotations
from dataclasses import dataclass
import logging
from azure.eventhub.aio import EventHubProducerClient, EventHubSharedKeyCredential
from .const import ADDITIONAL_ARGS, CONF_EVENT_HUB_CON_STRING
_LOGGER = logging.getLogger(__name__)
@dataclass
class AzureEventHubClient:
"""Class for the Azure Event Hub client. Use from_input to initialize."""
event_hub_instance_name: str
@property
def client(self) -> EventHubProducerClient:
"""Return the client."""
async def test_connection(self) -> None:
"""Test connection, will throw EventHubError when it cannot connect."""
async with self.client as client:
await client.get_eventhub_properties()
@classmethod
def from_input(cls, **kwargs) -> AzureEventHubClient:
"""Create the right class."""
if CONF_EVENT_HUB_CON_STRING in kwargs:
return AzureEventHubClientConnectionString(**kwargs)
return AzureEventHubClientSAS(**kwargs)
@dataclass
class AzureEventHubClientConnectionString(AzureEventHubClient):
"""Class for Connection String based Azure Event Hub Client."""
event_hub_connection_string: str
@property
def client(self) -> EventHubProducerClient:
"""Return the client."""
return EventHubProducerClient.from_connection_string(
conn_str=self.event_hub_connection_string,
eventhub_name=self.event_hub_instance_name,
**ADDITIONAL_ARGS,
)
@dataclass
class AzureEventHubClientSAS(AzureEventHubClient):
"""Class for SAS based Azure Event Hub Client."""
event_hub_namespace: str
event_hub_sas_policy: str
event_hub_sas_key: str
@property
def client(self) -> EventHubProducerClient:
"""Get a Event Producer Client."""
return EventHubProducerClient(
fully_qualified_namespace=f"{self.event_hub_namespace}.servicebus.windows.net",
eventhub_name=self.event_hub_instance_name,
credential=EventHubSharedKeyCredential( # type: ignore
policy=self.event_hub_sas_policy, key=self.event_hub_sas_key
),
**ADDITIONAL_ARGS,
)
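# Minimal usage sketch (assuming CONF_EVENT_HUB_CON_STRING maps to the
# "event_hub_connection_string" key; the credentials below are placeholders):
#
#   client = AzureEventHubClient.from_input(
#       event_hub_instance_name="my-hub",
#       event_hub_connection_string="Endpoint=sb://...;SharedAccessKeyName=...;SharedAccessKey=...",
#   )
#   await client.test_connection()  # raises EventHubError on failure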
|
the-stack_0_16393
|
# coding=utf-8
# Copyright 2018 David Mack
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
from absl import logging
from absl import flags
FLAGS = flags.FLAGS
from compare_gan.metrics import eval_task
import tensorflow as tf
import tensorflow_gan as tfgan
import imageio
import math
import numpy as np
flags.DEFINE_string(
"example_dir", "./examples",
"Where to save generated image examples")
flags.DEFINE_integer(
"example_count", 100,
"How many generated image examples to save")
class SaveExamplesTask():
"""Quick and dirty image saver."""
_LABEL = "save_examples"
def merge(self, images, size):
h, w = images.shape[1], images.shape[2]
if (images.shape[3] in (3,4)):
c = images.shape[3]
img = np.zeros((h * size[0], w * size[1], c))
for idx, image in enumerate(images):
i = idx % size[1]
j = idx // size[1]
img[j * h:j * h + h, i * w:i * w + w, :] = image
return img
elif images.shape[3]==1:
img = np.zeros((h * size[0], w * size[1]))
for idx, image in enumerate(images):
i = idx % size[1]
j = idx // size[1]
img[j * h:j * h + h, i * w:i * w + w] = image[:,:,0]
return img
else:
raise ValueError('in merge(images,size) images parameter ''must have dimensions: HxW or HxWx3 or HxWx4')
def run_after_session(self, fake_dset, real_dest, step, force_label=None):
tf.io.gfile.makedirs(FLAGS.example_dir)
n_images = fake_dset.images.shape[0]
if force_label is not None:
label_str = "force_label_" + str(force_label)
else:
label_str = "all_labels"
# for i in range(min(n_images, FLAGS.example_count)):
# filename = os.path.join(FLAGS.example_dir, step, label_str + '_%03d.png' % i)
# with tf.io.gfile.GFile(filename, 'w') as file:
# imageio.imwrite(file, fake_dset.images[i], format='png')
grid_size = (int(math.sqrt(n_images))+1, int(math.sqrt(n_images)))
grid = self.merge(fake_dset.images, grid_size)
filename = os.path.join(FLAGS.example_dir, step + '_' + label_str + '_grid.png')
with tf.io.gfile.GFile(filename, 'w') as file:
imageio.imwrite(file, grid, format='png')
return {self._LABEL: FLAGS.example_count}
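# Minimal usage sketch for merge() alone (the random batch below is an
# illustrative assumption; run_after_session additionally needs parsed FLAGS
# and a dataset object exposing `.images`):
if __name__ == "__main__":
    batch = np.random.rand(9, 8, 8, 3)             # nine 8x8 RGB tiles
    grid = SaveExamplesTask().merge(batch, (3, 3)) # tile them into a 3x3 grid
    print(grid.shape)                              # -> (24, 24, 3)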
|
the-stack_0_16397
|
from __future__ import print_function
"""
Before running this script, install the dependency used to read WMO BUFR files:
pip install pybufrkit
The library can also be driven from within a Python script,
but as of now this code uses its command-line interface.
It is important to download the files from the FTP server in binary mode!
This was tested on Python 2.7 using operational files.
"""
def convert_bufr_hd(bufr_file, path_out=None, VERSION=1):
"""
This function will parse a WMO BUFR sounding using a
pure python toolkit: PyBufrKit
The parsing will be done using a query over the bufr
using tables included in the package.
Parameters
----------
bufr_file : str
Path to the BUFR file to parse.
path_out : str (optional)
The full path of the out file.
VERSION : int (optional)
Which version is used to build the output file name. It can be 1,
which derives it from the launching time, or 2, which extracts it
from the file header. The default is 1.
"""
import os
import sys
import subprocess
from datetime import datetime, timedelta
# This will parse only the WMO station number to check if
# we have to process the file or return
region = subprocess.check_output(
'pybufrkit query 001001 {bufr_file}'.format(bufr_file=bufr_file), shell=True).decode().split('\n')[-2]
number = subprocess.check_output(
'pybufrkit query 001002 {bufr_file}'.format(bufr_file=bufr_file), shell=True).decode().split('\n')[-2]
wmo_code = region + number
if wmo_code == '87344':
directory = 'COR/'
print('Reading BUFR {bufr_file}'.format(bufr_file=bufr_file))
print('The station is {wmo_code}: Cordoba Aero'.format(wmo_code=wmo_code))
elif wmo_code == '87155':
directory = 'SIS/'
print('Reading BUFR {bufr_file}'.format(bufr_file=bufr_file))
print('The station is {wmo_code}: Resistencia Aero'.format(wmo_code=wmo_code))
elif wmo_code == '87244':
directory = 'VMRS/'
print('Reading BUFR {bufr_file}'.format(bufr_file=bufr_file))
print('The station is {wmo_code}: Villa Maria del Rio Seco'.format(wmo_code=wmo_code))
elif wmo_code == '87418':
directory = 'MDZ/'
print('Reading BUFR {bufr_file}'.format(bufr_file=bufr_file))
print('The station is {wmo_code}: Mendoza Aero'.format(wmo_code=wmo_code))
elif wmo_code == '87576':
print('The station is {wmo_code}: Ezeiza Aero, do not process'.format(wmo_code=wmo_code))
print()
return
else:
print('Do not care about station {wmo_code}'.format(wmo_code=wmo_code))
print()
return
if not os.path.exists(path_out+directory):
os.makedirs(path_out+directory)
# Now we parse the date and time of the baloon launching
year = subprocess.check_output(
'pybufrkit query 004001 {bufr_file}'.format(bufr_file=bufr_file), shell=True).decode().split('\n')[-2]
month = subprocess.check_output(
'pybufrkit query 004002 {bufr_file}'.format(bufr_file=bufr_file), shell=True).decode().split('\n')[-2]
day = subprocess.check_output(
'pybufrkit query 004003 {bufr_file}'.format(bufr_file=bufr_file), shell=True).decode().split('\n')[-2]
hour = subprocess.check_output(
'pybufrkit query 004004 {bufr_file}'.format(bufr_file=bufr_file), shell=True).decode().split('\n')[-2]
minute = subprocess.check_output(
'pybufrkit query 004005 {bufr_file}'.format(bufr_file=bufr_file), shell=True).decode().split('\n')[-2]
second = subprocess.check_output(
'pybufrkit query 004006 {bufr_file}'.format(bufr_file=bufr_file), shell=True).decode().split('\n')[-2]
time_of_launch = datetime(int(year),
int(month),
int(day),
int(hour),
int(minute),
int(second))
print('The time of launching was: {time_of_launch}'.format(time_of_launch=time_of_launch))
if VERSION == 1:
time_of_sounding = datetime(time_of_launch.year,
time_of_launch.month,
time_of_launch.day,
time_of_launch.hour,
0,
0)
# In order to have the correct output file name we have to check
# and correct the time of launching.
# If it is around the hour (5 minutes after and before)
# we can assume the launching was within
# an hourly schedule and we are ok with the time_of_sounding being
# the same as time_of_launch.
# But as the other soundings (i.e. with regular or tri-hourly schedules)
# are launched way before the hour we have to add one hour to the
# time_of_launch.
# Either way, if the sounding was supposed to be at 12 UTC, the launch time
# may still fall within the 11 UTC hour (the commented-out 11/23 UTC checks
# below were meant to handle that case).
if (time_of_launch.minute > 5) and (time_of_launch.minute < 55):
time_of_sounding = time_of_sounding + timedelta(hours=1)
# if time_of_sounding.hour == 11:
# time_of_sounding = time_of_sounding + timedelta(hours=1)
# elif time_of_sounding.hour == 23:
# time_of_sounding = time_of_sounding + timedelta(hours=1)
elif VERSION == 2:
year = subprocess.check_output(
'pybufrkit info {bufr_file} | grep year'.format(bufr_file=bufr_file),
shell=True).decode().split('\n')[-2].split(' ')[-1]
month = subprocess.check_output(
'pybufrkit info {bufr_file} | grep month'.format(bufr_file=bufr_file),
shell=True).decode().split('\n')[-2].split(' ')[-1]
day = subprocess.check_output(
'pybufrkit info {bufr_file} | grep day'.format(bufr_file=bufr_file),
shell=True).decode().split('\n')[-2].split(' ')[-1]
hour = subprocess.check_output(
'pybufrkit info {bufr_file} | grep hour'.format(bufr_file=bufr_file),
shell=True).decode().split('\n')[-2].split(' ')[-1]
minute = subprocess.check_output(
'pybufrkit info {bufr_file} | grep minute'.format(bufr_file=bufr_file),
shell=True).decode().split('\n')[-2].split(' ')[-1]
second = subprocess.check_output(
'pybufrkit info {bufr_file} | grep second'.format(bufr_file=bufr_file),
shell=True).decode().split('\n')[-2].split(' ')[-1]
time_of_sounding = datetime(int(year),
int(month),
int(day),
int(hour),
int(minute),
int(second))
print('The datetime of the file name will be: {time_of_sounding}'.format(time_of_sounding=time_of_sounding))
file_name = time_of_sounding.strftime('%y%m%d_%H_{wmo_code}.lst'.format(wmo_code=wmo_code))
# print(file_name)
# Here we can add the sounding site to the path
file_name_path = '{path_out}{directory}{file_name}'.format(path_out=path_out,
directory=directory,
file_name=file_name)
if os.path.exists(file_name_path):
print('Already did {file_name_path}'.format(file_name_path=file_name_path))
print()
return
# pressure in Pa
pressure = subprocess.check_output(
'pybufrkit query 007004 {bufr_file}'.format(bufr_file=bufr_file), shell=True).decode().split('\n')[-2]
# geopotential height
height = subprocess.check_output(
'pybufrkit query 010009 {bufr_file}'.format(bufr_file=bufr_file), shell=True).decode().split('\n')[-2]
# Temperature in K
temp = subprocess.check_output(
'pybufrkit query 012101 {bufr_file}'.format(bufr_file=bufr_file), shell=True).decode().split('\n')[-2]
# Dew point temperature in K
temp_dew = subprocess.check_output(
'pybufrkit query 012103 {bufr_file}'.format(bufr_file=bufr_file), shell=True).decode().split('\n')[-2]
# Wind direction in degrees
dir_v = subprocess.check_output(
'pybufrkit query 011001 {bufr_file}'.format(bufr_file=bufr_file), shell=True).decode().split('\n')[-2]
# Wind speed in m/s
vel_v = subprocess.check_output(
'pybufrkit query 011002 {bufr_file}'.format(bufr_file=bufr_file), shell=True).decode().split('\n')[-2]
pressure = [float(x)/100 for x in pressure.split(',')]
height = [int(x) for x in height.split(',')]
temp = [round(float(x)-273.15, 2) for x in temp.split(',')] # convert to Celsius
temp_dew = [round(float(x)-273.15, 2) for x in temp_dew.split(',')] # convert to Celsius
dir_v = [int(x) for x in dir_v.split(',')]
vel_v = [round(float(x)*1.94384, 4) for x in vel_v.split(',')] # convert to kt
print('Starting to write in: {file_name_path}'.format(file_name_path=file_name_path))
with open('{file_name_path}'.format(file_name_path=file_name_path), 'w') as fid:
for p, h, t, td, dv, vv in zip(pressure, height, temp, temp_dew, dir_v, vel_v):
fid.write('\t{p},\t{h},\t{t},\t{td},\t{dv},\t{vv}\n'.format(p=p,
h=h,
t=t,
td=td,
dv=dv,
vv=vv))
print('Finished writing the csv')
print()
#################################################
# Example of usage with glob
# We also move each processed BUFR file to another directory afterwards.
# Note: this import needs to happen at the beginning of the file:
# from __future__ import print_function
import os
import sys
import glob
# Check input parameters
script = sys.argv[0]
if len(sys.argv) != 3:
print('Usage: {script} [path_in] [path_out]'.format(script=script))
quit()
path_in = sys.argv[1]
path_out = sys.argv[2]
print('path_in = {path_in}'.format(path_in=path_in))
print('path_out = {path_out}'.format(path_out=path_out))
bufr_list = glob.glob('{path_in}/radiosondeos_*'.format(path_in=path_in))
for bufr_file in bufr_list:
convert_bufr_hd(bufr_file, path_out)
# then move bufr already processed
os.rename('{bufr_file}'.format(bufr_file=bufr_file),
'{path_in}/processed/{bufr_file}'.format(path_in=path_in,
bufr_file=bufr_file.split("/")[-1]))
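# Example invocation from a shell (script name and paths are illustrative):
#   python convert_bufr_hd.py /data/bufr_incoming /data/soundings_out
# A 'processed/' subdirectory must already exist inside path_in for the final
# os.rename() above to succeed.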
|
the-stack_0_16398
|
""" Default (fixed) hyperparameter values used in Neural network model """
from ....constants import BINARY, MULTICLASS, REGRESSION
def get_fixed_params():
""" Parameters that currently cannot be searched during HPO """
fixed_params = {
'num_epochs': 500, # maximum number of epochs for training NN
'epochs_wo_improve': 20, # we terminate training if validation performance hasn't improved in the last 'epochs_wo_improve' # of epochs
# TODO: Epochs could take a very long time, we may want smarter logic than simply # of epochs without improvement (slope, difference in score, etc.)
'seed_value': None, # random seed for reproducibility (set = None to ignore)
# For data processing:
'proc.embed_min_categories': 4, # apply embedding layer to categorical features with at least this many levels. Features with fewer levels are one-hot encoded. Choose big value to avoid use of Embedding layers
# Options: [3,4,10, 100, 1000]
'proc.impute_strategy': 'median', # strategy argument of sklearn.SimpleImputer() used to impute missing numeric values
# Options: ['median', 'mean', 'most_frequent']
'proc.max_category_levels': 100, # maximum number of allowed levels per categorical feature
# Options: [10, 100, 200, 300, 400, 500, 1000, 10000]
'proc.skew_threshold': 0.99, # numerical features whose absolute skewness is greater than this receive special power-transform preprocessing. Choose big value to avoid using power-transforms
# Options: [0.2, 0.3, 0.5, 0.8, 1.0, 10.0, 100.0]
# Old params: These are now set based off of nthreads_per_trial, ngpus_per_trial.
# 'num_dataloading_workers': 1, # Will be overwritten by nthreads_per_trial, can be >= 1
# 'ctx': mx.cpu(), # Will be overwritten by ngpus_per_trial if unspecified (can alternatively be: mx.gpu())
}
return fixed_params
def get_hyper_params():
""" Parameters that currently can be tuned during HPO """
hyper_params = {
## Hyperparameters for neural net architecture:
'network_type': 'widedeep', # Type of neural net used to produce predictions
# Options: ['widedeep', 'feedforward']
'layers': None, # List of widths (num_units) for each hidden layer (Note: only specifies hidden layers. These numbers are not absolute, they will also be scaled based on number of training examples and problem type)
# Options: List of lists that are manually created
'numeric_embed_dim': None, # Size of joint embedding for all numeric+one-hot features.
# Options: integer values between 10-10000
'activation': 'relu', # Activation function
# Options: ['relu', 'softrelu', 'tanh', 'softsign']
'max_layer_width': 2056, # maximum number of hidden units in network layer (integer > 0)
# Does not need to be searched by default
'embedding_size_factor': 1.0, # scaling factor to adjust size of embedding layers (float > 0)
# Options: range[0.01 - 100] on log-scale
'embed_exponent': 0.56, # exponent used to determine size of embedding layers based on # categories.
'max_embedding_dim': 100, # maximum size of embedding layer for a single categorical feature (int > 0).
## Regression-specific hyperparameters:
'y_range': None, # Tuple specifying (min_y, max_y). Can be = (-np.inf, np.inf).
# If None, inferred based on training labels. Note: MUST be None for classification tasks!
'y_range_extend': 0.05, # Only used to extend size of inferred y_range when y_range = None.
## Hyperparameters for neural net training:
'use_batchnorm': True, # whether or not to utilize Batch-normalization
# Options: [True, False]
'dropout_prob': 0.1, # dropout probability, = 0 turns off Dropout.
# Options: range(0.0, 0.5)
'batch_size': 512, # batch-size used for NN training
# Options: [32, 64, 128, 256, 512, 1024, 2048]
'loss_function': None, # MXNet loss function minimized during training
'optimizer': 'adam', # MXNet optimizer to use.
# Options include: ['adam','sgd']
'learning_rate': 3e-4, # learning rate used for NN training (float > 0)
'weight_decay': 1e-6, # weight decay regularizer (float > 0)
'clip_gradient': 100.0, # gradient clipping threshold (float > 0)
'momentum': 0.9, # momentum which is only used for SGD optimizer
'lr_scheduler': None, # If not None, string specifying what type of learning rate scheduler to use (may override learning_rate).
# Options: [None, 'cosine', 'step', 'poly', 'constant']
# Below are hyperparameters specific to the LR scheduler (only used if lr_scheduler != None). For more info, see: https://gluon-cv.mxnet.io/api/utils.html#gluoncv.utils.LRScheduler
'base_lr': 3e-5, # smallest LR (float > 0)
'target_lr': 1.0, # largest LR (float > 0)
'lr_decay': 0.1, # step factor used to decay LR (float in (0,1))
'warmup_epochs': 10, # number of epochs at beginning of training in which LR is linearly ramped up (float > 1).
## Feature-specific hyperparameters:
'use_ngram_features': False, # If False, will drop automatically generated ngram features from language features. This results in worse model quality but far faster inference and training times.
# Options: [True, False]
}
return hyper_params
# Note: params for original NNTabularModel were:
# weight_decay=0.01, dropout_prob = 0.1, batch_size = 2048, lr = 1e-2, epochs=30, layers= [200, 100] (semi-equivalent to our layers = [100],numeric_embed_dim=200)
def get_default_param(problem_type, num_classes=None):
if problem_type == BINARY:
return get_param_binary()
elif problem_type == MULTICLASS:
return get_param_multiclass(num_classes=num_classes)
elif problem_type == REGRESSION:
return get_param_regression()
else:
return get_param_binary()
def get_param_multiclass(num_classes):
params = get_fixed_params()
params.update(get_hyper_params())
return params
def get_param_binary():
params = get_fixed_params()
params.update(get_hyper_params())
return params
def get_param_regression():
params = get_fixed_params()
params.update(get_hyper_params())
return params
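# Minimal usage sketch (the override below is an illustrative assumption):
#   params = get_default_param(BINARY)
#   params['learning_rate'] = 1e-3                  # override a single tunable value
#   params['num_epochs'], params['network_type']    # -> 500, 'widedeep'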
|
the-stack_0_16399
|
"""
The Ravel backend PostgreSQL database
"""
import psycopg2
from ravel.log import logger
from ravel.util import resource_file
ISOLEVEL = psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT
BASE_SQL = resource_file("ravel/sql/base.sql")
FLOW_SQL = resource_file("ravel/sql/flows.sql")
NOFLOW_SQL = resource_file("ravel/sql/noflows.sql")
TOPO_SQL = resource_file("ravel/sql/topo.sql")
AUXILIARY_FUN_SQL = resource_file("ravel/sql/auxiliary_functions.sql")
class RavelDb():
"""A representation of Ravel's backend PostgreSQL database."""
def __init__(self, name, user, base, passwd=None, reconnect=False):
"""name: the name of the database to connect to
user: the username to use to connect
base: a file containing the SQL implementation for Ravel's base
passwd: the password to connect to the database
reconnect: true to connect to an existing database setup, false
to load a new instance of Ravel's base into the database"""
self.name = name
self.user = user
self.passwd = passwd
self.base = base
self.cleaned = not reconnect
self._cursor = None
self._conn = None
if not reconnect and self.num_connections() > 0:
logger.warning("existing connections to database, skipping reinit")
self.cleaned = False
elif not reconnect:
self.init()
self.cleaned = True
@property
def conn(self):
"returns: a psycopg2 connection to the PostgreSQL database"
if not self._conn or self._conn.closed:
self._conn = psycopg2.connect(database=self.name,
user=self.user,
password=self.passwd)
self._conn.set_isolation_level(ISOLEVEL)
return self._conn
@property
def cursor(self):
"""returns: a psycopg2 cursor from RavelDb.conn for the PostgreSQL
database"""
if not self._cursor or self._cursor.closed:
self._cursor = self.conn.cursor()
return self._cursor
def num_connections(self):
"""Returns the number of existing connections to the database. If
there are >1 connections, a new Ravel base implementation cannot be
loaded into the database.
returns: the number of existing connections to the database"""
try:
self.cursor.execute("SELECT * FROM pg_stat_activity WHERE "
"datname='{0}'".format(self.name))
# ignore cursor connection
return len(self.cursor.fetchall()) - 1
except psycopg2.DatabaseError as e:
logger.warning("error loading schema: %s", self.fmt_errmsg(e))
return 0
def init(self):
"""Initialize the database with the base Ravel SQL implementation.
Removes any existing Ravel objects from the database"""
self.clean()
self.create()
self.add_extensions()
self.load_schema(self.base)
def load_schema(self, script):
"""Load the specified schema into the database"
script: path to a SQL script"""
try:
s = open(script, "r").read()
logger.debug("loaded schema %s", script)
self.cursor.execute(s)
except psycopg2.DatabaseError as e:
logger.warning("error loading schema: %s", self.fmt_errmsg(e))
def load_topo(self, provider):
"""Load a topology from the specified network provider
provider: a ravel.network.NetworkProvider instance"""
topo = provider.topo
try:
node_count = 0
nodes = {}
for sw in topo.switches():
node_count += 1
dpid = provider.getNodeByName(sw).dpid
ip = provider.getNodeByName(sw).IP()
mac = provider.getNodeByName(sw).MAC()
nodes[sw] = node_count
self.cursor.execute("INSERT INTO switches (sid, dpid, ip, mac, name) "
"VALUES ({0}, '{1}', '{2}', '{3}', '{4}');"
.format(node_count, dpid, ip, mac, sw))
for host in topo.hosts():
node_count += 1
ip = provider.getNodeByName(host).IP()
mac = provider.getNodeByName(host).MAC()
nodes[host] = node_count
self.cursor.execute("INSERT INTO hosts (hid, ip, mac, name) "
"VALUES ({0}, '{1}', '{2}', '{3}');"
.format(node_count, ip, mac, host))
for link in topo.links():
h1,h2 = link
if h1 in topo.switches() and h2 in topo.switches():
ishost = 0
else:
ishost = 1
sid = nodes[h1]
nid = nodes[h2]
self.cursor.execute("INSERT INTO tp(sid, nid, ishost, isactive) "
"VALUES ({0}, {1}, {2}, {3});"
.format(sid, nid, ishost, 1))
# bidirectional edges
self.cursor.execute("INSERT INTO tp(sid, nid, ishost, isactive) "
"VALUES ({1}, {0}, {2}, {3});"
.format(sid, nid, ishost, 1))
self.cursor.execute("INSERT INTO ports(sid, nid, port) "
"VALUES ({0}, {1}, {2}), ({1}, {0}, {3});"
.format(sid, nid,
topo.port(h1, h2)[0],
topo.port(h1, h2)[1]))
except psycopg2.DatabaseError as e:
logger.warning("error loading topology: %s", self.fmt_errmsg(e))
def create(self):
"""If not created, create a database with the name specified in
the constructor"""
conn = None
try:
conn = psycopg2.connect(database="postgres",
user=self.user,
password=self.passwd)
conn.set_isolation_level(ISOLEVEL)
cursor = conn.cursor()
cursor.execute("SELECT datname FROM pg_database WHERE " +
"datistemplate = false;")
fetch = cursor.fetchall()
dblist = [fetch[i][0] for i in range(len(fetch))]
if self.name not in dblist:
cursor.execute("CREATE DATABASE %s;" % self.name)
logger.debug("created databse %s", self.name)
except psycopg2.DatabaseError as e:
logger.warning("error creating database: %s", self.fmt_errmsg(e))
finally:
conn.close()
def add_extensions(self):
"""If not already added, add extensions required by Ravel (plpython3u,
postgis, pgrouting)"""
try:
self.cursor.execute("SELECT 1 FROM pg_catalog.pg_namespace n JOIN " +
"pg_catalog.pg_proc p ON pronamespace = n.oid " +
"WHERE proname = 'pgr_dijkstra';")
fetch = self.cursor.fetchall()
if fetch == []:
self.cursor.execute("CREATE EXTENSION IF NOT EXISTS plpython3u;")
self.cursor.execute("CREATE EXTENSION IF NOT EXISTS postgis;")
self.cursor.execute("CREATE EXTENSION IF NOT EXISTS pgrouting;")
self.cursor.execute("CREATE EXTENSION plsh;")
logger.debug("created extensions")
except psycopg2.DatabaseError as e:
logger.warning("error loading extensions: %s", self.fmt_errmsg(e))
def clean(self):
"""Clean the database of any existing Ravel components"""
# close existing connections
self.conn.close()
conn = None
try:
conn = psycopg2.connect(database="postgres",
user=self.user,
password=self.passwd)
conn.set_isolation_level(ISOLEVEL)
cursor = conn.cursor()
cursor.execute("drop database %s" % self.name)
except psycopg2.DatabaseError as e:
logger.warning("error cleaning database: %s", self.fmt_errmsg(e))
finally:
if conn:
conn.close()
def truncate(self):
"""Clean the database of any state Ravel components, except for
topology tables. This rolls back the database to the state after
the topology is first loaded"""
try:
tables = ["cf", "clock", "p_spv", "spatial_ref_sys", "spv_tb_del",
"spv_tb_ins", "rm", "rm_delta", "urm"]
self.cursor.execute("truncate %s;" % ", ".join(tables))
logger.debug("truncated tables")
self.cursor.execute("INSERT INTO clock values (0);")
except psycopg2.DatabaseError as e:
logger.warning("error truncating databases: %s", self.fmt_errmsg(e))
def fmt_errmsg(self, exception):
return str(exception).strip()
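# Minimal usage sketch (database name, user and password are placeholders;
# BASE_SQL is the base schema bundled with Ravel):
#   db = RavelDb("ravel", "ravel", BASE_SQL, passwd="secret", reconnect=False)
#   db.cursor.execute("SELECT count(*) FROM switches;")
#   print(db.cursor.fetchone())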
|
the-stack_0_16401
|
import numpy as np
from .customKF import CustomKF
class CustomRTS():
def __init__(self, z, del_t):
self.z = z
self.del_t = del_t
def run(self, initial_mean, initial_variance, Q, sigma_square):
# Forward batch filter
kf = CustomKF(Q, sigma_square)
prior_means, prior_variances, post_means, post_variances = kf.batch_filter(initial_mean, initial_variance, self.z, self.del_t)
num_samples = len(self.z)
# Smoother
S = [0 for _ in range(num_samples)]
smoothed_means = [0 for _ in range(num_samples)]
smoothed_variances = [0 for _ in range(num_samples)]
smoothed_means[-1] = post_means[-1]
smoothed_variances[-1] = post_variances[-1]
for j in range(num_samples - 2, -1, -1):
S[j] = post_variances[j] * (1 / prior_variances[j+1])
smoothed_means[j] = post_means[j] + S[j] * (smoothed_means[j+1] - prior_means[j+1])
smoothed_variances[j] = post_variances[j] + S[j] * S[j] * (smoothed_variances[j+1] - prior_variances[j+1])
K = (self.del_t[-1] ** 2) * prior_variances[-1] + (sigma_square ** 2) * self.del_t[-1]
M = [0 for _ in range(num_samples)]
M[-1] = (1 - K * self.del_t[-1]) * post_variances[-2]
for j in range(num_samples-2, 0, -1):
M[j] = post_variances[j] * S[j-1] + S[j] * (M[j+1] - post_variances[j]) * S[j-1]
expected_η = [0 for _ in range(num_samples)]
expected_η_square = [0 for _ in range(num_samples)]
expected_η_η_1 = [0 for _ in range(num_samples)]
for j in range(num_samples-1, -1, -1):
expected_η[j] = smoothed_means[j]
expected_η_square[j] = smoothed_means[j]**2 + smoothed_variances[j]
if j != 0:
expected_η_η_1[j] = smoothed_means[j] * smoothed_means[j-1] + M[j]
return expected_η, expected_η_square, expected_η_η_1
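# Minimal usage sketch (the measurements, time steps and noise parameters
# below are illustrative assumptions):
#   z = [1.0, 1.2, 0.9, 1.1]                  # observations
#   del_t = [1.0, 1.0, 1.0, 1.0]              # time increments
#   rts = CustomRTS(z, del_t)
#   eta, eta_sq, eta_lag1 = rts.run(initial_mean=0.0, initial_variance=1.0,
#                                   Q=0.1, sigma_square=0.5)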
|
the-stack_0_16402
|
from setuptools import find_packages, setup
with open("README.md", "r") as f:
README = f.read()
setup(
name='yappa',
version='0.4.19',
url='https://github.com/turokg/yappa',
description='Easy serverless deploy of python web applications',
long_description_content_type="text/markdown",
long_description=README,
author='Egor Korovin',
author_email='[email protected]',
packages=find_packages(),
install_requires=[
'boto3>=1.10',
'click>=8.0',
'httpx>=0.18',
'yandexcloud>=0.102.1',
'boltons>=21.0',
'idna<3,>=2.5',
"PyYAML>=5.0",
"furl>=2.0",
"pytz>=2021"
],
python_requires='>=3.8.0',
entry_points={'console_scripts': ['yappa = yappa.cli:cli']},
license="MIT",
package_data={'yappa': ['*.yaml']},
include_package_data=True,
)
|
the-stack_0_16403
|
"""Support for Xiaomi Mi Air Quality Monitor (PM2.5)."""
import logging
from miio import AirQualityMonitor, DeviceException
import voluptuous as vol
from homeassistant.components.air_quality import PLATFORM_SCHEMA, AirQualityEntity
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_TOKEN
import homeassistant.helpers.config_validation as cv
from .const import (
CONF_DEVICE,
CONF_FLOW_TYPE,
CONF_MODEL,
DOMAIN,
MODEL_AIRQUALITYMONITOR_B1,
MODEL_AIRQUALITYMONITOR_S1,
MODEL_AIRQUALITYMONITOR_V1,
)
from .device import XiaomiMiioEntity
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "Xiaomi Miio Air Quality Monitor"
ATTR_CO2E = "carbon_dioxide_equivalent"
ATTR_TVOC = "total_volatile_organic_compounds"
ATTR_TEMP = "temperature"
ATTR_HUM = "humidity"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_TOKEN): vol.All(cv.string, vol.Length(min=32, max=32)),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
PROP_TO_ATTR = {
"carbon_dioxide_equivalent": ATTR_CO2E,
"total_volatile_organic_compounds": ATTR_TVOC,
"temperature": ATTR_TEMP,
"humidity": ATTR_HUM,
}
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Import Miio configuration from YAML."""
_LOGGER.warning(
"Loading Xiaomi Miio Air Quality via platform setup is deprecated. "
"Please remove it from your configuration"
)
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data=config,
)
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Xiaomi Air Quality from a config entry."""
entities = []
if config_entry.data[CONF_FLOW_TYPE] == CONF_DEVICE:
host = config_entry.data[CONF_HOST]
token = config_entry.data[CONF_TOKEN]
name = config_entry.title
model = config_entry.data[CONF_MODEL]
unique_id = config_entry.unique_id
_LOGGER.debug("Initializing with host %s (token %s...)", host, token[:5])
device = AirQualityMonitor(host, token, model=model)
if model == MODEL_AIRQUALITYMONITOR_S1:
entities.append(AirMonitorS1(name, device, config_entry, unique_id))
elif model == MODEL_AIRQUALITYMONITOR_B1:
entities.append(AirMonitorB1(name, device, config_entry, unique_id))
elif model == MODEL_AIRQUALITYMONITOR_V1:
entities.append(AirMonitorV1(name, device, config_entry, unique_id))
else:
_LOGGER.warning("AirQualityMonitor model '%s' is not yet supported", model)
async_add_entities(entities, update_before_add=True)
class AirMonitorB1(XiaomiMiioEntity, AirQualityEntity):
"""Air Quality class for Xiaomi cgllc.airmonitor.b1 device."""
def __init__(self, name, device, entry, unique_id):
"""Initialize the entity."""
super().__init__(name, device, entry, unique_id)
self._icon = "mdi:cloud"
self._available = None
self._air_quality_index = None
self._carbon_dioxide = None
self._carbon_dioxide_equivalent = None
self._particulate_matter_2_5 = None
self._total_volatile_organic_compounds = None
self._temperature = None
self._humidity = None
async def async_update(self):
"""Fetch state from the miio device."""
try:
state = await self.hass.async_add_executor_job(self._device.status)
_LOGGER.debug("Got new state: %s", state)
self._carbon_dioxide_equivalent = state.co2e
self._particulate_matter_2_5 = round(state.pm25, 1)
self._total_volatile_organic_compounds = round(state.tvoc, 3)
self._temperature = round(state.temperature, 2)
self._humidity = round(state.humidity, 2)
self._available = True
except DeviceException as ex:
self._available = False
_LOGGER.error("Got exception while fetching the state: %s", ex)
@property
def icon(self):
"""Return the icon to use for device if any."""
return self._icon
@property
def available(self):
"""Return true when state is known."""
return self._available
@property
def air_quality_index(self):
"""Return the Air Quality Index (AQI)."""
return self._air_quality_index
@property
def carbon_dioxide(self):
"""Return the CO2 (carbon dioxide) level."""
return self._carbon_dioxide
@property
def carbon_dioxide_equivalent(self):
"""Return the CO2e (carbon dioxide equivalent) level."""
return self._carbon_dioxide_equivalent
@property
def particulate_matter_2_5(self):
"""Return the particulate matter 2.5 level."""
return self._particulate_matter_2_5
@property
def total_volatile_organic_compounds(self):
"""Return the total volatile organic compounds."""
return self._total_volatile_organic_compounds
@property
def temperature(self):
"""Return the current temperature."""
return self._temperature
@property
def humidity(self):
"""Return the current humidity."""
return self._humidity
@property
def extra_state_attributes(self):
"""Return the state attributes."""
data = {}
for prop, attr in PROP_TO_ATTR.items():
value = getattr(self, prop)
if value is not None:
data[attr] = value
return data
class AirMonitorS1(AirMonitorB1):
"""Air Quality class for Xiaomi cgllc.airmonitor.s1 device."""
async def async_update(self):
"""Fetch state from the miio device."""
try:
state = await self.hass.async_add_executor_job(self._device.status)
_LOGGER.debug("Got new state: %s", state)
self._carbon_dioxide = state.co2
self._particulate_matter_2_5 = state.pm25
self._total_volatile_organic_compounds = state.tvoc
self._temperature = state.temperature
self._humidity = state.humidity
self._available = True
except DeviceException as ex:
if self._available:
self._available = False
_LOGGER.error("Got exception while fetching the state: %s", ex)
class AirMonitorV1(AirMonitorB1):
"""Air Quality class for Xiaomi cgllc.airmonitor.s1 device."""
async def async_update(self):
"""Fetch state from the miio device."""
try:
state = await self.hass.async_add_executor_job(self._device.status)
_LOGGER.debug("Got new state: %s", state)
self._air_quality_index = state.aqi
self._available = True
except DeviceException as ex:
if self._available:
self._available = False
_LOGGER.error("Got exception while fetching the state: %s", ex)
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return None
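# Minimal usage sketch for polling the device directly with python-miio,
# outside Home Assistant (host and token below are placeholders):
#   monitor = AirQualityMonitor("192.168.1.50", "0123456789abcdef0123456789abcdef",
#                               model=MODEL_AIRQUALITYMONITOR_B1)
#   state = monitor.status()
#   state.pm25, state.tvoc, state.temperature, state.humidity
# This mirrors what async_update() runs through the executor above.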
|