import sys, traceback
import datetime
from time import sleep
import daemon
import click
from corr.main import core
class CoRRTask:
def __init__(self, pid=None, name=None, refresh=10, aid=None, origin=None, tag=None, clnk_module=None, api_module=None, elnk_module=None, timeout=60*60):
self.pid = pid
self.origin = origin
self.aid = aid
self.name = name
self.refresh = refresh
self.root = None
self.history = []
self.tag = tag
self.record = ''
self.info = None
self.timeout = timeout
self.clnk_module = clnk_module
self.link = elnk_module.ExecLink(tag=tag, watcher='corrTask')
extensions = core.read_extend()
self.api_module = api_module
self.records = []
self.request = {}
self.ios = {'inputs':[], 'outputs':[]}
def sync_io(self, config):
for _input in self.ios['inputs']:
api_response = self.api_module.upload_file(
config=config,
path=_input,
group='input',
obj=self.record)
for _output in self.ios['outputs']:
api_response = self.api_module.upload_file(
config=config,
path=_output,
group='output',
obj=self.record)
def run(self):
found = False
duration = 0
project = None
config = None
while True:
running = False
self.info = self.link.record()
if self.info:
found = True
running = True
core.write_repo(self.name, self.info)
config = core.read_config('default')
registrations = core.read_reg('default')
# # print self.name
# # print self.tag
regs = self.clnk_module.find_by(
regs=registrations,
name=self.name,
tag=self.tag)
# # print "Record: {0}".format(self.record)
# # print registrations
# # print regs
if len(regs) > 0:
project = registrations[regs[0]]['project']
if project:
if self.link.updated:
# print self.tag
for data in self.info['io_files']:
if data[3] in ['r', 'r+', 'a+'] and data[0] not in self.ios['inputs']:
self.ios['inputs'].append(data[0])
if data[3] in ['w', 'w+', 'a', 'a+'] and data[0] not in self.ios['outputs']:
self.ios['outputs'].append(data[0])
try:
self.request['inputs'] = [
{
'input':data
} for data in self.info['io_files'] if data[3] in ['r', 'r+', 'a+']]
except:
self.request['inputs'] = []
try:
self.request['outputs'] = [
{
'output':data
} for data in self.info['io_files'] if data[3] in ['w', 'w+', 'a', 'a+']]
except:
self.request['outputs'] = []
try:
self.request['dependencies'] = [
{
'dependency':data
} for data in self.info['libraries']]
except:
self.request['dependencies'] = []
self.request['status'] = self.info['status']
self.request['extend'] = {}
self.request['extend']['children'] = self.info['children']
self.request['extend']['network'] = self.info['network']
self.request['extend']['cp_purcentage'] = self.info['cp_purcentage']
self.request['extend']['mem_purcentage'] = self.info['mem_purcentage']
self.request['extend']['threads'] = self.info['threads']
api_response = self.api_module.record_update(
config=config,
record=self.record,
request=self.request)
self.records.append(self.request)
# print "Record updated"
if not api_response[0]:
# # print "Error: Watcher recording create process failed."
# # print api_response[1]
pass
else:
self.request['label'] = self.tag
self.request['tags'] = [self.tag]
# print self.tag
self.request['system'] = self.info['computer']
for data in self.info['io_files']:
if data[3] in ['r', 'r+', 'a+'] and data[0] not in self.ios['inputs']:
self.ios['inputs'].append(data[0])
if data[3] in ['w', 'w+', 'a', 'a+'] and data[0] not in self.ios['outputs']:
self.ios['outputs'].append(data[0])
try:
self.request['inputs'] = [
{
'input':data
} for data in self.info['io_files'] if data[3] in ['r', 'r+', 'a+']]
except:
self.request['inputs'] = []
try:
self.request['outputs'] = [
{
'output':data
} for data in self.info['io_files'] if data[3] in ['w', 'w+', 'a', 'a+']]
except:
self.request['outputs'] = []
try:
self.request['dependencies'] = [
{
'dependency':data
} for data in self.info['libraries']]
except:
self.request['dependencies'] = []
self.request['status'] = self.info['status']
self.request['access'] = 'private'
self.request['execution'] = {
'cmdline':self.info['cmdline'],
'executable':self.info['executable'],
'path':self.info['path'],
'name':self.info['name']}
self.request['extend'] = {}
self.request['extend']['children'] = self.info['children']
self.request['extend']['network'] = self.info['network']
self.request['extend']['cp_purcentage'] = self.info['cp_purcentage']
self.request['extend']['mem_purcentage'] = self.info['mem_purcentage']
self.request['extend']['threads'] = self.info['threads']
api_response = self.api_module.record_create(
config=config,
project=project,
request=self.request)
# print "Record created"
self.records.append(self.request)
if api_response[0]:
self.record = api_response[1]['head']['id']
else:
# # print "Error: Watcher recording create process failed."
# print api_response[1]
# pass
pass
if self.info['status'] in ['killed', 'terminated', 'stoped']: #'sleeping',
running = False
else:
# print "Error: Unable to find the project."
pass
else:
# print "No info!!!"
pass
if found and not running:
break
sleep(self.refresh)
duration += self.refresh
if duration >= self.timeout:
break
self.sync_io(config)
return self.records
@click.command()
@click.option('--name', default=None, help="Watched software name.")
@click.option('--tag', default=None, help="Watched process tag.")
@click.option('--delay', default=None, help="Watching delay.")
@click.option('--aid', default=None, help="Backend api host.")
@click.option('--origin', default=None, help="Original process")
@click.option('--clnk', default=None, help="core linker")
@click.option('--api', default=None, help="api client")
@click.option('--elnk', default=None, help="execution linker")
def handle(name, tag, delay, aid, origin, clnk, api, elnk):
delay_value = 10
aid_value = None
origin_value = None
# # print "Name: {0}".format(name)
# # print "tag: {0}".format(tag)
stamp = str(datetime.datetime.now())
if delay:
delay_value = int(delay)
if name and tag and api and elnk and clnk:
clnk_module = core.extend_load(clnk)
elnk_module = core.extend_load(elnk)
api_module = core.extend_load(api)
task = CoRRTask(name=name, tag=tag, clnk_module=clnk_module, api_module=api_module, elnk_module=elnk_module)
# task.run()
try:
# # print "Loading watcher: {0}".format(task.tag)
with daemon.DaemonContext():
task.run()
except:
pass
if __name__ == '__corr.main__':
handle()
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .shark import Shark
class Sawshark(Shark):
"""Sawshark.
All required parameters must be populated in order to send to Azure.
:param species:
:type species: str
:param length: Required.
:type length: float
:param siblings:
:type siblings: list[~bodycomplex.models.Fish]
:param fishtype: Required. Constant filled by server.
:type fishtype: str
:param age:
:type age: int
:param birthday: Required.
:type birthday: datetime
:param picture:
:type picture: bytearray
"""
_validation = {
'length': {'required': True},
'fishtype': {'required': True},
'birthday': {'required': True},
}
_attribute_map = {
'species': {'key': 'species', 'type': 'str'},
'length': {'key': 'length', 'type': 'float'},
'siblings': {'key': 'siblings', 'type': '[Fish]'},
'fishtype': {'key': 'fishtype', 'type': 'str'},
'age': {'key': 'age', 'type': 'int'},
'birthday': {'key': 'birthday', 'type': 'iso-8601'},
'picture': {'key': 'picture', 'type': 'bytearray'},
}
def __init__(self, **kwargs):
super(Sawshark, self).__init__(**kwargs)
self.picture = kwargs.get('picture', None)
self.fishtype = 'sawshark'
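# Construction sketch (illustrative; not part of the generated code): the model is
# populated via keyword arguments, and the fields marked required in _validation are
# expected to be checked by the msrest serializer rather than in __init__, e.g.
#
#     import datetime
#     shark = Sawshark(length=12.5,
#                      birthday=datetime.datetime(2015, 8, 8),
#                      species="dangerous",
#                      picture=bytearray([255, 255, 255, 255, 254]))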
|
import logging
from dataclasses import dataclass
from typing import Dict, Hashable, Mapping, Set, Tuple, Union, overload
import xarray as xr
logger = logging.getLogger(__name__)
@dataclass(frozen=True, eq=False)
class Spec:
"""Root type Spec"""
default_name: str
__doc__: str
# Note: we want to prevent dev/users from mistakenly
# using Spec as a hashable obj in dict, xr.Dataset
__hash__ = None # type: ignore[assignment]
@dataclass(frozen=True, eq=False)
class ArrayLikeSpec(Spec):
"""ArrayLike type spec"""
kind: Union[None, str, Set[str]] = None
ndim: Union[None, int, Set[int]] = None
class SgkitVariables:
"""Holds registry of Sgkit variables, and can validate a dataset against a spec"""
registered_variables: Dict[Hashable, Spec] = {}
@classmethod
def register_variable(cls, spec: Spec) -> Tuple[str, Spec]:
"""Register variable spec"""
if spec.default_name in cls.registered_variables:
raise ValueError(f"`{spec.default_name}` already registered")
cls.registered_variables[spec.default_name] = spec
return spec.default_name, spec
@classmethod
@overload
def _validate(
cls,
xr_dataset: xr.Dataset,
*specs: Mapping[Hashable, Spec],
) -> xr.Dataset:
"""
Validate that xr_dataset contains array(s) of interest with alternative
variable name(s). To validate all variables in the dataset, skip `specs`.
"""
... # pragma: no cover
@classmethod
@overload
def _validate(cls, xr_dataset: xr.Dataset, *specs: Spec) -> xr.Dataset:
"""
Validate that xr_dataset contains array(s) of interest with default
variable name(s). To validate all variables in the dataset, skip `specs`.
"""
... # pragma: no cover
@classmethod
@overload
def _validate(cls, xr_dataset: xr.Dataset, *specs: Hashable) -> xr.Dataset:
"""
Validate that xr_dataset contains array(s) of interest with variable
name(s). Variable must be registered in `SgkitVariables.registered_variables`.
To validate all variables in the dataset, skip `specs`.
"""
... # pragma: no cover
@classmethod
def _validate(
cls,
xr_dataset: xr.Dataset,
*specs: Union[Spec, Mapping[Hashable, Spec], Hashable],
) -> xr.Dataset:
return cls._check_dataset(xr_dataset, False, *specs)
@classmethod
def _annotate(
cls,
xr_dataset: xr.Dataset,
*specs: Union[Spec, Mapping[Hashable, Spec], Hashable],
) -> xr.Dataset:
"""
Validate that xr_dataset contains array(s) of interest with variable
name(s), and annotate variables with a `comment` attribute containing
their doc comments.
Variable must be registered in `SgkitVariables.registered_variables`.
To validate all variables in the dataset, skip `specs`.
"""
return cls._check_dataset(xr_dataset, True, *specs)
@classmethod
def _check_dataset(
cls,
xr_dataset: xr.Dataset,
add_comment_attr: bool,
*specs: Union[Spec, Mapping[Hashable, Spec], Hashable],
) -> xr.Dataset:
if len(specs) == 0:
specs = tuple(xr_dataset.variables.keys())
logger.debug(f"No specs provided, will validate all variables: {specs}")
for s in specs:
if isinstance(s, Spec):
cls._check_field(
xr_dataset, s, s.default_name, add_comment_attr=add_comment_attr
)
elif isinstance(s, Mapping):
for fname, field_spec in s.items():
cls._check_field(
xr_dataset, field_spec, fname, add_comment_attr=add_comment_attr
)
elif s:
try:
field_spec = cls.registered_variables[s]
cls._check_field(
xr_dataset,
field_spec,
field_spec.default_name,
add_comment_attr=add_comment_attr,
)
except KeyError:
if s in xr_dataset.indexes.keys():
logger.debug(f"Ignoring missing spec for index: {s}")
else:
raise ValueError(f"No array spec registered for {s}")
return xr_dataset
@classmethod
def _check_field(
cls,
xr_dataset: xr.Dataset,
field_spec: Spec,
field: Hashable,
add_comment_attr: bool = False,
) -> None:
from sgkit.utils import check_array_like
assert isinstance(
field_spec, ArrayLikeSpec
), "ArrayLikeSpec is the only currently supported variable spec"
try:
arr = xr_dataset[field]
try:
check_array_like(arr, kind=field_spec.kind, ndim=field_spec.ndim)
if add_comment_attr and field_spec.__doc__ is not None:
arr.attrs["comment"] = field_spec.__doc__.strip()
except (TypeError, ValueError) as e:
raise ValueError(
f"{field} does not match the spec, see the error above for more detail"
) from e
except KeyError:
raise ValueError(f"{field} not present in {xr_dataset}")
validate = SgkitVariables._validate
"""Shortcut for SgkitVariables.validate"""
annotate = SgkitVariables._annotate
"""Shortcut for SgkitVariables.annotate"""
"""
We define xr.Dataset variables used in the sgkit methods below,
these definitions:
* provide documentation
* specify shapes/types of data
* are used for internal input/output validation
Users writing their own methods do not have to use the validation
if they don't want to.
Regarding documentation, the first sentence of the docstring should
be a short summary (one sentence), it will appear on the global variable
summary page. The rest of the docstring will appear on the variable
specific page.
"""
call_allele_count, call_allele_count_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"call_allele_count",
ndim=3,
kind="u",
__doc__="""
Allele counts. With shape (variants, samples, alleles) and values
corresponding to the number of non-missing occurrences of each allele.
""",
)
)
call_allele_frequency, call_allele_frequency_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"call_allele_frequency",
ndim=3,
kind="f",
__doc__="""
Allele frequencies. With shape (variants, samples, alleles) and values
corresponding to the frequencies of non-missing occurrences of each allele.
""",
)
)
call_dosage, call_dosage_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"call_dosage",
kind="f",
ndim=2,
__doc__="""Dosages, encoded as floats, with NaN indicating a missing value.""",
)
)
call_dosage_mask, call_dosage_mask_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"call_dosage_mask",
kind="b",
ndim=2,
__doc__="""A flag for each call indicating which values are missing.""",
)
)
call_genotype, call_genotype_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"call_genotype",
kind="i",
ndim=3,
__doc__="""
Call genotype. Encoded as allele values (0 for the reference, 1 for
the first allele, 2 for the second allele), -1 to indicate a
missing value, or -2 to indicate a non allele in mixed ploidy datasets.
""",
)
)
call_genotype_mask, call_genotype_mask_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"call_genotype_mask",
kind="b",
ndim=3,
__doc__="""A flag for each call indicating which values are missing.""",
)
)
(call_genotype_fill, call_genotype_fill_spec,) = SgkitVariables.register_variable(
ArrayLikeSpec(
"call_genotype_fill",
kind="b",
ndim=3,
__doc__="""
A flag for each allele position within mixed ploidy call genotypes
indicating fill (non-allele) values of lower ploidy calls.
""",
)
)
call_genotype_phased, call_genotype_phased_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"call_genotype_phased",
kind="b",
ndim=2,
__doc__="""
A flag for each call indicating if it is phased or not. If omitted
all calls are unphased.
""",
)
)
call_genotype_complete, call_genotype_complete_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"call_genotype_complete",
kind="i",
ndim=3,
__doc__="""
Call genotypes in which partial genotype calls are replaced with
completely missing genotype calls.
""",
)
)
(
call_genotype_complete_mask,
call_genotype_complete_mask_spec,
) = SgkitVariables.register_variable(
ArrayLikeSpec(
"call_genotype_complete_mask",
kind="b",
ndim=3,
__doc__="""A flag for each call indicating which values are missing.""",
)
)
(
call_genotype_probability,
call_genotype_probability_spec,
) = SgkitVariables.register_variable(
ArrayLikeSpec(
"call_genotype_probability",
kind="f",
ndim=3,
__doc__="""Genotype probabilities.""",
)
)
(
call_genotype_probability_mask,
call_genotype_probability_mask_spec,
) = SgkitVariables.register_variable(
ArrayLikeSpec(
"call_genotype_probability_mask",
kind="b",
ndim=3,
__doc__="""A flag for each call indicating which values are missing.""",
)
)
call_heterozygosity, call_heterozygosity_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"call_heterozygosity",
kind="f",
ndim=2,
__doc__="""
Observed heterozygosity of each call genotype.
""",
)
)
call_ploidy, call_ploidy_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"call_ploidy",
kind="i",
ndim=2,
__doc__="Call genotype ploidy.",
)
)
cohort_allele_count, cohort_allele_count_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"cohort_allele_count", kind="i", ndim=3, __doc__="""Cohort allele counts."""
)
)
covariates, covariates_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"covariates",
ndim={1, 2},
__doc__="""
Covariate variable names. Must correspond to 1 or 2D dataset
variables of shape (samples[, covariates]). All covariate arrays
will be concatenated along the second axis (columns).
""",
)
)
dosage, dosage_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"dosage",
__doc__="""
        Dosage variable name. The "dosage" array can represent
one of several possible quantities, e.g.:
- Alternate allele counts
- Recessive or dominant allele encodings
- True dosages as computed from imputed or probabilistic variant calls
- Any other custom encoding in a user-defined variable
""",
)
)
genotype_count, genotype_count_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"genotype_count",
ndim=2,
kind="i",
__doc__="""
Genotype counts. Must correspond to an (`N`, 3) array where `N` is equal
to the number of variants and the 3 columns contain heterozygous,
homozygous reference, and homozygous alternate counts (in that order)
across all samples for a variant.
""",
)
)
stat_identity_by_state, stat_identity_by_state_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"stat_identity_by_state",
ndim=2,
kind="f",
__doc__="""
Pairwise IBS probabilities among all samples.
""",
)
)
ld_prune_index_to_drop, ld_prune_index_to_drop_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"ld_prune_index_to_drop",
ndim=1,
kind="i",
__doc__="""
Variant indexes to drop for LD prune.
""",
)
)
(
regenie_base_prediction,
regenie_base_prediction_spec,
) = SgkitVariables.register_variable(
ArrayLikeSpec(
"regenie_base_prediction",
ndim=4,
kind="f",
__doc__="""
REGENIE's base prediction (blocks, alphas, samples, outcomes). Stage 1
predictions from ridge regression reduction.
""",
)
)
(
regenie_loco_prediction,
regenie_loco_prediction_spec,
) = SgkitVariables.register_variable(
ArrayLikeSpec(
"regenie_loco_prediction",
ndim=3,
kind="f",
__doc__="""
REGENIE's regenie_loco_prediction (contigs, samples, outcomes). LOCO predictions
resulting from Stage 2 predictions ignoring effects for variant blocks on
held out contigs. This will be absent if the data provided does not contain
at least 2 contigs.
""",
)
)
(
regenie_meta_prediction,
regenie_meta_prediction_spec,
) = SgkitVariables.register_variable(
ArrayLikeSpec(
"regenie_meta_prediction",
ndim=2,
kind="f",
__doc__="""
REGENIE's regenie_meta_prediction (samples, outcomes). Stage 2 predictions from
the best meta estimator trained on the out-of-sample Stage 1 predictions.
""",
)
)
pc_relate_phi, pc_relate_phi_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"pc_relate_phi",
ndim=2,
kind="f",
__doc__="""PC Relate kinship coefficient matrix.""",
)
)
sample_call_rate, sample_call_rate_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"sample_call_rate",
ndim=1,
kind="f",
__doc__="""The fraction of variants with called genotypes.""",
)
)
sample_cohort, sample_cohort_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"sample_cohort",
ndim=1,
kind="i",
__doc__="""The index of the cohort that each sample belongs to.
A negative value indicates a sample is not a member of any cohort.""",
)
)
sample_id, sample_id_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"sample_id",
kind={"S", "U", "O"},
ndim=1,
__doc__="""The unique identifier of the sample.""",
)
)
sample_n_called, sample_n_called_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"sample_n_called",
ndim=1,
kind="i",
__doc__="""The number of variants with called genotypes.""",
)
)
sample_n_het, sample_n_het_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"sample_n_het",
ndim=1,
kind="i",
__doc__="""The number of variants with heterozygous calls.""",
)
)
sample_n_hom_alt, sample_n_hom_alt_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"sample_n_hom_alt",
ndim=1,
kind="i",
__doc__="""The number of variants with homozygous alternate calls.""",
)
)
sample_n_hom_ref, sample_n_hom_ref_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"sample_n_hom_ref",
ndim=1,
kind="i",
__doc__="""The number of variants with homozygous reference calls.""",
)
)
sample_n_non_ref, sample_n_non_ref_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"sample_n_non_ref",
ndim=1,
kind="i",
__doc__="""The number of variants that are not homozygous reference calls.""",
)
)
sample_pca_component, sample_pca_component_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"sample_pca_component",
ndim=2,
kind="f",
__doc__="""Principal axes defined as eigenvectors for sample covariance matrix.
In the context of SVD, these are equivalent to the right singular vectors in
        the decomposition of a (N, M) matrix, i.e. ``dask_ml.decomposition.TruncatedSVD.components_``.""",
)
)
(
sample_pca_explained_variance,
sample_pca_explained_variance_spec,
) = SgkitVariables.register_variable(
ArrayLikeSpec(
"sample_pca_explained_variance",
ndim=1,
kind="f",
__doc__="""Variance explained by each principal component. These values are equivalent
to eigenvalues that result from the eigendecomposition of a (N, M) matrix,
i.e. ``dask_ml.decomposition.TruncatedSVD.explained_variance_``.""",
)
)
(
sample_pca_explained_variance_ratio,
sample_pca_explained_variance_ratio_spec,
) = SgkitVariables.register_variable(
ArrayLikeSpec(
"sample_pca_explained_variance_ratio",
ndim=1,
kind="f",
__doc__="""Ratio of variance explained to total variance for each principal component,
i.e. ``dask_ml.decomposition.TruncatedSVD.explained_variance_ratio_``.""",
)
)
sample_pca_loading, sample_pca_loading_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"sample_pca_loading",
ndim=2,
kind="f",
__doc__="""PCA loadings defined as principal axes scaled by square root of eigenvalues.
These values can also be interpreted as the correlation between the original variables
and unit-scaled principal axes.""",
)
)
sample_pca_projection, sample_pca_projection_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"sample_pca_projection",
ndim=2,
kind="f",
__doc__="""Projection of samples onto principal axes. This array is commonly
referred to as "scores" or simply "principal components (PCs)" for a set of samples.""",
)
)
sample_ploidy, sample_ploidy_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"sample_ploidy",
kind="i",
ndim=1,
__doc__="""Ploidy of each sample calculated from call genotypes across all variants
with -1 indicating variable ploidy.""",
)
)
stat_Fst, stat_Fst_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"stat_Fst",
ndim=3,
kind="f",
__doc__="""Fixation index (Fst) between pairs of cohorts.""",
)
)
stat_divergence, stat_divergence_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"stat_divergence",
ndim=3,
kind="f",
__doc__="""Genetic divergence between pairs of cohorts.""",
)
)
stat_diversity, stat_diversity_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"stat_diversity",
ndim=2,
kind="f",
__doc__="""Genetic diversity (also known as "Tajima’s pi") for cohorts.""",
)
)
stat_Garud_h1, stat_Garud_h1_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"stat_Garud_h1",
ndim={1, 2},
kind="f",
__doc__="""Garud H1 statistic for cohorts.""",
)
)
stat_Garud_h12, stat_Garud_h12_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"stat_Garud_h12",
ndim={1, 2},
kind="f",
__doc__="""Garud H12 statistic for cohorts.""",
)
)
stat_Garud_h123, stat_Garud_h123_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"stat_Garud_h123",
ndim={1, 2},
kind="f",
__doc__="""Garud H123 statistic for cohorts.""",
)
)
stat_Garud_h2_h1, stat_Garud_h2_h1_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"stat_Garud_h2_h1",
ndim={1, 2},
kind="f",
__doc__="""Garud H2/H1 statistic for cohorts.""",
)
)
(
stat_observed_heterozygosity,
stat_observed_heterozygosity_spec,
) = SgkitVariables.register_variable(
ArrayLikeSpec(
"stat_observed_heterozygosity",
kind="f",
ndim=2,
__doc__="""
Observed heterozygosity for cohorts.
""",
)
)
stat_pbs, stat_pbs_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"stat_pbs",
ndim=4,
kind="f",
__doc__="""Population branching statistic for cohort triples.""",
)
)
stat_Tajimas_D, stat_Tajimas_D_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"stat_Tajimas_D", ndim=2, kind="f", __doc__="""Tajima’s D for cohorts."""
)
)
stat_Weir_Goudet_beta, stat_Weir_Goudet_beta_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"stat_Weir_Goudet_beta",
ndim=2,
kind="f",
__doc__="""Pairwise Weir Goudet beta statistic among all samples.""",
)
)
traits, traits_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"traits",
ndim={1, 2},
__doc__="""
Trait (for example phenotype) variable names. Must all be continuous and
correspond to 1 or 2D dataset variables of shape (samples[, traits]).
2D trait arrays will be assumed to contain separate traits within columns
and concatenated to any 1D traits along the second axis (columns).
""",
)
)
variant_allele, variant_allele_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"variant_allele",
kind={"S", "O"},
ndim=2,
__doc__="""The possible alleles for the variant.""",
)
)
variant_allele_count, variant_allele_count_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"variant_allele_count",
ndim=2,
kind="u",
__doc__="""
Variant allele counts. With shape (variants, alleles) and values
corresponding to the number of non-missing occurrences of each allele.
""",
)
)
(
variant_allele_frequency,
variant_allele_frequency_spec,
) = SgkitVariables.register_variable(
ArrayLikeSpec(
"variant_allele_frequency",
ndim=2,
kind="f",
__doc__="""The frequency of the occurrence of each allele.""",
)
)
variant_allele_total, variant_allele_total_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"variant_allele_total",
ndim=1,
kind="i",
__doc__="""The number of occurrences of all alleles.""",
)
)
variant_linreg_beta, variant_linreg_beta_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"variant_linreg_beta",
__doc__="""Beta values associated with each variant and trait.""",
)
)
variant_linreg_effect, variant_linreg_effect_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"variant_linreg_effect",
__doc__="""Effect size estimate for each variant and trait.""",
)
)
variant_call_rate, variant_call_rate_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"variant_call_rate",
ndim=1,
kind="f",
__doc__="""The fraction of samples with called genotypes.""",
)
)
variant_contig, variant_contig_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"variant_contig",
kind={"i", "u"},
ndim=1,
__doc__="""
Index corresponding to contig name for each variant. In some less common
scenarios, this may also be equivalent to the contig names if the data
generating process used contig names that were also integers.
""",
)
)
variant_hwe_p_value, variant_hwe_p_value_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"variant_hwe_p_value",
kind="f",
__doc__="""P values from HWE test for each variant as float in [0, 1].""",
)
)
variant_id, variant_id_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"variant_id",
kind={"S", "U", "O"},
ndim=1,
__doc__="""The unique identifier of the variant.""",
)
)
variant_n_called, variant_n_called_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"variant_n_called",
ndim=1,
kind="i",
__doc__="""The number of samples with called genotypes.""",
)
)
variant_n_het, variant_n_het_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"variant_n_het",
ndim=1,
kind="i",
__doc__="""The number of samples with heterozygous calls.""",
)
)
variant_n_hom_alt, variant_n_hom_alt_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"variant_n_hom_alt",
ndim=1,
kind="i",
__doc__="""The number of samples with homozygous alternate calls.""",
)
)
variant_n_hom_ref, variant_n_hom_ref_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"variant_n_hom_ref",
ndim=1,
kind="i",
__doc__="""The number of samples with homozygous reference calls.""",
)
)
variant_n_non_ref, variant_n_non_ref_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"variant_n_non_ref",
ndim=1,
kind="i",
__doc__="""The number of samples that are not homozygous reference calls.""",
)
)
variant_linreg_p_value, variant_linreg_p_value_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"variant_linreg_p_value", kind="f", __doc__="""P values as float in [0, 1]."""
)
)
variant_position, variant_position_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"variant_position",
kind="i",
ndim=1,
__doc__="""The reference position of the variant.""",
)
)
variant_linreg_t_value, variant_linreg_t_value_spec = SgkitVariables.register_variable(
ArrayLikeSpec("variant_linreg_t_value", __doc__="""T statistics for each beta.""")
)
variant_ploidy, variant_ploidy_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"variant_ploidy",
kind="i",
ndim=1,
__doc__="""Ploidy of each variant calculated from call genotypes across all samples
with -1 indicating variable ploidy.""",
)
)
variant_score, variant_score_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"variant_score",
ndim=1,
kind="f",
__doc__="""
Scores to prioritize variant selection when constructing an LD matrix.
""",
)
)
window_contig, window_contig_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"window_contig",
kind="i",
ndim=1,
__doc__="""The contig index of each window.""",
)
)
window_start, window_start_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"window_start",
kind="i",
ndim=1,
__doc__="""The index values of window start positions along the ``variants`` dimension.""",
)
)
window_stop, window_stop_spec = SgkitVariables.register_variable(
ArrayLikeSpec(
"window_stop",
kind="i",
ndim=1,
__doc__="""The index values of window stop positions along the ``variants`` dimension.""",
)
)
|
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="functional-functions",
version = "0.5.2",
author = "Lawrence Chin",
author_email = "[email protected]",
description = "Commonly used functions by the Compass FBI Team",
long_description = long_description,
long_description_content_type="text/markdown",
url="https://github.com/UrbanCompass/Functional-Functions",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
install_requires=[
"snowflake-connector-python >= 2.1.1",
"snowflake-sqlalchemy >= 1.2.4",
"pandas >= 1.1.4",
"numpy >= 1.19.4",
"pytz >= 2020.4",
"pyarrow >= 0.17.1",
"boto3 >= 1.18.54",
"redshift-connector >= 2.0.902",
"databricks-sql-connector >= 0.9.3",
"python-dotenv>=0.19.0"
],
include_package_data=True,
package_data={'': ['settings.py.sample']}
) |
'''
Module defining update state for the UrbanScape class
'''
import numpy as np
#=============================
# |
# UrbanScape Agent Methods |
# |
#=============================
# Base class for all Food Agents
# agent has a location and wealth
class Agent(object):
def __init__(self, loc, wealth):
self.loc = loc # x,y coordinates of location in urbanscape
self.wealth = wealth # $ amount
# Called every time-step
def step(self, urbanscape):
self.check_bankruptcy(urbanscape)
# Remove the agent if broke
def check_bankruptcy(self, urbanscape):
if self.wealth < 0:
self.wealth = 0
urbanscape.remove_agent(self)
class FastFoodAgent(Agent):
operations = 50000 # $ per year
initial_wealth = 10000 # $ amount
    radius = 2 # number of blocks affected around the location
effect_coordinates = []
def __init__(self, loc, urbanscape):
super(FastFoodAgent, self).__init__(loc, self.initial_wealth)
self.define_radius(urbanscape)
self.operating_costs = (urbanscape.rent[loc]) + (FastFoodAgent.operations)
def step(self, urbanscape):
# 1. Gather revenues for the year
self.capture_revenue(urbanscape)
# 2. Perform parent class step function
super(FastFoodAgent, self).step(urbanscape)
#this function defines how much of fast food expenditures are
#captured by the FoodAgent. If there are no competing FastFoodAgents,
#assumes that FoodAgent captures all of block expenditures.
def define_radius(self, urbanscape):
self.effect_coordinates = urbanscape.effect_radius(self.loc, self.radius)
def capture_revenue(self, urbanscape):
x,y = self.loc
self.wealth += (urbanscape.capture_expenditures(self.effect_coordinates)[0]) - self.operating_costs
#capture_expenditures returns a list of two values, the 0th index being the ff_revenues
class GroceryStoreAgent(Agent):
operations = 200000 # $ per year
initial_wealth = 25000 # $ amount
    radius = 2 # number of blocks affected around the location
effect_coordinates = []
def __init__(self, loc, urbanscape):
super(GroceryStoreAgent, self).__init__(loc, self.initial_wealth)
self.define_radius(urbanscape)
self.operating_costs = (urbanscape.rent[loc]) + (GroceryStoreAgent.operations)
def step(self, urbanscape):
# 1. Gather revenues for the year
self.capture_revenue(urbanscape)
# 2. Perform parent class step function
super(GroceryStoreAgent, self).step(urbanscape)
#this function defines how much of grocery store expenditures are
#captured by the FoodAgent. If there are no competing FoodAgents,
#assumes that FoodAgent captures all of block expenditures.
def define_radius(self, urbanscape):
self.effect_coordinates = urbanscape.effect_radius(self.loc, self.radius)
#assumes that the rest of income spent on food away from home is spent on groceries
def capture_revenue(self, urbanscape):
x,y = self.loc
self.wealth += (urbanscape.capture_expenditures(self.effect_coordinates)[1]) - self.operating_costs
    #capture_expenditures returns a list of two values, the 1st index being the grocery store revenues |
from setuptools import setup
with open("README.md", 'r') as f:
long_description = f.read()
setup(
name='supervised-product-matching',
version='0.1',
description='Neural network for product matching, aka classifying whether two product titles represent the same entity.',
license="MIT",
long_description=long_description,
author='Jason Acheampong',
author_email='[email protected]',
url="https://github.com/Mascerade/supervised-product-matching",
packages=['supervised_product_matching', 'supervised_product_matching.model_architectures'],
install_requires=['torch',
'transformers',
'nltk',
'numpy',
'scale_transformer_encoder @ git+https://github.com/Mascerade/scale-transformer-encoder@f684132c63cf7f8d771decd6fb560c9158ced361#egg=scale_transformer_encoder',
'character_bert @ git+https://github.com/Mascerade/character-bert@c44d0f1e7d2e822296a0578eecba52ddadd22d0e#egg=character_bert']
) |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @author: loricheung
import os
import math
import json
import urllib
import argparse
from alfred.feedback import Feedback
headers = {
'Host': 'frodo.douban.com',
'Content-Type': 'application/json',
'Connection': 'keep-alive',
'Accept': '*/*',
'User-Agent':' Mozilla/5.0 (iPhone; CPU iPhone OS 11_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E217 MicroMessenger/6.8.0(0x16080000) NetType/WIFI Language/en Branch/Br_trunk MiniProgramEnv/Mac',
'Referer': 'https://servicewechat.com/wx2f9b06c1de1ccfca/84/page-frame.html',
'Accept-Language':' en-us',
'Cookie': 'll="108296"; bid=iytqR0heGuI'
}
search_mode = {
"v": ['movie', 'tv'],
"s": ['music'],
"b": ['book'],
"o": ['app', 'game', 'event', 'drama'],
"p": ['person'],
"all": ['movie', 'tv', 'music', 'book', 'app', 'game', 'event', 'drama', 'person']
}
target_url = {
"movie": "https://movie.douban.com/subject/",
"book": "https://book.douban.com/subject/",
"tv": "https://movie.douban.com/subject/",
"music": "https://music.douban.com/subject/",
"app": "https://www.douban.com/app/",
"game": "https://www.douban.com/game/",
"event": "https://www.douban.com/event/",
"drama": "https://www.douban.com/drama/",
}
participant = {
'movie': 1,
'book': 2,
'tv': 3,
'music': 4,
'app': 5,
'game': 6,
'event': 7,
'drama': 8,
'person': 9,
'doulist_cards': 10
}
def sorter(item):
try:
value = item['target']['rating']['value']
except:
value = -1
try:
year = item['target']['year']
except:
year = -1
return (participant[item['target_type']], -int(year), -float(value))
cache_folder = 'cache'
if not os.path.exists(cache_folder):
os.mkdir(cache_folder)
def clear():
for root, _, files in os.walk(cache_folder):
for name in files:
os.remove(os.path.join(cache_folder, name))
os.removedirs(root)
class Douban(object):
def __init__(self):
for _, _, files in os.walk(cache_folder):
for name in files:
os.remove(os.path.join(cache_folder, name))
def __del__(self):
pass
def _download_thumb(self, url):
if "?" in url:
url = url.split('?')[0]
return os.system('nohup curl --parallel --no-progress-meter --output-dir cache -O %s &' % url)
def search(self, keyword, mode=None):
request = urllib.request.Request("https://frodo.douban.com/api/v2/search/weixin?start=0&count=20&apiKey=0ac44ae016490db2204ce0a042db2916&q=" + urllib.parse.quote(keyword), data=None, headers=headers)
response = urllib.request.urlopen(request)
result = response.read().decode("utf-8")
data = json.loads(result)
feedback = Feedback()
if data['count'] > 0:
sort_data = sorted(data['items'], key=sorter)
for item in sort_data:
target_type = item["target_type"]
if mode:
query_mode = search_mode[mode]
else:
query_mode = search_mode['all']
if (target_type in target_url.keys() and target_type in query_mode):
url = target_url[target_type] + item["target"]["id"]
cover_url = item['target']['cover_url']
if '?' in cover_url:
cover_url = cover_url.split('?')[0]
cover = cover_url.split('/')[-1]
_ = self._download_thumb(cover_url)
title = item["target"]["title"]
star = item["target"]["rating"]["star_count"]
info = item["target"]["card_subtitle"]
decimal, integer = math.modf(float(star))
if decimal != 0.0:
star_info = (int(integer) * '★') + '☆'
else:
star_info = (int(integer) * '★')
icon = os.path.join(cache_folder, cover)
feedback.addItem(title=title + u' ' + star_info, subtitle=info, arg=url, icon=icon)
if len(feedback) == 0:
            feedback.addItem(uid='0', title=u'未能搜索到结果, 请通过豆瓣搜索页面进行搜索', subtitle=u'按下回车键, 跳转到豆瓣', arg=u'https://search.douban.com/movie/subject_search?search_text=%s&cat=1002' % urllib.parse.quote(keyword), icon='icon.png')
feedback.output()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('search', type=str)
args = parser.parse_args()
if args.search == 'c':
clear()
douban = Douban()
all_args = args.search.split()
if len(all_args) > 1:
mode, kw = all_args[0], all_args[1:]
if mode in search_mode.keys():
douban.search(keyword=' '.join(kw), mode=mode)
else:
douban.search(keyword=args.search)
else:
douban.search(keyword=args.search)
|
import configparser
import glob
import os
class Config():
def __init__(self, config_name):
super(Config, self).__init__()
self.config_name = config_name
self.init()
def init(self):
inilist = glob.glob("*.ini")
if not os.path.isdir("./playlist"):
os.mkdir("./playlist")
if not os.path.isdir("./data"):
os.mkdir("./data")
if len(inilist) == 0:
self.saveConfig(self.config_name)
else:
self.config = configparser.ConfigParser()
self.config.read(self.config_name)
def getConfig(self):
return self.config
def checkConfig(self):
"""
items = self.config.items('dir')
for item in items:
print(item)
"""
# For validation of the ini file
return True
def saveConfig(self, name='config.ini', path_library='C://Users/'+os.getlogin() + '/Music', path_data='./data',
path_playlist='./playlist/AnimeSongs.m3u', path_database='./info.db', width=800, height=480,
path_style="./styles/style.qss", language="ja"):
config = configparser.ConfigParser()
config.add_section('path')
config.set('path', 'library', path_library)
config.set('path', 'data', path_data)
config.set('path', 'playlist', path_playlist)
config.set('path', 'database', path_database)
config.add_section('screen')
config.set('screen', 'width', str(width))
config.set('screen', 'height', str(height))
config.add_section('general')
config.set('general', 'style', path_style)
config.set('general', 'language', language)
with open(name, 'w') as file:
config.write(file)
self.config = config
return config
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 28 08:19:09 2019
@author: amandaash
"""
import numpy as np
import matplotlib.pyplot as plt
def viral_load(A,B,alpha,beta, time):
V = A * np.exp(-alpha*time) + B * np.exp(-beta*time)
return V
A = [160000, 3000, 50000, 20000, 100000]
B = [10, 10, 10, 10, 10]
alpha = [0.45, -0.45, -0.1, 0.1, 5]
beta = [0.01, 0.01, 0.01, 0.01, 0.01]
t = np.arange(0,7.2,0.1)
parameter_array = np.column_stack((A,B,alpha,beta))
for parameter_set in parameter_array:
virus = viral_load(parameter_set[0], parameter_set[1], parameter_set[2], parameter_set[3], t)
plt.plot(t, virus, '.', label = r'A = {0}, B = {1}, $\alpha$ = {2}, $\beta$ = {3}'.format(parameter_set[0], parameter_set[1], parameter_set[2], parameter_set[3]))
plt.ylabel('viral load')
plt.xlabel('time')
plt.title('HIV virus concentration over time')
plt.legend()
plt.tight_layout()
plt.savefig('/Users/amandaash/Desktop/PHYS_3210/Week 02/HIV_plot.pdf')
plt.show() |
from __future__ import print_function
import os
import unittest
import fixtures
import testtools
from common.contrail_test_init import ContrailTestInit
from contrail_fixtures import *
from common.connections import ContrailConnections
from testresources import ResourcedTestCase
from sanity_resource import SolnSetupResource
from tcutils.wrappers import preposttest_wrapper
from performance.verify import PerformanceTest
class PerformanceSanity(testtools.TestCase, ResourcedTestCase, PerformanceTest):
resources = [('base_setup', SolnSetupResource)]
def __init__(self, *args, **kwargs):
testtools.TestCase.__init__(self, *args, **kwargs)
self.res = SolnSetupResource.getResource()
self.inputs = self.res.inputs
self.connections = self.res.connections
self.logger = self.res.logger
self.nova_h = self.res.nova_h
self.analytics_obj = self.connections.analytics_obj
self.vnc_lib = self.connections.vnc_lib
self.quantum_h = self.connections.quantum_h
self.cn_inspect = self.connections.cn_inspect
def __del__(self):
print("Deleting test_with_setup now")
SolnSetupResource.finishedWith(self.res)
def setUp(self):
super(PerformanceSanity, self).setUp()
if 'TEST_CONFIG_FILE' in os.environ:
self.input_file = os.environ.get('TEST_CONFIG_FILE')
else:
self.input_file = 'params.ini'
def tearDown(self):
print("Tearing down test")
super(PerformanceSanity, self).tearDown()
SolnSetupResource.finishedWith(self.res)
def runTest(self):
pass
@preposttest_wrapper
def test_netperf_within_vn(self):
"""Check the throughput between the VM's within the same VN
1. Create VN and launch two instance within network
2. Set CPU to highest performance in compute nodes before running test
3. Run netperf command for fixed duration to find throughput
"""
return self.test_check_netperf_within_vn()
if __name__ == '__main__':
unittest.main()
|
"""
GeneratorConf
=============
This file defines the configurations to setup and run the Generator.
It contains a list of tasks needed to be done by the Generator. Each
task specification is specified by a list of variables in a dictionary.
Each dictionary has these configuration variables:
"TEMPLATE" - the specific Template class used.
"DATA_PROVIDER" - the specific DataProvider class used.
"DATA_RESOURCE" - the specific DataResource class used.
"HANDLER" - the specific Handler class used.
* all above variables are tuples with module and class names
"RESOURCES" - a list of dicts each of which is used to init a
DataResource instance.
"SAVE_PATH" - where to save the generated codes.
"""
TASKS = [
{
"NAME" : "Test",
"TEMPLATE" : ("Template", "Template"),
"DATA_PROVIDER" : ("DataProvider", "DataProvider"),
"DATA_RESOURCE" : ("DataResource", "DataResource"),
"HANDLER" : ("Handler", "BaseHandler"),
"RESOURCES" : [],
"SAVE_PATH" : "out",
},
]
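# Loading sketch (an assumption for illustration; the Generator's own loader is not
# shown in this file): a ("module", "class") tuple like those above could be resolved
# with importlib, e.g.
#
#     import importlib
#
#     def load(spec):
#         module_name, class_name = spec
#         return getattr(importlib.import_module(module_name), class_name)
#
#     template_cls = load(TASKS[0]["TEMPLATE"])  # -> the Template class from the Template module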
|
import logging
class Logger:
def __init__(self, log_level):
log_format_string = "%(asctime)s [%(levelname)s] %(message)s"
log_formatter = logging.Formatter(log_format_string)
logging.basicConfig(level=log_level, format=log_format_string)
file_handler = logging.FileHandler("boxrec-scrapper.log")
file_handler.setFormatter(log_formatter)
file_handler.setLevel("DEBUG")
log = logging.getLogger()
log.addHandler(file_handler)
self.log = log
def _get_logger(self):
return self.log
@staticmethod
def get_logger(log_level):
return Logger(log_level)._get_logger()
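# Usage sketch (illustrative, not part of the original file): records accepted by the
# root logger (level `log_level`) are emitted both to the console and, through the
# DEBUG-level file handler, to boxrec-scrapper.log, e.g.
#
#     log = Logger.get_logger(logging.INFO)
#     log.info("scraper started")  # printed to the console and appended to the log file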
|
import os
import time
import numpy as np
import math
from scipy.spatial.distance import cdist
import pickle
import paramiko
import torch
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
from torch.utils.tensorboard import SummaryWriter
import torch.nn as nn
from data.Sketchy import sketchy_extended
from data.TUBerlin import tuberlin_extended
from data.DomainNet import domainnet
from data.dataloaders import BaselineDataset, CuMixloader
from data.sampler import BalancedSampler
from models.snmpnet.snmpnet import SnMpNet
from losses.embedding_losses import Mixup_Cosine_CCE, Mixup_Euclidean_MSE
from utils import utils
from utils.metrics import compute_retrieval_metrics
from utils.logger import AverageMeter
class Trainer:
def __init__(self, args):
self.args = args
print('\nLoading data...')
if args.dataset=='Sketchy':
data_input = sketchy_extended.create_trvalte_splits(args)
if args.dataset=='DomainNet':
data_input = domainnet.create_trvalte_splits(args)
if args.dataset=='TUBerlin':
data_input = tuberlin_extended.create_trvalte_splits(args)
self.tr_classes = data_input['tr_classes']
self.va_classes = data_input['va_classes']
semantic_vec = data_input['semantic_vec']
data_splits = data_input['splits']
np.random.seed(args.seed)
torch.manual_seed(args.seed)
use_gpu = torch.cuda.is_available()
if use_gpu:
cudnn.benchmark = True
torch.cuda.manual_seed_all(args.seed)
# Imagenet standards
im_mean = [0.485, 0.456, 0.406]
im_std = [0.229, 0.224, 0.225]
# Image transformations
image_transforms = {
'train':
transforms.Compose([
transforms.RandomResizedCrop((args.image_size, args.image_size), (0.8, 1.0)),
transforms.RandomHorizontalFlip(0.5),
transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.4),
transforms.ToTensor(),
transforms.Normalize(im_mean, im_std)
]),
'eval':
transforms.Compose([
transforms.Resize((args.image_size, args.image_size)),
transforms.ToTensor(),
transforms.Normalize(im_mean, im_std)
]),
}
# class dictionary
self.dict_clss = utils.create_dict_texts(self.tr_classes)
fls_tr = data_splits['tr']
cls_tr = np.array([f.split('/')[-2] for f in fls_tr])
dom_tr = np.array([f.split('/')[-3] for f in fls_tr])
tr_domains_unique = np.unique(dom_tr)
# doamin dictionary
self.dict_doms = utils.create_dict_texts(tr_domains_unique)
print(self.dict_doms)
domain_ids = utils.numeric_classes(dom_tr, self.dict_doms)
data_train = CuMixloader(fls_tr, cls_tr, dom_tr, self.dict_doms, transforms=image_transforms['train'])
train_sampler = BalancedSampler(domain_ids, args.batch_size//len(tr_domains_unique), domains_per_batch=len(tr_domains_unique))
self.train_loader = DataLoader(dataset=data_train, batch_size=args.batch_size, sampler=train_sampler, num_workers=args.num_workers,
pin_memory=True)
data_va_query = BaselineDataset(data_splits['query_va'], transforms=image_transforms['eval'])
data_va_gallery = BaselineDataset(data_splits['gallery_va'], transforms=image_transforms['eval'])
# PyTorch valid loader for query
self.va_loader_query = DataLoader(dataset=data_va_query, batch_size=args.batch_size*5, shuffle=False, num_workers=args.num_workers,
pin_memory=True)
# PyTorch valid loader for gallery
self.va_loader_gallery = DataLoader(dataset=data_va_gallery, batch_size=args.batch_size*5, shuffle=False, num_workers=args.num_workers,
pin_memory=True)
print(f'#Tr samples:{len(data_train)}; #Val queries:{len(data_va_query)}; #Val gallery samples:{len(data_va_gallery)}.\n')
print('Loading Done\n')
# Model
self.model = SnMpNet(semantic_dim=args.semantic_emb_size, pretrained='imagenet', num_tr_classes=len(self.tr_classes)).cuda()
self.glove_embed_seen = np.array([semantic_vec.get(cl) for cl in self.tr_classes])
self.retrieval_loss = Mixup_Cosine_CCE(torch.from_numpy(self.glove_embed_seen).float().cuda())
self.embedding_loss = Mixup_Euclidean_MSE(torch.from_numpy(self.glove_embed_seen).float().cuda(), args.alpha)
self.RG = np.random.default_rng()
if args.optimizer=='sgd':
self.optimizer = optim.SGD(self.model.parameters(), weight_decay=args.l2_reg, momentum=args.momentum, nesterov=False, lr=args.lr)
elif args.optimizer=='adam':
self.optimizer = optim.Adam(self.model.parameters(), lr=args.lr, betas=(0.9, 0.999), eps=1e-8, weight_decay=args.l2_reg)
if args.dataset=='DomainNet':
save_folder_name = 'seen-'+args.seen_domain+'_unseen-'+args.holdout_domain+'_x_'+args.gallery_domain
if not args.include_auxillary_domains:
save_folder_name += '_noaux'
        elif args.dataset=='Sketchy':
if args.is_eccv_split:
save_folder_name = 'eccv_split'
else:
save_folder_name = 'random_split'
else:
save_folder_name = ''
if args.dataset=='DomainNet' or (args.dataset=='Sketchy' and args.is_eccv_split):
self.map_metric = 'aps@200'
self.prec_metric = 'prec@200'
else:
self.map_metric = 'aps@all'
self.prec_metric = 'prec@100'
self.path_cp = os.path.join(args.checkpoint_path, args.dataset, save_folder_name)
self.suffix = '_mixlevel-'+args.mixup_level+'_wcce-'+str(args.wcce)+'_wratio-'+str(args.wratio)+'_wmse-'+str(args.wmse)+\
'_clswts-'+str(args.alpha)+'_e-'+str(args.epochs)+'_es-'+str(args.early_stop)+'_opt-'+args.optimizer+\
'_bs-'+str(args.batch_size)+'_lr-'+str(args.lr)+'_l2-'+str(args.l2_reg)+'_beta-'+str(args.mixup_beta)+\
'_warmup-'+str(args.mixup_step)+'_seed-'+str(args.seed)+'_tv-'+str(args.trainvalid)
# exit(0)
path_log = os.path.join('./logs', args.dataset, save_folder_name, self.suffix)
# Logger
print('Setting logger...', end='')
self.logger = SummaryWriter(path_log)
print('Done\n')
self.start_epoch = 0
self.best_map = 0
self.early_stop_counter = 0
self.last_chkpt_name='init'
self.resume_from_checkpoint(args.resume_dict)
def adjust_learning_rate(self, min_lr=1e-6):
# lr = args.lr * 0.5 * (1.0 + math.cos(float(epoch) / args.epochs * math.pi))
# epoch_curr = min(epoch, 20)
# lr = args.lr * math.pow(0.001, float(epoch_curr)/ 20 )
lr = self.args.lr * math.pow(1e-3, float(self.current_epoch)/20)
lr = max(lr, min_lr)
# print('epoch: {}, lr: {}'.format(epoch, lr))
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr
def resume_from_checkpoint(self, resume_dict):
if resume_dict is not None:
print('==> Resuming from checkpoint: ',resume_dict)
checkpoint = torch.load(os.path.join(self.path_cp, resume_dict+'.pth'))
self.start_epoch = checkpoint['epoch']+1
self.model.load_state_dict(checkpoint['model_state_dict'])
self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
self.best_map = checkpoint['best_map']
self.last_chkpt_name = resume_dict
def swap(self, xs, a, b):
xs[a], xs[b] = xs[b], xs[a]
def derange(self, xs):
x_new = [] + xs
for a in range(1, len(x_new)):
b = self.RG.choice(range(0, a))
self.swap(x_new, a, b)
return x_new
def soft_cce(self, y_pred, y_true):
loss = -torch.sum(y_true*torch.log_softmax(y_pred, dim=1), dim=1)
return loss.mean()
def get_mixed_samples(self, X, y, domain_ids, mixup_level):
batch_ratios = self.RG.beta(self.mixup_beta, self.mixup_beta, size=X.size(0))
if mixup_level=='feat':
ratio = np.expand_dims(batch_ratios, axis=1)
elif mixup_level=='img':
ratio = np.expand_dims(batch_ratios, axis=(1, 2, 3))
ratio = torch.from_numpy(ratio).float().cuda()
doms = list(range(len(torch.unique(domain_ids))))
bs = X.size(0) // len(doms)
selected = self.derange(doms)
permuted_across_dom = torch.cat([(torch.randperm(bs) + selected[i] * bs) for i in range(len(doms))])
permuted_within_dom = torch.cat([(torch.randperm(bs) + i * bs) for i in range(len(doms))])
ratio_within_dom = torch.from_numpy(self.RG.binomial(1, self.mixup_domain, size=X.size(0)))
mixed_indices = ratio_within_dom*permuted_within_dom + (1. - ratio_within_dom)*permuted_across_dom
mixed_indices = mixed_indices.long()
X_mix = ratio*X + (1-ratio)*X[mixed_indices]
y_a, y_b = y, y[mixed_indices]
ratio_vec_gt = torch.zeros([X.size()[0], len(self.tr_classes)]).cuda()
for i in range(X.size()[0]):
ratio_vec_gt[i, y_a[i]] += batch_ratios[i]
ratio_vec_gt[i, y_b[i]] += 1-batch_ratios[i]
return X_mix, y_a, y_b, torch.from_numpy(batch_ratios).float().cuda(), ratio_vec_gt
def do_epoch(self):
self.model.train()
batch_time = AverageMeter()
dist_cce_loss = AverageMeter()
emb_mse_loss = AverageMeter()
ratio_loss = AverageMeter()
total_loss = AverageMeter()
# Start counting time
time_start = time.time()
for i, (im, cl, domain_ids) in enumerate(self.train_loader):
# Transfer im to cuda
im = im.float().cuda()
# Get numeric classes
cls_numeric = torch.from_numpy(utils.numeric_classes(cl, self.dict_clss)).long().cuda()
self.optimizer.zero_grad()
if self.args.mixup_level=='img':
im, y_a, y_b, ratios, ratio_vec_gt = self.get_mixed_samples(im, cls_numeric, domain_ids, 'img')
ratio_vec_pred, features = self.model(im)
if self.args.mixup_level=='feat':
features, y_a, y_b, ratios, ratio_vec_gt = self.get_mixed_samples(features, cls_numeric, domain_ids, 'feat')
sem_out = self.model.base_model.last_linear(features)
# Optimize parameters
cce_l = self.retrieval_loss(sem_out, y_a, y_b, ratios)
mse_l = self.embedding_loss(sem_out, y_a, y_b, ratios)
rat_l = self.soft_cce(ratio_vec_pred, ratio_vec_gt)
loss = self.args.wcce*cce_l + self.args.wmse*mse_l + self.args.wratio*rat_l
loss.backward()
self.optimizer.step()
# Store losses for visualization
dist_cce_loss.update(cce_l.item(), im.size(0))
emb_mse_loss.update(mse_l.item(), im.size(0))
ratio_loss.update(rat_l.item(), im.size(0))
total_loss.update(loss.item(), im.size(0))
# time
time_end = time.time()
batch_time.update(time_end - time_start)
time_start = time_end
if (i + 1) % self.args.log_interval == 0:
print('[Train] Epoch: [{0}/{1}][{2}/{3}]\t'
# 'lr:{3:.6f}\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'cce {cce.val:.4f} ({cce.avg:.4f})\t'
'mse {mse.val:.4f} ({mse.avg:.4f})\t'
'rat {rat.val:.4f} ({rat.avg:.4f})\t'
'net {net.val:.4f} ({net.avg:.4f})\t'
.format(self.current_epoch+1, self.args.epochs, i+1, len(self.train_loader), batch_time=batch_time,
cce=dist_cce_loss, mse=emb_mse_loss, rat=ratio_loss, net=total_loss))
# if (i+1)==50:
# break
return {'dist_cce':dist_cce_loss.avg, 'emb_mse':emb_mse_loss.avg, 'ratio_cce':ratio_loss.avg, 'net':total_loss.avg}
def do_training(self):
print('***Train***')
for self.current_epoch in range(self.start_epoch, self.args.epochs):
start = time.time()
self.adjust_learning_rate()
self.mixup_beta = min(self.args.mixup_beta, max(self.args.mixup_beta*(self.current_epoch)/self.args.mixup_step, 0.1))
self.mixup_domain = min(1.0, max((2*self.args.mixup_step - self.current_epoch)/self.args.mixup_step, 0.0))
print(f'\nAcross Class Mix Coeff:{self.mixup_beta}; Within Domain Mix Coeff:{self.mixup_domain}.\n')
loss = self.do_epoch()
# evaluate on validation set, map_ since map is already there
print('\n***Validation***')
            valid_data = evaluate(self.va_loader_query, self.va_loader_gallery, self.model, self.glove_embed_seen,
                                  self.current_epoch+1, self.args)
map_ = np.mean(valid_data[self.map_metric])
prec = valid_data[self.prec_metric]
end = time.time()
elapsed = end-start
print(f"Epoch Time:{elapsed//60:.0f}m{elapsed%60:.0f}s lr:{utils.get_lr(self.optimizer):.7f} mAP:{map_:.4f} prec:{prec:.4f}\n")
if map_ > self.best_map:
self.best_map = map_
self.early_stop_counter = 0
model_save_name = 'val_map-'+'{0:.4f}'.format(map_)+'_prec-'+'{0:.4f}'.format(prec)+'_ep-'+str(self.current_epoch+1)+self.suffix
utils.save_checkpoint({
'epoch':self.current_epoch+1,
'model_state_dict':self.model.state_dict(),
'optimizer_state_dict':self.optimizer.state_dict(),
'best_map':self.best_map,
'corr_prec':prec
}, directory=self.path_cp, save_name=model_save_name, last_chkpt=self.last_chkpt_name)
self.last_chkpt_name = model_save_name
else:
self.early_stop_counter += 1
if self.args.early_stop==self.early_stop_counter:
print(f"Validation Performance did not improve for {self.args.early_stop} epochs."
f"Early stopping by {self.args.epochs-self.current_epoch-1} epochs.")
break
print(f"Val mAP hasn't improved from {self.best_map:.4f} for {self.early_stop_counter} epoch(s)!\n")
# Logger step
self.logger.add_scalar('Train/glove Based CE loss', loss['dist_cce'], self.current_epoch)
self.logger.add_scalar('Train/Embedding MSE loss', loss['emb_mse'], self.current_epoch)
self.logger.add_scalar('Train/Mixup Ratio SoftCCE loss', loss['ratio_cce'], self.current_epoch)
self.logger.add_scalar('Train/total loss', loss['net'], self.current_epoch)
self.logger.add_scalar('Val/map', map_, self.current_epoch)
self.logger.add_scalar('Val/prec', prec, self.current_epoch)
self.logger.close()
print('\n***Training and Validation complete***')
def evaluate(loader_sketch, loader_image, model, glove_embed_seen, epoch, args, split='val'):
    # 'split' (e.g. 'val') is accepted to match the call in do_training(); it is not used below.
# Switch to test mode
model.eval()
batch_time = AverageMeter()
# Start counting time
time_start = time.time()
for i, (sk, cls_sk) in enumerate(loader_sketch):
sk = sk.float().cuda()
# Sketch embedding into a semantic space
with torch.no_grad():
_, sk_feat = model(sk)
sk_em = model.base_model.last_linear(sk_feat)
# Accumulate sketch embedding
if i == 0:
acc_sk_em = sk_em.cpu().data.numpy()
acc_cls_sk = cls_sk
else:
acc_sk_em = np.concatenate((acc_sk_em, sk_em.cpu().data.numpy()), axis=0)
acc_cls_sk = np.concatenate((acc_cls_sk, cls_sk), axis=0)
# time
time_end = time.time()
batch_time.update(time_end - time_start)
time_start = time_end
if (i + 1) % args.log_interval == 0:
print('[Eval][Query] Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
.format(epoch, i + 1, len(loader_sketch), batch_time=batch_time))
for i, (im, cls_im) in enumerate(loader_image):
im = im.float().cuda()
# Image embedding into a semantic space
with torch.no_grad():
_, im_feat = model(im)
im_em = model.base_model.last_linear(im_feat)
        # Accumulate image embedding
if i == 0:
acc_im_em = im_em.cpu().data.numpy()
acc_cls_im = cls_im
else:
acc_im_em = np.concatenate((acc_im_em, im_em.cpu().data.numpy()), axis=0)
acc_cls_im = np.concatenate((acc_cls_im, cls_im), axis=0)
# time
time_end = time.time()
batch_time.update(time_end - time_start)
time_start = time_end
if (i + 1) % args.log_interval == 0:
print('[Eval][Gallery] Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
.format(epoch, i + 1, len(loader_image), batch_time=batch_time))
print('\nQuery Emb Dim:{}; Gallery Emb Dim:{}'.format(acc_sk_em.shape, acc_im_em.shape))
eval_data = compute_retrieval_metrics(acc_sk_em, acc_cls_sk, acc_im_em, acc_cls_im)
return eval_data |
import numpy as np
import pytest
from pandas import (
Categorical,
Index,
)
import pandas._testing as tm
class TestTake:
# https://github.com/pandas-dev/pandas/issues/20664
def test_take_default_allow_fill(self):
cat = Categorical(["a", "b"])
with tm.assert_produces_warning(None):
result = cat.take([0, -1])
assert result.equals(cat)
def test_take_positive_no_warning(self):
cat = Categorical(["a", "b"])
with tm.assert_produces_warning(None):
cat.take([0, 0])
def test_take_bounds(self, allow_fill):
# https://github.com/pandas-dev/pandas/issues/20664
cat = Categorical(["a", "b", "a"])
if allow_fill:
msg = "indices are out-of-bounds"
else:
msg = "index 4 is out of bounds for( axis 0 with)? size 3"
with pytest.raises(IndexError, match=msg):
cat.take([4, 5], allow_fill=allow_fill)
def test_take_empty(self, allow_fill):
# https://github.com/pandas-dev/pandas/issues/20664
cat = Categorical([], categories=["a", "b"])
if allow_fill:
msg = "indices are out-of-bounds"
else:
msg = "cannot do a non-empty take from an empty axes"
with pytest.raises(IndexError, match=msg):
cat.take([0], allow_fill=allow_fill)
def test_positional_take(self, ordered):
cat = Categorical(["a", "a", "b", "b"], categories=["b", "a"], ordered=ordered)
result = cat.take([0, 1, 2], allow_fill=False)
expected = Categorical(
["a", "a", "b"], categories=cat.categories, ordered=ordered
)
tm.assert_categorical_equal(result, expected)
def test_positional_take_unobserved(self, ordered):
cat = Categorical(["a", "b"], categories=["a", "b", "c"], ordered=ordered)
result = cat.take([1, 0], allow_fill=False)
expected = Categorical(["b", "a"], categories=cat.categories, ordered=ordered)
tm.assert_categorical_equal(result, expected)
def test_take_allow_fill(self):
# https://github.com/pandas-dev/pandas/issues/23296
cat = Categorical(["a", "a", "b"])
result = cat.take([0, -1, -1], allow_fill=True)
expected = Categorical(["a", np.nan, np.nan], categories=["a", "b"])
tm.assert_categorical_equal(result, expected)
def test_take_fill_with_negative_one(self):
# -1 was a category
cat = Categorical([-1, 0, 1])
result = cat.take([0, -1, 1], allow_fill=True, fill_value=-1)
expected = Categorical([-1, -1, 0], categories=[-1, 0, 1])
tm.assert_categorical_equal(result, expected)
def test_take_fill_value(self):
# https://github.com/pandas-dev/pandas/issues/23296
cat = Categorical(["a", "b", "c"])
result = cat.take([0, 1, -1], fill_value="a", allow_fill=True)
expected = Categorical(["a", "b", "a"], categories=["a", "b", "c"])
tm.assert_categorical_equal(result, expected)
def test_take_fill_value_new_raises(self):
# https://github.com/pandas-dev/pandas/issues/23296
cat = Categorical(["a", "b", "c"])
xpr = r"Cannot setitem on a Categorical with a new category \(d\)"
with pytest.raises(TypeError, match=xpr):
cat.take([0, 1, -1], fill_value="d", allow_fill=True)
def test_take_nd_deprecated(self):
cat = Categorical(["a", "b", "c"])
with tm.assert_produces_warning(FutureWarning):
cat.take_nd([0, 1])
ci = Index(cat)
with tm.assert_produces_warning(FutureWarning):
ci.take_nd([0, 1])
|
# ==============================================================================
### IF YOU ARE RUNNING THIS IN SPYDER MAKE SURE TO USE A NEW CONSOLE EACH TIME
### TO CLEAR THE SESSION
### (press F6, and select 'Execute in a new dedicated Python console')
# ==============================================================================
# Simple script to generate & export tensorflow graph calculating c:=a*b
# based on https://medium.com/jim-fleming/loading-a-tensorflow-graph-with-the-c-api-4caaff88463f#.63x5c9hhg
#
# Modified by Memo Akten to demonstrate ofxMSATensorFlow
# http://github.com/memo/ofxMSATensorFlow
# ==============================================================================
import tensorflow as tf
import shutil
import os
out_path = '../data/models'
out_fname = 'model.pb'
with tf.Session() as sess:
a = tf.Variable(3.0, name='a')
b = tf.Variable(4.0, name='b')
    c = tf.multiply(a, b, name="c")  # tf.mul was renamed to tf.multiply
    sess.run(tf.global_variables_initializer())  # initialize_all_variables is deprecated
    print(a.eval())
    print(b.eval())
    print(c.eval())
# Delete output folder if it exists
if os.path.exists(out_path):
shutil.rmtree(out_path)
# Write graph to disk
tf.train.write_graph(sess.graph_def, out_path, out_fname, as_text=False)
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf 2.0 upgrader in safety mode."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test as test_lib
from tensorflow.tools.compatibility import ast_edits
from tensorflow.tools.compatibility import tf_upgrade_v2_safety
class TfUpgradeV2SafetyTest(test_util.TensorFlowTestCase):
def _upgrade(self, old_file_text):
in_file = six.StringIO(old_file_text)
out_file = six.StringIO()
upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade_v2_safety.TFAPIChangeSpec())
count, report, errors = (
upgrader.process_opened_file("test.py", in_file,
"test_out.py", out_file))
return count, report, errors, out_file.getvalue()
def testContribWarning(self):
text = "tf.contrib.foo()"
_, report, _, _ = self._upgrade(text)
expected_info = "tf.contrib will not be distributed"
self.assertIn(expected_info, report)
def testTensorFlowImport(self):
text = "import tensorflow as tf"
expected_text = ("import tensorflow.compat.v1 as tf")
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "import tensorflow as tf, other_import as y"
expected_text = ("import tensorflow.compat.v1 as tf, other_import as y")
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "import tensorflow"
expected_text = ("import tensorflow.compat.v1 as tensorflow")
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "import tensorflow.foo"
expected_text = "import tensorflow.compat.v1.foo"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "import tensorflow.foo as bar"
expected_text = "import tensorflow.compat.v1.foo as bar"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testTensorFlowGoogleImport(self):
text = "import tensorflow.google as tf"
expected_text = "import tensorflow.google.compat.v1 as tf"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "import tensorflow.google"
expected_text = "import tensorflow.google.compat.v1"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "import tensorflow.google.compat.v1 as tf"
expected_text = "import tensorflow.google.compat.v1 as tf"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "import tensorflow.google.compat.v2 as tf"
expected_text = "import tensorflow.google.compat.v2 as tf"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testTensorFlowImportInIndent(self):
text = """
try:
import tensorflow as tf # import line
tf.ones([4, 5])
except AttributeError:
pass
"""
expected_text = """
try:
import tensorflow.compat.v1 as tf # import line
tf.ones([4, 5])
except AttributeError:
pass
"""
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testTensorFlowFromImport(self):
text = "from tensorflow import foo"
expected_text = "from tensorflow.compat.v1 import foo"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "from tensorflow.foo import bar"
expected_text = "from tensorflow.compat.v1.foo import bar"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
text = "from tensorflow import *"
expected_text = "from tensorflow.compat.v1 import *"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(expected_text, new_text)
def testTensorFlowImportAlreadyHasCompat(self):
text = "import tensorflow.compat.v1 as tf"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
text = "import tensorflow.compat.v2 as tf"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
text = "from tensorflow.compat import v2 as tf"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
def testTensorFlowGoogleFromImport(self):
text = "from tensorflow.google.compat import v1 as tf"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
text = "from tensorflow.google.compat import v2 as tf"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
def testTensorFlowDontChangeContrib(self):
text = "import tensorflow.contrib as foo"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
text = "from tensorflow import contrib"
_, _, _, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
def test_contrib_to_addons_move(self):
small_mapping = {
"tf.contrib.layers.poincare_normalize":
"tfa.layers.PoincareNormalize",
"tf.contrib.layers.maxout":
"tfa.layers.Maxout",
"tf.contrib.layers.group_norm":
"tfa.layers.GroupNormalization",
"tf.contrib.layers.instance_norm":
"tfa.layers.InstanceNormalization",
}
for symbol, replacement in small_mapping.items():
text = "{}('stuff', *args, **kwargs)".format(symbol)
_, report, _, _ = self._upgrade(text)
self.assertIn(replacement, report)
if __name__ == "__main__":
test_lib.main()
|
from collections.abc import Mapping
import logging
import os
import pandas
from .models import (
genome_name_from_library,
load_gtf_cache,
)
logger = logging.getLogger(__name__)
class GTFCache(Mapping):
"""Map library IDs to the correct GTF annotation table
"""
def __init__(self, libraries, genome_dir):
"""Initializat class
:Parameters:
- libraries: (DataFrame) Library Table
- genome_dir: (directory) root path where indexes are stored
"""
assert isinstance(libraries, pandas.DataFrame)
self._libraries = libraries
self._genome_dir = genome_dir
self._gtf_cache = {}
def __getitem__(self, key):
"""Return gtf cache for a library_id
:Parameters:
- key: library ID
"""
row = self._libraries.loc[key]
genome_name = genome_name_from_library(row)
return self.get_gtf_cache(genome_name)
def __iter__(self):
"""iterator of keys
"""
for k in self._libraries.index:
yield k
def __len__(self):
"""Return number of records
"""
return len(self._libraries.index)
def get_gtf_cache(self, genome_name):
"""Look through list of libraries and attempt to load GTF caches
"""
if genome_name not in self._gtf_cache:
cache_pathname = self._get_gtf_cache_filename(genome_name)
            logger.debug('Searching for %s', cache_pathname)
try:
self._gtf_cache[genome_name] = load_gtf_cache(cache_pathname)
except FileNotFoundError as e:
                logger.error('Unable to load gene cache %s', cache_pathname)
return self._gtf_cache.get(genome_name)
def _get_gtf_cache_filename(self, genome_name):
"""Return the expected h5 cache file name
        :Parameters:
- genome_name: (string) like mm10-M4-male
:Returns: Filename
"""
if self._genome_dir is None:
logger.error("genome_dir is not specified. Please configure")
raise ValueError("genome_dir is not set")
return os.path.join(self._genome_dir, genome_name, genome_name + '.h5')
def protein_coding_gene_ids(annotation):
"""Filter GTF just protein coding genes
"""
entry_type = (annotation['type'] == 'gene')
gene_type = (annotation['gene_type'] == 'protein_coding')
return annotation[entry_type & gene_type]['gene_id']
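# A minimal usage sketch (runs only when executed directly, not on import). The
# 'genome' column and the paths below are illustrative assumptions;
# genome_name_from_library() defines which library-table columns are actually
# required, and the h5 cache files must already exist under genome_dir.
if __name__ == '__main__':
    libraries = pandas.DataFrame({'genome': ['mm10-M4-male']},
                                 index=['ENCLB001ABC'])
    gtf_cache = GTFCache(libraries, '/path/to/genome_dir')
    # Map a library ID to its cached GTF annotation table, then keep only
    # the protein coding gene IDs.
    annotation = gtf_cache['ENCLB001ABC']
    print(protein_coding_gene_ids(annotation).head())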
|
"""Unittests for library.
Run using `python -m unittest` in the top level directory folder.
"""
import os
import sys
add_path = os.path.dirname(os.path.dirname((os.path.realpath(__file__))))
# Ensure that the script lib is on path for imports.
sys.path.insert(0, add_path)
|
from django.shortcuts import render,get_object_or_404
from .models import Artist, Album, Song, SongReview
from django.contrib.auth.decorators import login_required
from .forms import GenreForm, ArtistForm, AlbumForm, SongForm, SongReviewForm
# Create your views here.
#Index view
def index(request):
return render(request,'Music/index.html')
#Artist list view
def getArtist(request):
artist_list = Artist.objects.all()
context={'artist_list':artist_list}
return render(request, 'Music/artist.html', context=context)
#Album list view
def getAlbum(request):
album_list = Album.objects.all()
context={'album_list':album_list}
return render(request, 'Music/album.html', context=context)
#Song list view
def getSong(request):
song_list = Song.objects.all()
context={'song_list':song_list}
return render(request, 'Music/song.html', context=context)
#Album song list view
def getAlbumList(request,id):
album_list = Song.objects.filter(album=id)
context={
'album_list': album_list
}
return render(request, 'Music/albumlist.html', context=context)
#Review list view
def getSongReview(request,id):
s = get_object_or_404(Song, pk=id)
title = s.songtitle
artist = s.artist
a = Artist.objects.get(name=artist)
aid = a.id
review_list = SongReview.objects.filter(song=id)
context={
'aid':aid,
'title':title,
'artist':artist,
'review_list': review_list
}
return render(request, 'Music/songreview.html', context=context)
#Artist all song view
def getArtistSong(request,id):
artist_song_list = Song.objects.filter(artist=id)
context={
'artist_song_list':artist_song_list,
}
return render(request, 'Music/artistsong.html', context=context)
#form view
@login_required
def newGenre(request):
form=GenreForm
if request.method=='POST':
form=GenreForm(request.POST)
if form.is_valid():
post=form.save(commit=True)
post.save()
form=GenreForm()
else:
form=GenreForm()
return render(request,'Music/newGenre.html',{'form':form})
@login_required
def newArtist(request):
form=ArtistForm
if request.method=='POST':
form=ArtistForm(request.POST)
if form.is_valid():
post=form.save(commit=True)
post.save()
form=ArtistForm()
else:
form=ArtistForm()
return render(request,'Music/newArtist.html',{'form':form})
@login_required
def newAlbum(request):
form=AlbumForm
if request.method=='POST':
form=AlbumForm(request.POST)
if form.is_valid():
post=form.save(commit=True)
post.save()
form=AlbumForm()
else:
form=AlbumForm()
return render(request,'Music/newAlbum.html',{'form':form})
@login_required
def newSong(request):
form=SongForm
if request.method=='POST':
form=SongForm(request.POST)
if form.is_valid():
post=form.save(commit=True)
post.save()
form=SongForm()
else:
form=SongForm()
return render(request,'Music/newSong.html',{'form':form})
@login_required
def newSongReview(request):
form=SongReviewForm
if request.method=='POST':
form=SongReviewForm(request.POST)
if form.is_valid():
post=form.save(commit=True)
post.save()
form=SongReviewForm()
else:
form=SongReviewForm()
return render(request,'Music/newSongReview.html',{'form':form})
def loginMessage(request):
return render(request,'Music/loginMessage.html')
def logoutMessage(request):
return render(request,'Music/logoutMessage.html') |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/tasks_v2beta3/proto/task.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.cloud.tasks_v2beta3.proto import target_pb2 as google_dot_cloud_dot_tasks__v2beta3_dot_proto_dot_target__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/cloud/tasks_v2beta3/proto/task.proto',
package='google.cloud.tasks.v2beta3',
syntax='proto3',
serialized_pb=_b('\n+google/cloud/tasks_v2beta3/proto/task.proto\x12\x1agoogle.cloud.tasks.v2beta3\x1a\x1cgoogle/api/annotations.proto\x1a-google/cloud/tasks_v2beta3/proto/target.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto\"\xec\x03\n\x04Task\x12\x0c\n\x04name\x18\x01 \x01(\t\x12S\n\x17\x61pp_engine_http_request\x18\x03 \x01(\x0b\x32\x30.google.cloud.tasks.v2beta3.AppEngineHttpRequestH\x00\x12\x31\n\rschedule_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x63reate_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x16\n\x0e\x64ispatch_count\x18\x06 \x01(\x05\x12\x16\n\x0eresponse_count\x18\x07 \x01(\x05\x12:\n\rfirst_attempt\x18\x08 \x01(\x0b\x32#.google.cloud.tasks.v2beta3.Attempt\x12\x39\n\x0clast_attempt\x18\t \x01(\x0b\x32#.google.cloud.tasks.v2beta3.Attempt\x12\x33\n\x04view\x18\n \x01(\x0e\x32%.google.cloud.tasks.v2beta3.Task.View\"1\n\x04View\x12\x14\n\x10VIEW_UNSPECIFIED\x10\x00\x12\t\n\x05\x42\x41SIC\x10\x01\x12\x08\n\x04\x46ULL\x10\x02\x42\x0e\n\x0cpayload_type\"\xcf\x01\n\x07\x41ttempt\x12\x31\n\rschedule_time\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\rdispatch_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x31\n\rresponse_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12+\n\x0fresponse_status\x18\x04 \x01(\x0b\x32\x12.google.rpc.StatusBn\n\x1e\x63om.google.cloud.tasks.v2beta3B\tTaskProtoP\x01Z?google.golang.org/genproto/googleapis/cloud/tasks/v2beta3;tasksb\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_cloud_dot_tasks__v2beta3_dot_proto_dot_target__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,google_dot_rpc_dot_status__pb2.DESCRIPTOR,])
_TASK_VIEW = _descriptor.EnumDescriptor(
name='View',
full_name='google.cloud.tasks.v2beta3.Task.View',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='VIEW_UNSPECIFIED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BASIC', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FULL', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=638,
serialized_end=687,
)
_sym_db.RegisterEnumDescriptor(_TASK_VIEW)
_TASK = _descriptor.Descriptor(
name='Task',
full_name='google.cloud.tasks.v2beta3.Task',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.cloud.tasks.v2beta3.Task.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='app_engine_http_request', full_name='google.cloud.tasks.v2beta3.Task.app_engine_http_request', index=1,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='schedule_time', full_name='google.cloud.tasks.v2beta3.Task.schedule_time', index=2,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='create_time', full_name='google.cloud.tasks.v2beta3.Task.create_time', index=3,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dispatch_count', full_name='google.cloud.tasks.v2beta3.Task.dispatch_count', index=4,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='response_count', full_name='google.cloud.tasks.v2beta3.Task.response_count', index=5,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='first_attempt', full_name='google.cloud.tasks.v2beta3.Task.first_attempt', index=6,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='last_attempt', full_name='google.cloud.tasks.v2beta3.Task.last_attempt', index=7,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='view', full_name='google.cloud.tasks.v2beta3.Task.view', index=8,
number=10, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_TASK_VIEW,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='payload_type', full_name='google.cloud.tasks.v2beta3.Task.payload_type',
index=0, containing_type=None, fields=[]),
],
serialized_start=211,
serialized_end=703,
)
_ATTEMPT = _descriptor.Descriptor(
name='Attempt',
full_name='google.cloud.tasks.v2beta3.Attempt',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='schedule_time', full_name='google.cloud.tasks.v2beta3.Attempt.schedule_time', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dispatch_time', full_name='google.cloud.tasks.v2beta3.Attempt.dispatch_time', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='response_time', full_name='google.cloud.tasks.v2beta3.Attempt.response_time', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='response_status', full_name='google.cloud.tasks.v2beta3.Attempt.response_status', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=706,
serialized_end=913,
)
_TASK.fields_by_name['app_engine_http_request'].message_type = google_dot_cloud_dot_tasks__v2beta3_dot_proto_dot_target__pb2._APPENGINEHTTPREQUEST
_TASK.fields_by_name['schedule_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_TASK.fields_by_name['create_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_TASK.fields_by_name['first_attempt'].message_type = _ATTEMPT
_TASK.fields_by_name['last_attempt'].message_type = _ATTEMPT
_TASK.fields_by_name['view'].enum_type = _TASK_VIEW
_TASK_VIEW.containing_type = _TASK
_TASK.oneofs_by_name['payload_type'].fields.append(
_TASK.fields_by_name['app_engine_http_request'])
_TASK.fields_by_name['app_engine_http_request'].containing_oneof = _TASK.oneofs_by_name['payload_type']
_ATTEMPT.fields_by_name['schedule_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_ATTEMPT.fields_by_name['dispatch_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_ATTEMPT.fields_by_name['response_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_ATTEMPT.fields_by_name['response_status'].message_type = google_dot_rpc_dot_status__pb2._STATUS
DESCRIPTOR.message_types_by_name['Task'] = _TASK
DESCRIPTOR.message_types_by_name['Attempt'] = _ATTEMPT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Task = _reflection.GeneratedProtocolMessageType('Task', (_message.Message,), dict(
DESCRIPTOR = _TASK,
__module__ = 'google.cloud.tasks_v2beta3.proto.task_pb2'
,
__doc__ = """A unit of scheduled work.
Attributes:
name:
Optionally caller-specified in [CreateTask][google.cloud.tasks
.v2beta3.CloudTasks.CreateTask]. The task name. The task
name must have the following format: ``projects/PROJECT_ID/loc
ations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID`` -
``PROJECT_ID`` can contain letters ([A-Za-z]), numbers
([0-9]), hyphens (-), colons (:), or periods (.). For more
information, see `Identifying projects
<https://cloud.google.com/resource-manager/docs/creating-
managing-projects#identifying_projects>`_ - ``LOCATION_ID``
is the canonical ID for the task's location. The list of
available locations can be obtained by calling [ListLocatio
ns][google.cloud.location.Locations.ListLocations]. For
more information, see
https://cloud.google.com/about/locations/. - ``QUEUE_ID`` can
contain letters ([A-Za-z]), numbers ([0-9]), or hyphens
(-). The maximum length is 100 characters. - ``TASK_ID`` can
contain only letters ([A-Za-z]), numbers ([0-9]), hyphens
(-), or underscores (\_). The maximum length is 500
characters.
payload_type:
Required. The task's payload is used by the task's target to
process the task. A payload is valid only if it is compatible
with the queue's target.
app_engine_http_request:
App Engine HTTP request that is sent to the task's target. Can
be set only if [app\_engine\_http\_queue][google.cloud.tasks.v
2beta3.Queue.app\_engine\_http\_queue] is set on the queue.
An App Engine task is a task that has [AppEngineHttpRequest][g
oogle.cloud.tasks.v2beta3.AppEngineHttpRequest] set.
schedule_time:
The time when the task is scheduled to be attempted. For App
Engine queues, this is when the task will be attempted or
retried. ``schedule_time`` will be truncated to the nearest
microsecond.
create_time:
Output only. The time that the task was created.
``create_time`` will be truncated to the nearest second.
dispatch_count:
Output only. The number of attempts dispatched. This count
includes tasks which have been dispatched but haven't received
a response.
response_count:
Output only. The number of attempts which have received a
response.
first_attempt:
Output only. The status of the task's first attempt. Only [di
spatch\_time][google.cloud.tasks.v2beta3.Attempt.dispatch\_tim
e] will be set. The other
[Attempt][google.cloud.tasks.v2beta3.Attempt] information is
not retained by Cloud Tasks.
last_attempt:
Output only. The status of the task's last attempt.
view:
Output only. The view specifies which subset of the
[Task][google.cloud.tasks.v2beta3.Task] has been returned.
""",
# @@protoc_insertion_point(class_scope:google.cloud.tasks.v2beta3.Task)
))
_sym_db.RegisterMessage(Task)
Attempt = _reflection.GeneratedProtocolMessageType('Attempt', (_message.Message,), dict(
DESCRIPTOR = _ATTEMPT,
__module__ = 'google.cloud.tasks_v2beta3.proto.task_pb2'
,
__doc__ = """The status of a task attempt.
Attributes:
schedule_time:
Output only. The time that this attempt was scheduled.
``schedule_time`` will be truncated to the nearest
microsecond.
dispatch_time:
Output only. The time that this attempt was dispatched.
``dispatch_time`` will be truncated to the nearest
microsecond.
response_time:
Output only. The time that this attempt response was received.
``response_time`` will be truncated to the nearest
microsecond.
response_status:
Output only. The response from the target for this attempt.
If ``response_time`` is unset, then the task has not been
attempted or is currently running and the ``response_status``
field is meaningless.
""",
# @@protoc_insertion_point(class_scope:google.cloud.tasks.v2beta3.Attempt)
))
_sym_db.RegisterMessage(Attempt)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\036com.google.cloud.tasks.v2beta3B\tTaskProtoP\001Z?google.golang.org/genproto/googleapis/cloud/tasks/v2beta3;tasks'))
# @@protoc_insertion_point(module_scope)
|
from .endpoint import Endpoint, QuerysetEndpoint, api
from .exceptions import FlowRunFailedException, FlowRunCancelledException
from .. import FlowRunItem, PaginationItem
from ...exponential_backoff import ExponentialBackoffTimer
import logging
logger = logging.getLogger("tableau.endpoint.flowruns")
class FlowRuns(QuerysetEndpoint):
def __init__(self, parent_srv):
super(FlowRuns, self).__init__(parent_srv)
@property
def baseurl(self):
return "{0}/sites/{1}/flows/runs".format(self.parent_srv.baseurl, self.parent_srv.site_id)
    # Get all flow runs
@api(version="3.10")
def get(self, req_options=None):
logger.info("Querying all flow runs on site")
url = self.baseurl
server_response = self.get_request(url, req_options)
pagination_item = PaginationItem.from_response(server_response.content, self.parent_srv.namespace)
all_flow_run_items = FlowRunItem.from_response(server_response.content, self.parent_srv.namespace)
return all_flow_run_items, pagination_item
    # Get 1 flow run by id
@api(version="3.10")
def get_by_id(self, flow_run_id):
if not flow_run_id:
error = "Flow ID undefined."
raise ValueError(error)
logger.info("Querying single flow (ID: {0})".format(flow_run_id))
url = "{0}/{1}".format(self.baseurl, flow_run_id)
server_response = self.get_request(url)
return FlowRunItem.from_response(server_response.content, self.parent_srv.namespace)[0]
# Cancel 1 flow run by id
@api(version="3.10")
def cancel(self, flow_run_id):
if not flow_run_id:
error = "Flow ID undefined."
raise ValueError(error)
id_ = getattr(flow_run_id, 'id', flow_run_id)
url = "{0}/{1}".format(self.baseurl, id_)
self.put_request(url)
logger.info("Deleted single flow (ID: {0})".format(id_))
@api(version="3.10")
def wait_for_job(self, flow_run_id, *, timeout=None):
if isinstance(flow_run_id, FlowRunItem):
flow_run_id = flow_run_id.id
assert isinstance(flow_run_id, str)
logger.debug(f"Waiting for flow run {flow_run_id}")
backoffTimer = ExponentialBackoffTimer(timeout=timeout)
flow_run = self.get_by_id(flow_run_id)
while flow_run.completed_at is None:
backoffTimer.sleep()
flow_run = self.get_by_id(flow_run_id)
logger.debug(f"\tFlowRun {flow_run_id} progress={flow_run.progress}")
logger.info("FlowRun {} Completed: Status: {}".format(flow_run_id, flow_run.status))
if flow_run.status == "Success":
return flow_run
elif flow_run.status == "Failed":
raise FlowRunFailedException(flow_run)
elif flow_run.status == "Cancelled":
raise FlowRunCancelledException(flow_run)
else:
raise AssertionError("Unexpected status in flow_run", flow_run) |
import configparser
from datetime import datetime
import discord
from discord.ext import tasks
@tasks.loop(hours=24)
async def activity_loop(bot):
await bot.wait_until_ready()
if bot.is_closed(): return
current_day = datetime.today().strftime("%A")
await bot.change_presence(
activity=discord.Activity(
type=discord.ActivityType.watching,
name=f'it be {current_day}'
)
)
with open("config.ini") as file:
config = configparser.RawConfigParser(allow_no_value=True)
config.read_string(file.read())
channel = bot.get_channel(config.getint('parameters', 'channel-id'))
if channel is None:
raise Exception(
"Please invite this bot into a server with a channel that you've specified in the `.env` file."
)
await channel.send(
f'When it {current_day} {config.get("videos", current_day.lower())}'
)
|
#!/usr/bin/env python
# encoding: utf-8
import json
data = [{'a': 'A', 'b': (2, 4), 'c': 3.0}]
print('DATA:', repr(data))
print('repr(data) :', len(repr(data)))
print('dumps(data) :', len(json.dumps(data)))
print('dumps(data, indent=2) :', len(json.dumps(data, indent=2)))
print('dumps(data, separators):', len(json.dumps(data, separators=(',', ':'))))
print('dumps(data, separators):', json.dumps(data, separators=(',', ':')))
|
import numpy as np
import pandas as pd
class Dataset(object):
def __init__(self , data_path):
self.df = pd.read_csv(data_path, header = None)
self.Y = self.one_hot_encoding(self.df.iloc[0:, 4].values)
self.x = self.standard_deviation(self.df.iloc[0:, [0 , 1 , 2 , 3]].values)
def split_data(self , x , Y , p):
data = []
for i in range(x.shape[0]):
data.append([])
data[i].append(np.array(x[i]))
data[i].append(np.array(Y[i]))
np.random.shuffle(data)
split = int(Y.shape[0] * p)
data = np.array(data)
self.train_x , self.train_Y = data[: split , 0] , data[: split , 1]
self.test_x , self.test_Y = data[split: , 0] , data[split: , 1]
self.train_x = np.array([x.tolist() for x in self.train_x.tolist()])
self.train_Y = np.array([Y.tolist() for Y in self.train_Y.tolist()])
self.test_x = np.array([x.tolist() for x in self.test_x.tolist()])
self.test_Y = np.array([Y.tolist() for Y in self.test_Y.tolist()])
return self
def standard_deviation(self , X):
X_std = np.copy(X)
for i in range(0 , X.shape[1]):
X_std[: , i] = (X[: , i] - X[: , i].mean()) / X[: , i].std()
return X_std
def one_hot_encoding(self,Y):
classes = np.unique(Y)
number = [x for x in range(0 , classes.shape[0])]
a = np.array([classes , number]).T
for i in range(0 , a.shape[0]):
Y = np.where(Y == a[i][0] , a[i][1] , Y)
Y = [i for i in Y]
targets = np.array(Y).reshape(-1)
one_hot_targets = np.eye(a.shape[0])[targets]
return one_hot_targets
def output_process(self,Y):
output = list()
for i in Y:
output.append(self.output_transform(i))
return np.array(output)
|
from anoncreds.protocol.utils import crypto_int_to_str, isCryptoInteger, intToArrayBytes
def get_claim_request_libindy_msg(claim_req, schema_seq_no):
return ({
'type': 'CLAIM_REQUEST',
'data': {
'issuer_did': 'FuN98eH2eZybECWkofW6A9BKJxxnTatBCopfUiNxo6ZB',
'blinded_ms': {
'prover_did': 'b1134a647eb818069c089e7694f63e6d',
'u': str(crypto_int_to_str(claim_req.U)),
'ur': None
},
'schema_seq_no': schema_seq_no
},
'nonce': 'b1134a647eb818069c089e7694f63e6d',
})
def get_claim_libindy_msg(signature, schema_seq_no):
return ({'type': 'CLAIM',
'refRequestId': 1498207862797639,
'data': {
'claim': '{'
'"ssn": ["123-45-6789", "744326867119662813058574151710572260086480987778735990385444735594385781152"], '
'"student_name": ["Alice Garcia", "42269428060847300013074105341288624461740820166347597208920185513943254001053"], '
'"year": ["2015", "76155730627064255622230347398579434243999717245284701820698087443021519005597"],'
'"status": ["graduated", "79954080701401061138041003494589205197191732193019334789897013390726508263804"], '
'"degree": ["Bachelor of Science, Marketing", "111351644242834420607747624840774158853435703856237568018084128306949040580032"]}',
'schema_seq_no': schema_seq_no,
'revoc_reg_seq_no': None,
'issuer_did': 'FuN98eH2eZybECWkofW6A9BKJxxnTatBCopfUiNxo6ZB',
'signature': {
'non_revocation_claim': None,
'primary_claim': {
'm2': '{}'.format(crypto_int_to_str(signature.primaryClaim.m2)),
'e': '{}'.format(str(signature.primaryClaim.e)),
'v': '{}'.format(str(signature.primaryClaim.v)),
'a': '{}'.format(crypto_int_to_str(signature.primaryClaim.A))}
}
},
'reqId': 1498207879197729,
'signature': '3v4CJnCpFv3on9DJKzourd9RfvX3gz5yXY1jkhxc8FktHVbvx1ghBJC7DUYMAJzApPUAYMyTzyMB6Dm8HEzhAtvM',
'identifier': 'ULtgFQJe6bjiFbs7ke3NJD'}, ('Faber College', ('127.0.0.1', 6918)))
def get_proof_libindy_msg(link, proof_req, proof, uuid, schema_seq_no):
eqProof = proof.proofs[str(uuid)].proof.primaryProof.eqProof
return ({'type': 'PROOF',
'nonce': '{}'.format(link.request_nonce),
'proof_request': proof_req.to_str_dict(),
'proof': {
'proofs': {
uuid: {
'proof': {
'primary_proof': {
'eq_proof': {
'revealed_attrs': {k: str(v) for k, v in eqProof.revealedAttrs.items()},
'a_prime': '{}'.format(crypto_int_to_str(eqProof.Aprime)),
'e': '{}'.format(crypto_int_to_str(eqProof.e)),
'v': '{}'.format(crypto_int_to_str(eqProof.v)),
'm': {k: str(crypto_int_to_str(v)) for k, v in eqProof.m.items()},
'm1': '{}'.format(crypto_int_to_str(eqProof.m1)),
'm2': '{}'.format(crypto_int_to_str(eqProof.m2))
},
'ge_proofs': {}
},
'non_revoc_proof': None
},
'issuer_did': 'FuN98eH2eZybECWkofW6A9BKJxxnTatBCopfUiNxo6ZB',
'schema_seq_no': schema_seq_no,
'revoc_reg_seq_no': None
}
},
'aggregated_proof': {
'c_hash': '{}'.format(str(proof.aggregatedProof.cHash)),
'c_list': [intToArrayBytes(v) for v in proof.aggregatedProof.CList if isCryptoInteger(v)]
},
'requested_proof': {
'revealed_attrs': proof.requestedProof.revealed_attrs,
'unrevealed_attrs': proof.requestedProof.unrevealed_attrs,
'self_attested_attrs': proof.requestedProof.self_attested_attrs,
'predicates': proof.requestedProof.predicates
}
}})
|
from __future__ import absolute_import
# flake8: noqa
# import apis into api package
from fabric_cm.credmgr.swagger_client.api.default_api import DefaultApi
from fabric_cm.credmgr.swagger_client.api.tokens_api import TokensApi
|
"""add last_modified to gym_defenders
Revision ID: 35dee4fce912
Revises: 5f84a4df8243
Create Date: 2017-11-01 14:27:52.303046
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = '35dee4fce912'
down_revision = '5f84a4df8243'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('gym_defenders', sa.Column('last_modified', sa.Integer(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('gym_defenders', 'last_modified')
# ### end Alembic commands ###
|
from random import choice
from aiohttp import ClientSession
from io import TextIOWrapper, BytesIO
from re import compile
version = '0.1.4'
animals = [
"cat", "dog", "bird", "panda",
"redpanda", "koala", "fox", "whale",
"kangaroo", "bunny", "lion", "bear",
"frog", "duck", "penguin"
]
get_animal_name = compile('/img/([a-z]+)/').findall
get_file_name = compile('/img/([a-z]+)/([0-9]+.[a-z]+)').findall
get_file_extension = compile('[0-9]+.([a-z]+)').findall
class AnimalityResponse:
__slots__ = ('fact', 'image', '_image_bytes', '_request_image')
def __init__(self, fact: str, image: str, request):
self.fact = fact
self.image = image
self._image_bytes = None
self._request_image = lambda: None if self._image_bytes else request(get_file_name(self.image)[0])
def __repr__(self) -> str:
return f"<Animal[{self.name.upper()}] fact='{self.fact}' image='{self.image}'>"
@property
def name(self) -> str:
""" Returns the animal name. """
return get_animal_name(self.image)[0]
async def get_image(self) -> bytes:
""" Downloads the image from the API. """
try:
response = await self._request_image()
self._image_bytes = await response.read()
return self._image_bytes
except AttributeError:
return self._image_bytes
except:
raise Exception("Client already closed.")
async def save_image(self, data: "io.TextIOWrapper | io.BytesIO | str") -> None:
""" Writes the animal image to a file. """
if isinstance(data, TextIOWrapper):
if data.mode != 'wb':
raise TypeError(f"Invalid mode. Try 'open(\"{data.name}\", \"wb\")'.")
file_extension = get_file_extension(self.image)[0]
if data.name[-len(file_extension):] != file_extension:
raise IOError(f"Invalid file extension. It must be .{file_extension}")
return data.write(await self.get_image()) and None
elif isinstance(data, BytesIO):
return data.write(await self.get_image()) and None
elif isinstance(data, str):
file_extension = get_file_extension(self.image)[0]
if data[-len(file_extension):] != file_extension:
data += f".{file_extension}"
with open(data, "wb") as f:
f.write(await self.get_image())
return f.close()
raise TypeError(f"Expected io.TextIOWrapper, io.BytesIO, or str. got {data.__class__.__name__}")
class AnimalityClient:
__slots__ = ('_request', '_close')
def __init__(self, session: "ClientSession" = None):
        if session is not None:
            if not (isinstance(session, ClientSession) and not session.closed):
                raise TypeError("Invalid client session. A session must be an instance of aiohttp.ClientSession and it must not be closed.")
        else:
            session = ClientSession()
self._request = lambda path: session.get(f'https://{"" if path[5:] == "/img/" else "api."}animality.xyz{path}')
self._close = lambda: None if session.closed else session.close
@property
def closed(self) -> bool:
""" Returns true if the client is closed. """
return self._close is None
def __repr__(self) -> str:
return f"<AnimalityClient closed={self.closed}>"
async def get(self, animal: str) -> "AnimalityResponse":
""" Fetches random image and fact for a specific animal. """
animal = animal.strip(' ').lower() if isinstance(animal, str) else ''
if animal not in animals:
raise FileNotFoundError(f"The animal: '{animal}' is not supported yet.")
try:
fact = await self._request(f'/fact/{animal}')
fact = await fact.json()
image = await self._request(f'/img/{animal}')
image = await image.json()
return AnimalityResponse(fact['fact'], image['link'], self._request)
except:
raise Exception("An error occurred while requesting to the API. Please try again later.")
async def random(self) -> "AnimalityResponse":
""" Does the same as get() function except it uses a random animal. """
return await self.get(choice(animals))
async def test(self) -> bool:
""" Tests if the API is working or not. Returns a boolean regardless of errors. """
try:
response = await self._request('/')
response = await response.json()
return response['success'] == 'API is ACTIVE'
except:
return False
async def close(self) -> None:
""" Closes the client. """
try:
await self._close()()
except:
return
async def get(animal: str, *, session: "aiohttp.ClientSession | AnimalityClient" = None) -> "AnimalityResponse":
""" Fetches random image and fact for a specific animal. """
client = AnimalityClient(session if session and not isinstance(session, AnimalityClient) else None)
ret, error = None, None
try:
ret = await client.get(animal)
except Exception as e:
error = e
await client.close()
if error:
raise error
return ret
async def random(*, session: "aiohttp.ClientSession | AnimalityClient" = None) -> "AnimalityResponse":
""" Fetches random image and fact for a random animal. """
client = AnimalityClient(session if session and not isinstance(session, AnimalityClient) else None)
ret, error = None, None
try:
ret = await client.random()
except Exception as e:
error = e
await client.close()
if error:
raise error
return ret
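# A minimal usage sketch of the module-level helpers above (runs only when
# executed directly; assumes network access to the animality API, and the
# chosen animal is arbitrary).
if __name__ == '__main__':
    import asyncio

    async def _demo():
        animal = await get('cat')
        print(animal.name, '-', animal.fact)
        print('image url:', animal.image)

    asyncio.run(_demo())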
|
import xadmin
from .models import Banner
class BannerAdmin(object):
# 显示不要用image,而应该用image_img
list_display = ['title', 'image_img', 'url', 'index', 'add_time']
search_fields = ['title', 'url', 'index']
list_filter = ['title', 'url', 'index', 'add_time']
model_icon = 'fa fa-picture-o'
# 注册轮播图
xadmin.site.register(Banner, BannerAdmin) |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Usage: program.py <customer>
program.py <customer>
program.py (-h | --help)
program.py --version
Options:
-h --help Show help.
--version Show version.
"""
# TODO
#
# Ty found these errors
# Group-20 and Group-30 are identical, including rxnId. Another pair is Group-1383 and Group-1417. In total there are 80 pairs with identical rxnIds, and three sets of three identical rxnIds. I spot checked a few, all the info (SET, bel statements, etc) were the same within a pair that have the same rxnId.
# WSH Should be fixed by creating a set from ReactionList- Looks like the human, mouse and rat share rxnIds. I’ll fix that before the next drop.
# There are cases of identical “=>” relationships that are not due to identical rxnIds. I look into one of them (Group-65 and Group-66). In this case, the evidence lines for these groups are the same except for GYS1-a is in one and GYS1-b is in the other. A google search tells me that a is the unphosphorylated form, and b is the phosphorylated form. Everything other than the evidence lines for these groups are the same, which seems like a reasonable interpretation. I looked up a few more of these cases and they all were cases where there were separate reactions for the phosphorylated and unphosphorylated forms of an enzyme catalyzing the same reaction.
# WSH - Not sure how to handle this. Need a more sophisticated parse of Reactome at the very least to capture these subtleties.
# There is some weird nesting of the location with complexes. See Group-10. Essentially, there are complexes with loc() of this form:
# complex(complex(p(x,loc(Y)), loc(y)), loc(y)), with a single hasComponent statement showing that it is composed of complex(p(x,loc(Y)), loc(y)). I’m still working out exactly how we will be using the hasComponent statements – right now I don’t foresee needing this to be fixed, but I may run into scenarios where this structure causes problems.
# WSH - The highlighted complex presentation is a bug – I’ll fix that too. I de-dup the dimers, but didn’t sort out the locations at the same time.
import os
import copy
import time
import re
from jinja2 import Environment, FileSystemLoader
import click
from toBel import toBel, dedup, dedupList, escapeBelString, setBelVersion
from reactome_webservice import getEntityData, getReactions
# Overwrite logs on each run -> filemode = 'w'
import log_setup
log = log_setup.getLogger(name='root')
# log.debug('This message should go to the log file')
# log.info('So should this')
# log.warning('And this, too')
# Jinja template
PATH = os.path.dirname(os.path.abspath(__file__))
TEMPLATE_ENVIRONMENT = Environment(
autoescape=False,
loader=FileSystemLoader(os.path.join(PATH, 'templates')),
trim_blocks=False)
template_filename = 'belscript.jinja2'
speciesList = ['Homo sapiens', 'Mus musculus', 'Rattus norvegicus']
def convertSpeciesNameToTaxId(name):
species = {
'Homo sapiens': 9606,
'Mus musculus': 10090,
'Rattus norvegicus': 10116,
}
return species[name]
def render_template(template_filename, evidences, pathways=None):
context = {}
# Todo add the following to a configuration file and automate the date
if pathways:
setname = '{} Pathway Reactome Reactions'.format(' and '.join(pathways))
else:
setname = 'All Reactome Reactions'
context['BEL_DOCUMENT_NAME'] = setname
context['BEL_DOCUMENT_DESCRIPTION'] = '{} converted to BEL 1.0'.format(setname)
context['AUTHORS'] = 'Selventa; Nimisha Schneider; Natalie Catlett; William Hayes'
context['DATE'] = time.strftime("%Y-%m-%d")
context['VERSION'] = '0.1'
context['CONTACT_EMAIL'] = '[email protected]'
context['evidences'] = evidences
return TEMPLATE_ENVIRONMENT.get_template(template_filename).render(context)
def buildStatements(catalysts, inputs, outputs):
input_bel = ', '.join([bel for input in inputs for bel in input])
output_bel = ', '.join([bel for output in outputs for bel in output])
rxn = 'rxn(reactants({}), products({}))'.format(input_bel, output_bel)
statements = []
if catalysts:
for catalyst in catalysts:
if catalyst:
for bel in catalyst:
statement = 'cat({}) => {}'.format(bel, rxn)
statements.append(statement)
else:
log.error('No BEL in catalyst: {}'.format(catalysts))
if not statements:
statements.append(rxn)
for results in (catalysts, inputs, outputs):
for result in results:
if result:
for bel in result:
for statement in result[bel]:
statements.append(statement)
else:
log.error('No BEL in result: {}'.format(results))
return statements
def buildBelEvidences(reactionList, belversion, pathways=None):
''' Load reactions and build BEL Evidences'''
rxnUrlTpl = 'http://www.reactome.org/PathwayBrowser/#'
evidences = []
bad_namespaces_evidences = [] # collect bad evidences
# indexCnt = 0
for rxnId, rxnName in reactionList:
print('rxnId: ', rxnId)
# indexCnt += 1
# if indexCnt > 2:
# break
# Process Annotation information
rxnData = getEntityData(rxnId)
if 'stableIdentifier' not in rxnData:
continue
stableId = rxnData['stableIdentifier']['displayName']
rxnUrl = '{}{}'.format(rxnUrlTpl, stableId)
rxnName = escapeBelString(rxnData['displayName'])
rxnType = rxnData['schemaClass']
# Todo collect all compartments and annotate
compartment = 'Unknown'
if 'compartment' in rxnData:
compartment = rxnData['compartment'][0]['displayName']
rxnAuthor = rxnDate = None
if 'created' in rxnData:
try:
matches = re.search(r'(.*?),\s+(\d{4,4}-\d{2,2}-\d{2,2})', rxnData['created']['displayName'])
if matches:
rxnAuthor = matches.group(1)
rxnDate = matches.group(2)
except:
log.info('Rxn - cannot find created date and author in object: {}'.format(rxnId))
if rxnDate and rxnAuthor:
citation = '{{"Online Resource", "{}", "{}", "{}", "{}"}}'.format(rxnName, rxnUrl, rxnDate, rxnAuthor)
else:
citation = '{{"Online Resource", "{}", "{}"}}'.format(rxnName, rxnUrl)
evidence = {
'name': rxnName,
'rxnId': rxnId, # TODO remove after debugging
'rxnType': rxnType,
'compartment': compartment,
'species': rxnData['speciesName'],
'species_tax_id': convertSpeciesNameToTaxId(rxnData['speciesName']),
'summary_text': rxnName,
'citation': citation,
}
# Process BEL Statement
catalysts, inputs, outputs = [], [], []
if 'catalystActivity' in rxnData:
for catalyst in rxnData['catalystActivity']:
catalysts.append(toBel(catalyst['dbId']))
# print('Catalyst: {}'.format(catalyst['dbId']))
if 'input' in rxnData:
for input in dedup(rxnData['input']):
inputs.append(toBel(input['dbId']))
if 'output' in rxnData:
for output in dedup(rxnData['output']):
outputs.append(toBel(output['dbId']))
print('Catalysts ', catalysts)
print('Inputs ', inputs)
print('Outputs ', outputs)
print('\n')
statements = buildStatements(catalysts, inputs, outputs)
evidence['statements'] = dedupList(statements)
bad_namespace_flag = False
for statement in statements:
if 'ENSEMBL' in statement or 'EMBL' in statement:
bad_namespace_flag = True
# with open('tmp_evidences.json', 'a') as f:
# json.dump(evidence, f, indent=4)
# f.write('\n\n')
if bad_namespace_flag:
bad_namespaces_evidences.append(copy.deepcopy(evidence))
else:
evidences.append(copy.deepcopy(evidence))
belscript = render_template(template_filename, evidences, pathways=pathways)
fn = 'reactome.bels'
if belversion == '2':
fn += '2'
with open(fn, 'w') as f:
f.write(belscript)
import json
with open('bad_evidences.json', 'w') as f:
json.dump(bad_namespaces_evidences, f, indent=4)
@click.command()
@click.option('--belversion', '-b', default='1', type=click.Choice(['1', '2']), help="Use Bel 1 by default or select Bel 2")
@click.option('--species', '-s', multiple=True, type=click.Choice(['all', 'Homo sapiens', 'Mus musculus', 'Rattus norvegicus']))
@click.option('--pathways', '-p', default=None, multiple=True, help="Restrict to specific Reactome Pathway(s) - e.g. Metabolism - can use multiple -p Metabolism -p Pathway2 ...")
def main(belversion, species, pathways):
"""Process Reactome into BEL
Example: ./processReactome.py -b 1 -s "Homo sapiens" -s "Mus musculus" -p Metabolism
Example: ./processReactome.py -b 2 -s "Homo sapiens" -p Metabolism -p "Transmembrane transport of small molecules"
Result: reactome.bels
"""
setBelVersion(belversion)
# quit and show help if no arguments are set
if not species:
        ctx = click.get_current_context()
        click.echo(ctx.get_help())
        ctx.exit()
if 'all' in species:
species = speciesList
# Collect reactions
reactionList = []
for specie in species:
reactionList.extend(getReactions(specie, pathways=pathways))
# import json
# with open('reactionlist.json', 'w') as f:
# json.dump(reactionList, f, indent=4)
# quit()
# human, mouse and rat share rxnIds
reactionList = set(reactionList)
buildBelEvidences(reactionList, belversion, pathways=pathways)
# buildBelEvidences([('109514', 'Test')])
# # buildBelEvidences([('450092', 'Test')])
# quit()
# # Get Metabolism reactions for human only
# reactionList = getReactions('homo sapiens', pathway='Metabolism')
# quit()
# # Get all Reactome reactions for model organisms
# reactionList = []
# for species in ['Homo sapiens', 'Mus musculus', 'Rattus norvegicus']:
# reactionList.extend(getReactions(species))
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012-2013 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import sys
import time
logging.basicConfig(level=logging.ERROR)
top_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),
os.pardir,
os.pardir))
sys.path.insert(0, top_dir)
import taskflow.engines
from taskflow import exceptions
from taskflow.patterns import unordered_flow as uf
from taskflow import task
from taskflow.tests import utils
from taskflow.utils import misc
# INTRO: In this example we create two tasks which can trigger exceptions
# based on various inputs to show how to analyze the thrown exceptions for
# which types were thrown and handle the different types in different ways.
#
# This is especially important if a set of tasks run in parallel and each of
# those tasks may fail while running. This creates a scenario where multiple
# exceptions have been thrown and those exceptions need to be handled in a
# unified manner. Since an engine does not currently know how to resolve
# those exceptions (someday it could) the code using that engine and activating
# the flows and tasks using that engine will currently have to deal with
# catching those exceptions (and silencing them if this is desired).
#
# NOTE(harlowja): The engine *will* trigger rollback even under multiple
# exceptions being thrown, but at the end of that rollback the engine will
# rethrow these exceptions to the code that called the run() method; allowing
# that code to do further cleanups (if desired).
def print_wrapped(text):
print("-" * (len(text)))
print(text)
print("-" * (len(text)))
class FirstException(Exception):
"""Exception that first task raises."""
class SecondException(Exception):
"""Exception that second task raises."""
class FirstTask(task.Task):
def execute(self, sleep1, raise1):
time.sleep(sleep1)
if not isinstance(raise1, bool):
raise TypeError('Bad raise1 value: %r' % raise1)
if raise1:
raise FirstException('First task failed')
class SecondTask(task.Task):
def execute(self, sleep2, raise2):
time.sleep(sleep2)
if not isinstance(raise2, bool):
raise TypeError('Bad raise2 value: %r' % raise2)
if raise2:
raise SecondException('Second task failed')
def run(**store):
# Creates a flow, each task in the flow will examine the kwargs passed in
# here and based on those kwargs it will behave in a different manner
# while executing; this allows for the calling code (see below) to show
# different usages of the failure catching and handling mechanism.
flow = uf.Flow('flow').add(
FirstTask(),
SecondTask()
)
try:
with utils.wrap_all_failures():
taskflow.engines.run(flow, store=store,
engine_conf='parallel')
except exceptions.WrappedFailure as ex:
unknown_failures = []
for failure in ex:
if failure.check(FirstException):
print("Got FirstException: %s" % failure.exception_str)
elif failure.check(SecondException):
print("Got SecondException: %s" % failure.exception_str)
else:
print("Unknown failure: %s" % failure)
unknown_failures.append(failure)
misc.Failure.reraise_if_any(unknown_failures)
print_wrapped("Raise and catch first exception only")
run(sleep1=0.0, raise1=True,
sleep2=0.0, raise2=False)
# NOTE(imelnikov): in general, sleeping does not guarantee that we'll have both
# tasks running before one of them fails, but with the current implementation this
# works most of the time, which is enough for our purposes here (as an example).
print_wrapped("Raise and catch both exceptions")
run(sleep1=1.0, raise1=True,
sleep2=1.0, raise2=True)
print_wrapped("Handle one exception, and re-raise another")
try:
run(sleep1=1.0, raise1=True,
sleep2=1.0, raise2='boom')
except TypeError as ex:
print("As expected, TypeError is here: %s" % ex)
else:
assert False, "TypeError expected"
|
from rest_framework.pagination import PageNumberPagination
class PaginateContent(PageNumberPagination):
"""
Custom pagination class
"""
page_size = 10
page_size_query_param = 'page_size'
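# A minimal usage sketch (assuming a typical DRF project; the view, app and setting names
# below are illustrative, not part of this module). The class can be wired up per-view:
#
#   class ArticleViewSet(viewsets.ModelViewSet):
#       queryset = Article.objects.all()
#       serializer_class = ArticleSerializer
#       pagination_class = PaginateContent
#
# or globally in settings.py:
#
#   REST_FRAMEWORK = {
#       'DEFAULT_PAGINATION_CLASS': 'myapp.pagination.PaginateContent',
#       'PAGE_SIZE': 10,
#   }
#
# Clients can then override the page length with ?page_size=<n> thanks to page_size_query_param.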
|
# ~\AppData\Local\Programs\Python\Python38\python.exe
# config.py
import os
basedir = os.path.abspath(os.path.dirname(__file__)) # Gets current folder
class Config(object):
    # Read the configuration from environment variables, with development fallbacks
SECRET_KEY = os.environ.get('SECRET_KEY') or 'this-is-an-example-password'
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or 'sqlite:///' + os.path.join(basedir, 'app.db')
SQLALCHEMY_TRACK_MODIFICATIONS = False
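# Minimal usage sketch (assuming the usual Flask application layout; 'app' is illustrative
# and not defined in this file):
#
#   from flask import Flask
#   from config import Config
#
#   app = Flask(__name__)
#   app.config.from_object(Config)
#
# With no SECRET_KEY or DATABASE_URL set in the environment, the fallbacks above provide a
# development secret and a SQLite database 'app.db' next to this file.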
|
import pandas as pd
import Levenshtein
import numpy as np
from anytree.search import find
from utils.category_tree import get_category_tree
from utils.io_custom import read_pickle_object
from scipy.spatial.distance import cosine
import re
def find_node(id, tree):
return find(tree, lambda node: node.name == id)
def get_relative_depth(id, tree):
return (find_node(id, tree).depth - find_node(872901, tree).depth)
def count_children(id, tree):
return len(find_node(id, tree).children)
def count_descendants(id, tree):
return len(find_node(id, tree).descendants)
def preprocessing_text(s):
"""
Предобработка текста.
:param s: str - входная строка.
:return: str - обработанная строка.
"""
return str(" ".join(re.findall("[a-zA-Zа-яА-Я0-9]+", s)).lower())
def get_levenshtein_distance_between(first_line, second_line):
"""
Получает расстояние Левенштейна между двумя строками.
:param first_line: str - первая строка.
:param second_line: str - вторая строка.
:return: int - расстояние левеншейна между этими строками.
"""
return Levenshtein.distance(first_line, second_line)
def get_lev_dist_between_query_category(query, category):
"""
Получает расстояние Левенштейна между двумя сериями.
:param query: pd.Series - запрос.
:param second_line: pd.Series - категория.
:return: np.array, int - расстояние левеншейна между соответствующими элементами серий.
"""
levenshtein_distances = []
for query, category in zip(query.values, category.values):
current_distance = get_levenshtein_distance_between(query, category)
levenshtein_distances.append(current_distance)
return np.array(levenshtein_distances)
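# Quick example of the helpers above (strings are illustrative):
#
#   q = pd.Series(['red wine', 'iphone 11'])
#   c = pd.Series(['wine', 'smartphones apple'])
#   get_lev_dist_between_query_category(q, c)
#   # -> array of per-row edit distances; the first pair gives 4
#   #    ('red wine' -> 'wine' takes 4 deletions)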
def get_brands_and_products_lists(path_to_data):
"""
Получаем и преобразовываем списки брендов и продуктов из файлов.
"""
brands = pd.read_csv(path_to_data + "/unique_brands.csv")
brands = [str(brand) for brand in brands.iloc[:, 0]]
products = pd.read_csv(path_to_data + "/unique_products.csv")
products = [str(product) for product in products.iloc[:, 0]]
return brands, products
def create_data_with_features(path_to_data):
"""
Загружает данные для обучения и генерирует для них.
:param path_to_data: str - относительный путь к данным для обучения.
:return data: pd.DataFrame - датафрейм с кучей признаков
Оставлено для обратной совместимости с двумя блокнотами.
"""
data = pd.read_csv(path_to_data + "/data_for_model.csv")
return get_data_with_feature(data, path_to_data)
def get_cosine_dist_between_query_category(query, category, vectorizer):
"""
Получает косинусное расстояние между двумя колонками датафрейма.
:param query: pd.Series - запрос.
:param second_line: pd.Series - категория.
:param vectorizer: sklearn.feature_extraction.text.TfidfVectorizer - предобученный векторайзер на запросах и категориях из трейн выборки.
:return: np.array, int - косинусное расстояние между соответствующими элементами серий.
"""
query_sparse_matrix = vectorizer.transform(query.values)
category_sparse_matrix = vectorizer.transform(category.values)
distances = []
for query_vec, category_vec in zip(query_sparse_matrix, category_sparse_matrix):
current_distance = cosine(query_vec.toarray(), category_vec.toarray())
distances.append(current_distance)
return np.array(distances)
def get_data_with_feature(data, path_to_data):
"""
Генерирует признаки для обучающих и валидационных данных.
:param data: pd.DataFrame - обучающие или валидационные данные с колонками [query, category_id, category_name, is_redirect]
:param path_to_data: str - относительный путь к данным о брендах и продуктах.
:return data: pd.DataFrame - датафрейм с кучей признаков
"""
brands, products = get_brands_and_products_lists(path_to_data)
root = get_category_tree(path_to_data)
data['query'] = data['query'].apply(preprocessing_text)
data['category_name'] = data['category_name'].apply(preprocessing_text)
data['len_of_query'] = data['query'].apply(lambda query: len(query))
data['num_of_word_in_query'] = data['query'].apply(
lambda query:
len(query.split(' '))
)
data['len_of_category'] = data['category_name'].apply(
lambda category:
len(category)
)
data['num_of_word_in_category'] = data['category_name'].apply(
lambda category:
len(category.split(' '))
)
data['how_match_brands_name_in_query'] = data['query'].apply(
lambda query:
sum([True for brand in brands if query.find(brand) != -1])
)
data['how_match_products_name_in_query'] = data['query'].apply(
lambda query:
sum([True for product in products if query.find(product) != -1])
)
data['mean_word_len_in_category'] = data['category_name'].apply(
lambda category_name:
np.mean([len(word) for word in category_name.split(' ')])
)
data['mean_word_len_in_query'] = data['query'].apply(
lambda query:
np.mean([len(word) for word in query.split(' ')])
)
data['max_word_len_in_category'] = data['category_name'].apply(
lambda category_name:
np.max([len(word) for word in category_name.split(' ')])
)
data['max_word_len_in_query'] = data['query'].apply(
lambda query:
np.max([len(word) for word in query.split(' ')])
)
data['min_word_len_in_category'] = data['category_name'].apply(
lambda category_name:
np.min([len(word) for word in category_name.split(' ')])
)
data['min_word_len_in_query'] = data['query'].apply(
lambda query:
np.min([len(word) for word in query.split(' ')])
)
data['is_query_long'] = data['len_of_query'].apply(lambda l: int(l > 50))
    # TODO: verify the three features generated from the category tree below:
data['relative_depth'] = data['category_id'].apply(
lambda category_id:
get_relative_depth(category_id, root)
)
data['children_count'] = data['category_id'].apply(
lambda category_id:
count_children(category_id, root)
)
data['descendants_count'] = data['category_id'].apply(
lambda category_id:
count_descendants(category_id, root)
)
data['lev_dist'] = get_lev_dist_between_query_category(data['query'],
data['category_name'])
vectorizer = read_pickle_object(path_to_data + '/vectorizer.obj')
data['cosine_dist'] = get_cosine_dist_between_query_category(data['query'],
data['category_name'],
vectorizer)
# data['number_of_children_category'] = get_relative_depth(data['category_id'])
# data['number_of_descendants_category'] = count_descendants(data['category_id'])
# data['category_depth'] = get_relative_depth(data['category_id'])
data = data.drop(columns=['category_id', 'query', 'category_name'])
return data
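# Minimal usage sketch (assumes the expected CSV files, vectorizer.obj and category tree
# live under path_to_data; the path below is illustrative):
#
#   train = create_data_with_features('data')
#   X = train.drop(columns=['is_redirect'])
#   y = train['is_redirect']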
|
"""
crystal_tools
part of pyTEMlib
Author: Gerd Duscher
Provides convenient functions to make most regular crystal structures
Contains also a dictionary of crystal structures and atomic form factors
Units:
everything is in SI units, except length is given in nm.
angles are assumed to be in degree but will be internally converted to rad
Usage:
See the notebooks for examples of these routines
"""
import numpy as np
import itertools
import ase
import ase.spacegroup
import ase.build
import ase.data.colors
import matplotlib.pylab as plt # basic plotting
_spglib_present = True
try:
import spglib
except ModuleNotFoundError:
_spglib_present = False
if _spglib_present:
print('Symmetry functions of spglib enabled')
else:
print('spglib not installed; Symmetry functions of spglib disabled')
# from mpl_toolkits.mplot3d import Axes3D # 3D plotting
# from matplotlib.patches import Circle # , Ellipse, Rectangle
# from matplotlib.collections import PatchCollection
def get_dictionary(atoms):
"""
structure dictionary from ase.Atoms object
"""
tags = {'unit_cell': atoms.cell.array,
'elements': atoms.get_chemical_formula(),
'base': atoms.get_scaled_positions(),
'metadata': atoms.info}
return tags
def atoms_from_dictionary(tags):
atoms = ase.Atoms(cell=tags['unit_cell'],
symbols=tags['elements'],
scaled_positions=tags['base'])
if 'metadata' in tags:
atoms.info = tags['metadata']
return atoms
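# Round-trip sketch for the two helpers above (works for elemental structures; the crystal
# name is illustrative and structure_by_name is defined further down in this module):
#
#   atoms = structure_by_name('Silicon')
#   tags = get_dictionary(atoms)            # plain-dict representation
#   atoms2 = atoms_from_dictionary(tags)    # rebuild an ase.Atoms object
#   assert np.allclose(atoms.cell.array, atoms2.cell.array)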
def get_symmetry(atoms, verbose=True):
"""
Symmetry analysis with spglib
spglib must be installed
Parameters
----------
atoms: ase.Atoms object
crystal structure
verbose: bool
Returns
-------
"""
if _spglib_present:
if verbose:
print('#####################')
print('# Symmetry Analysis #')
print('#####################')
base = atoms.get_scaled_positions()
for i, atom in enumerate(atoms):
if verbose:
                print(f'{i + 1}: {atom.number} = {atom.symbol} : [{base[i][0]:.2f}, {base[i][1]:.2f}, {base[i][2]:.2f}]')
lattice = (atoms.cell, atoms.get_scaled_positions(), atoms.numbers)
spgroup = spglib.get_spacegroup(lattice, symprec=1e-2)
sym = spglib.get_symmetry(lattice)
if verbose:
print(" Spacegroup is %s." % spgroup)
            print(' Crystal has {0} symmetry operations'.format(sym['rotations'].shape[0]))
            p_lattice, p_positions, p_numbers = spglib.find_primitive(lattice, symprec=1e-5)
            print("\n########################\n# Basis vectors of primitive cell:")
            for i in range(3):
                print('[{0:.4f}, {1:.4f}, {2:.4f}]'.format(p_lattice[i][0], p_lattice[i][1], p_lattice[i][2]))
            print('There are {0} atoms and {1} unique species in the primitive unit cell:'.format(len(p_positions), len(np.unique(p_numbers))))
else:
print('spglib is not installed')
return True
def set_bond_radii(atoms):
bond_radii = np.ones(len(atoms))
for i in range(len(atoms)):
bond_radii[i] = electronFF[atoms.symbols[i]]['bond_length'][1]
atoms.info['bond_radii'] = bond_radii
def jmol_viewer(atoms, size=2):
"""
jmol viewer of ase .Atoms object
requires jupyter-jsmol to be installed (available through conda or pip)
Parameter
---------
atoms: ase.Atoms
structure info
size: int, list, or np.array of size 3; default 1
size of unit_cell; maximum = 8
Returns
-------
view: JsmolView object
Example
-------
from jupyter_jsmol import JsmolView
import ase
import ase.build
import itertools
import numpy as np
atoms = ase.build.bulk('Cu', 'fcc', a=5.76911, cubic=True)
for pos in list(itertools.product([0.25, .75], repeat=3)):
        atoms += ase.Atom('Al', atoms.cell.lengths()*pos)
    view = jmol_viewer(atoms, size=8)
display(view)
"""
try:
from jupyter_jsmol import JsmolView
from IPython.display import display
except ImportError:
print('this function is based on jupyter-jsmol, please install with: \n '
'conda install -c conda-forge jupyter-jsmol')
return
if isinstance(size, int):
size = [size] * 3
[a, b, c] = atoms.cell.lengths()
[alpha, beta, gamma] = atoms.cell.angles()
view = JsmolView.from_ase(atoms, f"{{{size[0]} {size[1]} {size[2]}}}"
f" unitcell {{{a:.3f} {b:.3f} {c:.3f} {alpha:.3f} {beta:.3f} {gamma:.3f}}}")
display(view)
return view
def plot_super_cell(super_cell, shift_x=0.):
""" make a super_cell to plot with extra atoms at periodic boundaries"""
if not isinstance(super_cell, ase.Atoms):
raise TypeError('Need an ase Atoms object')
super_cell2plot = super_cell * (2, 2, 2)
super_cell2plot.positions[:, 0] = super_cell2plot.positions[:, 0] - super_cell2plot.cell[0, 0] * shift_x
del super_cell2plot[super_cell2plot.positions[:, 2] > super_cell.cell[2, 2] + 0.1]
del super_cell2plot[super_cell2plot.positions[:, 1] > super_cell.cell[1, 1] + 0.1]
del super_cell2plot[super_cell2plot.positions[:, 0] > super_cell.cell[0, 0] + 0.1]
del super_cell2plot[super_cell2plot.positions[:, 0] < -0.1]
super_cell2plot.cell = super_cell.cell * (1, 1, 1)
return super_cell2plot
def ball_and_stick(atoms, extend=1, max_bond_length=0.):
"""Calculates the data to plot a ball and stick model
Parameters
----------
atoms: ase.Atoms object
object containing the structural information like 'cell', 'positions', and 'symbols' .
    extend: integer or list of 3 integers
The *extend* argument scales the effective cell in which atoms
will be included. It must either be a list of three integers or a single
integer scaling all 3 directions. By setting this value to one,
all corner and edge atoms will be included in the returned cell.
        This will of course make the returned cell non-repeatable, but this is
very useful for visualisation.
max_bond_length: 1 float
The max_bond_length argument defines the distance for which a bond will be shown.
If max_bond_length is zero, the tabulated atom radii will be used.
Returns
-------
super_cell: ase.Atoms object
structure with additional information in info dictionary
"""
if not isinstance(atoms, ase.Atoms):
raise TypeError('Need an ase Atoms object')
from ase import neighborlist
from scipy import sparse
from scipy.sparse import dok_matrix
super_cell = plot_super_cell(atoms*extend)
cell = super_cell.cell.array
# Corners and Outline of unit cell
h = (0, 1)
corner_vectors = np.dot(np.array(list(itertools.product(h, h, h))), cell)
corner_matrix = dok_matrix((8, 8), dtype=bool)
trace = [[0, 1], [1, 3], [2, 3], [0, 2], [0, 4], [4, 5], [5, 7], [6, 7], [4, 6], [1, 5], [2, 6], [3, 7]]
for s, e in trace:
corner_matrix[s, e] = True
    # Bond-length cutoffs: tabulated radii from the electronFF database below, or
    # half of max_bond_length for every atom when an explicit cutoff was requested.
    if max_bond_length:
        bond_lengths = [max_bond_length / 2] * len(super_cell)
    else:
        bond_lengths = [electronFF[atom.symbol]['bond_length'][1] for atom in super_cell]
super_cell.set_cell(cell*2, scale_atoms=False) # otherwise, corner atoms have distance 0
neighbor_list = neighborlist.NeighborList(bond_lengths, self_interaction=False, bothways=False)
neighbor_list.update(super_cell)
bond_matrix = neighbor_list.get_connectivity_matrix()
del_double = []
for (k, s) in bond_matrix.keys():
if k > s:
del_double.append((k, s))
for key in del_double:
bond_matrix.pop(key)
if super_cell.info is None:
super_cell.info = {}
super_cell.info['plot_cell'] = {'bond_matrix': bond_matrix, 'corner_vectors': corner_vectors,
'bond_length': bond_lengths, 'corner_matrix': corner_matrix}
super_cell.set_cell(cell/2, scale_atoms=False)
return super_cell
def plot_unit_cell(atoms, extend=1, max_bond_length=1.0):
"""
Simple plot of unit cell
"""
super_cell = ball_and_stick(atoms, extend=extend, max_bond_length=max_bond_length)
corners = super_cell.info['plot_cell']['corner_vectors']
positions = super_cell.positions - super_cell.cell.lengths()/2
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# draw unit_cell
for line in super_cell.info['plot_cell']['corner_matrix'].keys():
ax.plot3D(corners[line, 0], corners[line, 1], corners[line, 2], color="blue")
# draw bonds
bond_matrix = super_cell.info['plot_cell']['bond_matrix']
for bond in super_cell.info['plot_cell']['bond_matrix'].keys():
ax.plot3D(positions[bond, 0], positions[bond, 1], positions[bond, 2], color="black", linewidth=4)
# , tube_radius=0.02)
# draw atoms
ax.scatter(super_cell.positions[:, 0], super_cell.positions[:, 1], super_cell.positions[:, 2],
color=tuple(jmol_colors[super_cell.get_atomic_numbers()]), alpha=1.0, s=50)
maximum_position = super_cell.positions.max()*1.05
ax.set_proj_type('ortho')
ax.set_zlim(-maximum_position/2, maximum_position/2)
ax.set_ylim(-maximum_position/2, maximum_position/2)
ax.set_xlim(-maximum_position/2, maximum_position/2)
if 'name' in super_cell.info:
ax.set_title(super_cell.info['name'])
ax.set_xlabel('x [Å]')
ax.set_ylabel('y [Å]')
ax.set_zlabel('z [Å]')
return fig
# Jmol colors. See: http://jmol.sourceforge.net/jscolors/#color_U
jmol_colors = ase.data.colors.jmol_colors
def structure_by_name(crystal_name):
"""
Provides crystal structure in ase.Atoms format.
Additional information is stored in the info attribute as a dictionary
Parameter
---------
crystal_name: str
Please note that the chemical expressions are not case-sensitive.
Returns
-------
atoms: ase.Atoms
structure
Example
-------
>> # for a list of pre-defined crystal structures
>> import pyTEMlib.crystal_tools
>> print(pyTEMlib.crystal_tools.crystal_data_base.keys())
>>
>> atoms = pyTEMlib.crystal_tools.structure_by_name('Silicon')
>> print(atoms)
>> print(atoms.info)
"""
# Check whether name is in the crystal_data_base
import ase
import ase.build
if crystal_name.lower() in cdb:
tags = cdb[crystal_name.lower()].copy()
else:
print(f'Crystal name {crystal_name.lower()} not defined')
return
if 'symmetry' in tags:
if tags['symmetry'].lower() == 'fcc':
atoms = ase.build.bulk(tags['elements'], 'fcc', a=tags['a'], cubic=True)
elif tags['symmetry'].lower() == 'bcc':
atoms = ase.build.bulk(tags['elements'], 'bcc', a=tags['a'], cubic=True)
elif tags['symmetry'].lower() == 'diamond':
import ase.lattice.cubic
atoms = ase.lattice.cubic.Diamond(tags['elements'], latticeconstant=tags['a'])
elif 'rocksalt' in tags['symmetry']: # B1
import ase.lattice.compounds
atoms = ase.lattice.compounds.Rocksalt(tags['elements'], latticeconstant=tags['a'])
elif 'zincblende' in tags['symmetry']:
import ase.lattice.compounds
atoms = ase.lattice.compounds.B3(tags['elements'], latticeconstant=tags['a'])
elif 'B2' in tags['symmetry']:
import ase.lattice.compounds
atoms = ase.lattice.compounds.B2(tags['elements'], latticeconstant=tags['a'])
elif 'graphite' in tags['symmetry']:
base = [(0, 0, 0), (0, 0, 1/2), (2/3, 1/3, 0), (1/3, 2/3, 1/2)]
structure_matrix = np.array([[tags['a'], 0., 0.],
[np.cos(np.pi/3*2)*tags['a'], np.sin(np.pi/3*2)*tags['a'], 0.],
[0., 0., tags['c']]])
atoms = ase.Atoms(tags['elements'], cell=structure_matrix, scaled_positions=base)
elif 'perovskite' in tags['symmetry']:
import ase.spacegroup
atom_positions = [(0.0, 0.0, 0.0), (0.5, 0.5, 0.5), (0.5, 0.5, 0.0)]
atoms = ase.spacegroup.crystal(tags['elements'], atom_positions, spacegroup=221, cellpar=tags['a'])
elif 'wurzite' in tags['symmetry']:
import ase.spacegroup
atom_positions = [(1/3, 2/3, 0.0), (1/3, 2/3, tags['u'])]
atoms = ase.spacegroup.crystal(tags['elements'], atom_positions, spacegroup=186,
cellpar=[tags['a'], tags['a'], tags['c'], 90, 90, 120])
elif 'rutile' in tags['symmetry']:
import ase.spacegroup
atoms = ase.spacegroup.crystal(tags['elements'], basis=[(0, 0, 0), (0.3, 0.3, 0.0)],
spacegroup=136, cellpar=[tags['a'], tags['a'], tags['c'], 90, 90, 90])
elif 'dichalcogenide' in tags['symmetry']:
import ase.spacegroup
u = tags['u']
base = [(1 / 3., 2 / 3., 1 / 4.), (2 / 3., 1 / 3., 3 / 4.),
(2 / 3., 1 / 3., 1 / 4. + u), (2 / 3., 1 / 3., 1 / 4. - u),
(1 / 3., 2 / 3., 3 / 4. + u), (1 / 3., 2 / 3., 3 / 4. - u)]
atoms = ase.spacegroup.crystal(tags['elements'][0] * 2 + tags['elements'][1] * 4, base, spacegroup=194,
cellpar=[tags['a'], tags['a'], tags['c'], 90, 90, 120])
elif tags['symmetry'].lower() in ['primitive', 'hexagonal']:
atoms = ase.Atoms(tags['elements'], cell=tags['unit_cell'], scaled_positions=tags['base'])
        else:
            print(f" symmetry '{tags['symmetry']}' of structure is not supported")
            return None
atoms.info = {'structure': {'reference': tags['reference'], 'link': tags['link']},
'title': tags['crystal_name']}
return atoms
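# Minimal usage sketch combining the helpers above (crystal names come from the
# crystal_data_base dictionary defined below):
#
#   atoms = structure_by_name('SrTiO3')
#   print(atoms.info['title'])
#   fig = plot_unit_cell(atoms, extend=1)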
# crystal data base cbd
cdb = {'aluminum': {'crystal_name': 'aluminum',
'symmetry': 'FCC',
'elements': 'Al',
'a': 4.05, # Angstrom
'reference': 'W. Witt, Z. Naturforsch. A, 1967, 22A, 92',
'link': 'http://doi.org/10.1515/zna-1967-0115'}}
cdb['al'] = cdb['aluminium'] = cdb['aluminum']
cdb['gold'] = {'crystal_name': 'gold',
'symmetry': 'FCC',
'elements': 'Au',
'a': 4.0782, # Angstrom
'reference': '',
'link': ''}
cdb['au'] = cdb['gold']
cdb['silver'] = {'crystal_name': 'silver',
'symmetry': 'FCC',
'elements': 'Ag',
'a': 4.0853, # Angstrom
'reference': '', 'link': ''}
cdb['ag'] = cdb['silver']
cdb['copper'] = {'crystal_name': 'copper',
'symmetry': 'FCC',
'elements': 'Cu',
                 'a': 3.6149,  # Angstrom
'reference': '', 'link': ''}
cdb['cu'] = cdb['copper']
cdb['diamond'] = {'crystal_name': 'diamond',
'symmetry': 'diamond',
'elements': 'C',
'a': 3.5668, # Angstrom
'reference': '', 'link': ''}
cdb['germanium'] = {'crystal_name': 'germanium',
'symmetry': 'diamond',
'elements': 'Ge',
'a': 5.66806348, # Angstrom for 300K
'reference': 'H. P. Singh, Acta Crystallogr., 1968, 24A, 469',
'link': 'https://doi.org/10.1107/S056773946800094X'}
cdb['ge'] = cdb['germanium']
cdb['silicon'] = {'crystal_name': 'silicon',
'symmetry': 'diamond',
'elements': 'Si',
'a': 5.430880, # Angstrom for 300K
'reference': 'C. R. Hubbard, H. E. Swanson, and F. A. Mauer, J. Appl. Crystallogr., 1975, 8, 45',
'link': 'https://doi.org/10.1107/S0021889875009508'}
cdb['si'] = cdb['silicon']
cdb['gaas'] = {'crystal_name': 'GaAs',
'symmetry': 'zincblende(B3)',
'elements': ['Ga', 'As'],
'a': 5.65325, # Angstrom for 300K
'reference': 'J.F.C. Baker, M. Hart, M.A.G. Halliwell, R. Heckingbottom, Solid-State Electronics, 19, '
'1976, 331-334,',
'link': 'https://doi.org/10.1016/0038-1101(76)90031-9'}
cdb['fcc fe'] = {'crystal_name': 'FCC Fe',
'symmetry': 'FCC',
'elements': 'Fe',
'a': 3.3571, # Angstrom
'reference': 'R. Kohlhaas, P. Donner, and N. Schmitz-Pranghe, Z. Angew. Phys., 1967, 23, 245',
'link': ''}
cdb['iron'] = {'crystal_name': 'BCC Fe',
'symmetry': 'BCC',
'elements': 'Fe',
'a': 2.866, # Angstrom
'reference': 'Z. S. Basinski, W. Hume-Rothery and A. L. Sutton, Proceedings of the Royal Society of '
'London. Series A, Mathematical and Physical Sciences Vol. 229, No. 1179 '
'(May 24, 1955), pp. 459-467',
'link': 'http://www.jstor.org/stable/99693'}
cdb['bcc fe'] = cdb['alpha iron'] = cdb['iron']
cdb['srtio3'] = {'crystal_name': 'SrTiO3',
'symmetry': 'perovskite',
'elements': ['Sr', 'Ti', 'O'],
'a': 3.905268, # Angstrom
'reference': 'M. Schmidbauer, A. Kwasniewski and J. Schwarzkopf, Acta Cryst. (2012). B68, 8-14',
'link': 'http://doi.org/10.1107/S0108768111046738'}
cdb['strontium titanate'] = cdb['srtio3']
cdb['graphite'] = {'crystal_name': 'graphite',
'symmetry': 'graphite hexagonal',
'elements': 'C4',
'a': 2.46772414,
'c': 6.711,
'reference': 'P. Trucano and R. Chen, Nature, 1975, 258, 136',
'link': 'https://doi.org/10.1038/258136a0'}
cdb['cscl'] = {'crystal_name': 'CsCl',
'symmetry': 'CsCl (B2)',
'a': 4.209, # Angstrom
'elements': ['Cs', 'Cl'],
'reference': '', 'link': ''}
cdb['cesium chloride'] = cdb['cscl']
cdb['mgo'] = {'crystal_name': 'MgO',
'symmetry': 'rocksalt (B1)',
'elements': ['Mg', 'O'],
'a': 4.256483, # Angstrom
'reference': '', 'link': ''}
cdb['titanium nitride'] = {'crystal_name': 'TiN',
'symmetry': 'rocksalt (B1)',
'elements': ['Ti', 'N'],
'a': 4.25353445, # Angstrom
'reference': '', 'link': '',
'space_group': 225,
'symmetry_name': 'Fm-3m'}
cdb['zno wurzite'] = {'crystal_name': 'ZnO Wurzite',
'symmetry': 'wurzite',
'elements': ['Zn', 'O'],
'a': 3.278, # Angstrom
'c': 5.292, # Angstrom
'u': 0.382,
'reference': '', 'link': ''}
cdb['zno'] = cdb['wzno'] = cdb['zno wurzite']
cdb['gan'] = {'crystal_name': 'GaN Wurzite',
'symmetry': 'wurzite',
'elements': ['Ga', 'N'],
'a': 3.186, # Angstrom
'c': 5.186, # Angstrom
'u': 0.376393,
'reference': '', 'link': ''}
cdb['gan wurzite'] = cdb['wgan'] = cdb['gallium nitride'] = cdb['gan']
cdb['tio2'] = {'crystal_name': 'TiO2 rutile',
'symmetry': 'rutile',
'elements': ['Ti', 'O'],
'a': 4.6, # Angstrom
'c': 2.95, # Angstrom
'reference': '', 'link': ''}
cdb['mos2'] = {'crystal_name': 'MoS2',
'symmetry': 'dichalcogenide',
'elements': ['Mo', 'S'],
'a': 3.19031573, # Angstrom
'c': 14.87900430, # Angstrom
'u': 0.105174,
'reference': '', 'link': ''}
cdb['ws2'] = {'crystal_name': 'WS2',
'symmetry': 'dichalcogenide',
'elements': ['W', 'S'],
'a': 3.19073051, # Angstrom
'c': 14.20240204, # Angstrom
'u': 0.110759,
'reference': '', 'link': ''}
cdb['wse2'] = {'crystal_name': 'WSe2',
'symmetry': 'dichalcogenide',
'elements': ['W', 'Se'],
'a': 3.32706918, # Angstrom
'c': 15.06895072, # Angstrom
'u': 0.111569,
'reference': '', 'link': ''}
cdb['mose2'] = {'crystal_name': 'MoSe2',
'symmetry': 'dichalcogenide',
'elements': ['Mo', 'Se'],
'a': 3.32694913, # Angstrom
'c': 15.45142322, # Angstrom
'u': 0.108249,
'reference': '', 'link': ''}
a_l = 0.3336
c_l = 0.4754
base_l = [(2. / 3., 1. / 3., .5), (1. / 3., 2. / 3., 0.), (2. / 3., 1. / 3., 0.), (1. / 3., 2. / 3., .5)]
cdb['zno hexagonal'] = {'crystal_name': 'ZnO hexagonal',
'symmetry': 'hexagonal',
'a': a_l, # nm
'c': c_l, # not np.sqrt(8/3)*1
'elements': ['Zn', 'Zn', 'O', 'O'],
'unit_cell': [[a_l, 0., 0.],
[np.cos(120 / 180 * np.pi) * a_l, np.sin(120 / 180 * np.pi) * a_l, 0.],
[0., 0., c_l]],
'base': np.array(base_l),
'reference': '', 'link': ''}
cdb['pdse2'] = {'crystal_name': 'PdSe2',
'symmetry': 'primitive',
'unit_cell': (np.identity(3) * (.579441832, 0.594542204, 0.858506072)),
'elements': ['Pd'] * 4 + ['Se'] * 8,
'base': np.array([[.5, .0, .0], [.0, 0.5, 0.0],
[.5, 0.5, 0.5], [.0, 0.5, 0.5],
[0.611300, 0.119356, 0.585891],
[0.111300, 0.380644, 0.414109],
[0.388700, 0.619356, 0.914109],
[0.888700, 0.880644, 0.085891],
[0.111300, 0.119356, 0.914109],
[0.611300, 0.380644, 0.085891],
[0.888700, 0.619356, 0.585891],
[0.388700, 0.880644, 0.414109]]),
'reference': '', 'link': ''}
crystal_data_base = cdb
# From Appendix C of Kirkland, "Advanced Computing in Electron Microscopy", 2nd ed.
electronFF = {
# form factor coefficients
# Z= 6, chisq= 0.143335
# a1 b1 a2 b2
# a3 b3 c1 d1
# c2 d2 c3 d3
# name of the file: feKirkland.txt
# converted with program sortFF.py
# form factor parametrized in 1/Angstrom
# bond_length as a list of atom Sizes, bond radii, angle radii, H-bond radii
'H': {'Z': 1, 'chisq': 0.170190,
'bond_length': [0.98, 0.78, 1.20, 0],
'fa': [4.20298324e-003, 6.27762505e-002, 3.00907347e-002],
'fb': [2.25350888e-001, 2.25366950e-001, 2.25331756e-001],
'fc': [6.77756695e-002, 3.56609237e-003, 2.76135815e-002],
'fd': [4.38854001e+000, 4.03884823e-001, 1.44490166e+000]},
'He': {'Z': 2, 'chisq': 0.396634,
'bond_length': [1.45, 1.25, 1.40, 0],
'fa': [1.87543704e-005, 4.10595800e-004, 1.96300059e-001],
'fb': [2.12427997e-001, 3.32212279e-001, 5.17325152e-001],
'fc': [8.36015738e-003, 2.95102022e-002, 4.65928982e-007],
'fd': [3.66668239e-001, 1.37171827e+000, 3.75768025e+004]},
'Li': {'Z': 3, 'chisq': 0.286232,
'bond_length': [1.76, 1.56, 1.82, 0],
'fa': [7.45843816e-002, 7.15382250e-002, 1.45315229e-001],
'fb': [8.81151424e-001, 4.59142904e-002, 8.81301714e-001],
'fc': [1.12125769e+000, 2.51736525e-003, 3.58434971e-001],
'fd': [1.88483665e+001, 1.59189995e-001, 6.12371000e+000]},
'Be': {'Z': 4, 'chisq': 0.195442,
'bond_length': [1.33, 1.13, 1.70, 0],
'fa': [6.11642897e-002, 1.25755034e-001, 2.00831548e-001],
'fb': [9.90182132e-002, 9.90272412e-002, 1.87392509e+000],
'fc': [7.87242876e-001, 1.58847850e-003, 2.73962031e-001],
'fd': [9.32794929e+000, 8.91900236e-002, 3.20687658e+000]},
'B': {'Z': 5, 'chisq': 0.146989,
'bond_length': [1.18, 0.98, 2.08, 0],
'fa': [1.25716066e-001, 1.73314452e-001, 1.84774811e-001],
'fb': [1.48258830e-001, 1.48257216e-001, 3.34227311e+000],
'fc': [1.95250221e-001, 5.29642075e-001, 1.08230500e-003],
'fd': [1.97339463e+000, 5.70035553e+000, 5.64857237e-002]},
'C': {'Z': 6, 'chisq': 0.102440,
'bond_length': [1.12, 0.92, 1.95, 0],
'fa': [2.12080767e-001, 1.99811865e-001, 1.68254385e-001],
'fb': [2.08605417e-001, 2.08610186e-001, 5.57870773e+000],
'fc': [1.42048360e-001, 3.63830672e-001, 8.35012044e-004],
'fd': [1.33311887e+000, 3.80800263e+000, 4.03982620e-002]},
'N': {'Z': 7, 'chisq': 0.060249,
'bond_length': [1.08, 0.88, 1.85, 1.30],
'fa': [5.33015554e-001, 5.29008883e-002, 9.24159648e-002],
'fb': [2.90952515e-001, 1.03547896e+001, 1.03540028e+001],
'fc': [2.61799101e-001, 8.80262108e-004, 1.10166555e-001],
'fd': [2.76252723e+000, 3.47681236e-002, 9.93421736e-001]},
'O': {'Z': 8, 'chisq': 0.039944,
'bond_length': [1.09, 0.89, 1.70, 1.40],
'fa': [3.39969204e-001, 3.07570172e-001, 1.30369072e-001],
'fb': [3.81570280e-001, 3.81571436e-001, 1.91919745e+001],
'fc': [8.83326058e-002, 1.96586700e-001, 9.96220028e-004],
'fd': [7.60635525e-001, 2.07401094e+000, 3.03266869e-002]},
'F': {'Z': 9, 'chisq': 0.027866,
'bond_length': [1.30, 1.10, 1.73, 0],
'fa': [2.30560593e-001, 5.26889648e-001, 1.24346755e-001],
'fb': [4.80754213e-001, 4.80763895e-001, 3.95306720e+001],
'fc': [1.24616894e-003, 7.20452555e-002, 1.53075777e-001],
'fd': [2.62181803e-002, 5.92495593e-001, 1.59127671e+000]},
'Ne': {'Z': 10, 'chisq': 0.021836,
'bond_length': [1.50, 1.30, 1.54, 0],
'fa': [4.08371771e-001, 4.54418858e-001, 1.44564923e-001],
'fb': [5.88228627e-001, 5.88288655e-001, 1.21246013e+002],
'fc': [5.91531395e-002, 1.24003718e-001, 1.64986037e-003],
'fd': [4.63963540e-001, 1.23413025e+000, 2.05869217e-002]},
'Na': {'Z': 11, 'chisq': 0.064136,
'bond_length': [2.10, 1.91, 2.27, 0],
'fa': [1.36471662e-001, 7.70677865e-001, 1.56862014e-001],
'fb': [4.99965301e-002, 8.81899664e-001, 1.61768579e+001],
'fc': [9.96821513e-001, 3.80304670e-002, 1.27685089e-001],
'fd': [2.00132610e+001, 2.60516254e-001, 6.99559329e-001]},
'Mg': {'Z': 12, 'chisq': 0.051303,
'bond_length': [1.80, 1.60, 1.73, 0],
'fa': [3.04384121e-001, 7.56270563e-001, 1.01164809e-001],
'fb': [8.42014377e-002, 1.64065598e+000, 2.97142975e+001],
'fc': [3.45203403e-002, 9.71751327e-001, 1.20593012e-001],
'fd': [2.16596094e-001, 1.21236852e+001, 5.60865838e-001]},
'Al': {'Z': 13, 'chisq': 0.049529,
'bond_length': [1.60, 1.43, 2.05, 0],
'fa': [7.77419424e-001, 5.78312036e-002, 4.26386499e-001],
'fb': [2.71058227e+000, 7.17532098e+001, 9.13331555e-002],
'fc': [1.13407220e-001, 7.90114035e-001, 3.23293496e-002],
'fd': [4.48867451e-001, 8.66366718e+000, 1.78503463e-001]},
'Si': {'Z': 14, 'chisq': 0.071667,
'bond_length': [1.52, 1.32, 2.10, 0],
'fa': [1.06543892e+000, 1.20143691e-001, 1.80915263e-001],
'fb': [1.04118455e+000, 6.87113368e+001, 8.87533926e-002],
'fc': [1.12065620e+000, 3.05452816e-002, 1.59963502e+000],
'fd': [3.70062619e+000, 2.14097897e-001, 9.99096638e+000]},
'P': {'Z': 15, 'chisq': 0.047673,
'bond_length': [1.48, 1.28, 2.08, 0],
'fa': [1.05284447e+000, 2.99440284e-001, 1.17460748e-001],
'fb': [1.31962590e+000, 1.28460520e-001, 1.02190163e+002],
'fc': [9.60643452e-001, 2.63555748e-002, 1.38059330e+000],
'fd': [2.87477555e+000, 1.82076844e-001, 7.49165526e+000]},
'S': {'Z': 16, 'chisq': 0.033482,
'bond_length': [1.47, 1.27, 2.00, 0],
'fa': [1.01646916e+000, 4.41766748e-001, 1.21503863e-001],
'fb': [1.69181965e+000, 1.74180288e-001, 1.67011091e+002],
'fc': [8.27966670e-001, 2.33022533e-002, 1.18302846e+000],
'fd': [2.30342810e+000, 1.56954150e-001, 5.85782891e+000]},
'Cl': {'Z': 17, 'chisq': 0.206186,
'bond_length': [1.70, 1.50, 1.97, 0],
'fa': [9.44221116e-001, 4.37322049e-001, 2.54547926e-001],
'fb': [2.40052374e-001, 9.30510439e+000, 9.30486346e+000],
'fc': [5.47763323e-002, 8.00087488e-001, 1.07488641e-002],
'fd': [1.68655688e-001, 2.97849774e+000, 6.84240646e-002]},
'Ar': {'Z': 18, 'chisq': 0.263904,
'bond_length': [2.00, 1.80, 1.88, 0],
'fa': [1.06983288e+000, 4.24631786e-001, 2.43897949e-001],
'fb': [2.87791022e-001, 1.24156957e+001, 1.24158868e+001],
'fc': [4.79446296e-002, 7.64958952e-001, 8.23128431e-003],
'fd': [1.36979796e-001, 2.43940729e+000, 5.27258749e-002]},
'K': {'Z': 19, 'chisq': 0.161900,
'bond_length': [2.58, 2.38, 2.75, 0],
'fa': [6.92717865e-001, 9.65161085e-001, 1.48466588e-001],
'fb': [7.10849990e+000, 3.57532901e-001, 3.93763275e-002],
'fc': [2.64645027e-002, 1.80883768e+000, 5.43900018e-001],
'fd': [1.03591321e-001, 3.22845199e+001, 1.67791374e+000]},
'Ca': {'Z': 20, 'chisq': 0.085209,
'bond_length': [2.17, 1.97, 1.97, 0],
'fa': [3.66902871e-001, 8.66378999e-001, 6.67203300e-001],
'fb': [6.14274129e-002, 5.70881727e-001, 7.82965639e+000],
'fc': [4.87743636e-001, 1.82406314e+000, 2.20248453e-002],
'fd': [1.32531318e+000, 2.10056032e+001, 9.11853450e-002]},
'Sc': {'Z': 21, 'chisq': 0.052352,
'bond_length': [1.84, 1.64, 1.70, 0],
'fa': [3.78871777e-001, 9.00022505e-001, 7.15288914e-001],
'fb': [6.98910162e-002, 5.21061541e-001, 7.87707920e+000],
'fc': [1.88640973e-002, 4.07945949e-001, 1.61786540e+000],
'fd': [8.17512708e-002, 1.11141388e+000, 1.80840759e+001]},
'Ti': {'Z': 22, 'chisq': 0.035298,
'bond_length': [1.66, 1.46, 1.70, 0],
'fa': [3.62383267e-001, 9.84232966e-001, 7.41715642e-001],
'fb': [7.54707114e-002, 4.97757309e-001, 8.17659391e+000],
'fc': [3.62555269e-001, 1.49159390e+000, 1.61659509e-002],
'fd': [9.55524906e-001, 1.62221677e+001, 7.33140839e-002]},
'V': {'Z': 23, 'chisq': 0.030745,
'bond_length': [1.55, 1.35, 1.70, 0],
'fa': [3.52961378e-001, 7.46791014e-001, 1.08364068e+000],
'fb': [8.19204103e-002, 8.81189511e+000, 5.10646075e-001],
'fc': [1.39013610e+000, 3.31273356e-001, 1.40422612e-002],
'fd': [1.48901841e+001, 8.38543079e-001, 6.57432678e-002]},
'Cr': {'Z': 24, 'chisq': 0.015287,
'bond_length': [1.56, 1.36, 1.70, 0],
'fa': [1.34348379e+000, 5.07040328e-001, 4.26358955e-001],
'fb': [1.25814353e+000, 1.15042811e+001, 8.53660389e-002],
'fc': [1.17241826e-002, 5.11966516e-001, 3.38285828e-001],
'fd': [6.00177061e-002, 1.53772451e+000, 6.62418319e-001]},
'Mn': {'Z': 25, 'chisq': 0.031274,
'bond_length': [1.54, 1.30, 1.70, 0],
'fa': [3.26697613e-001, 7.17297000e-001, 1.33212464e+000],
'fb': [8.88813083e-002, 1.11300198e+001, 5.82141104e-001],
'fc': [2.80801702e-001, 1.15499241e+000, 1.11984488e-002],
'fd': [6.71583145e-001, 1.26825395e+001, 5.32334467e-002]},
'Fe': {'Z': 26, 'chisq': 0.031315,
'bond_length': [1.47, 1.27, 1.70, 0],
'fa': [3.13454847e-001, 6.89290016e-001, 1.47141531e+000],
'fb': [8.99325756e-002, 1.30366038e+001, 6.33345291e-001],
'fc': [1.03298688e+000, 2.58280285e-001, 1.03460690e-002],
'fd': [1.16783425e+001, 6.09116446e-001, 4.81610627e-002]},
'Co': {'Z': 27, 'chisq': 0.031643,
'bond_length': [1.45, 1.25, 1.70, 0],
'fa': [3.15878278e-001, 1.60139005e+000, 6.56394338e-001],
'fb': [9.46683246e-002, 6.99436449e-001, 1.56954403e+001],
'fc': [9.36746624e-001, 9.77562646e-003, 2.38378578e-001],
'fd': [1.09392410e+001, 4.37446816e-002, 5.56286483e-001]},
'Ni': {'Z': 28, 'chisq': 0.032245,
'bond_length': [1.45, 1.25, 1.63, 0],
'fa': [1.72254630e+000, 3.29543044e-001, 6.23007200e-001],
'fb': [7.76606908e-001, 1.02262360e-001, 1.94156207e+001],
'fc': [9.43496513e-003, 8.54063515e-001, 2.21073515e-001],
'fd': [3.98684596e-002, 1.04078166e+001, 5.10869330e-001]},
'Cu': {'Z': 29, 'chisq': 0.010467,
'bond_length': [1.48, 1.28, 1.40, 0],
'fa': [3.58774531e-001, 1.76181348e+000, 6.36905053e-001],
'fb': [1.06153463e-001, 1.01640995e+000, 1.53659093e+001],
'fc': [7.44930667e-003, 1.89002347e-001, 2.29619589e-001],
'fd': [3.85345989e-002, 3.98427790e-001, 9.01419843e-001]},
'Zn': {'Z': 30, 'chisq': 0.026698,
'bond_length': [1.59, 1.39, 1.39, 0],
'fa': [5.70893973e-001, 1.98908856e+000, 3.06060585e-001],
'fb': [1.26534614e-001, 2.17781965e+000, 3.78619003e+001],
'fc': [2.35600223e-001, 3.97061102e-001, 6.85657228e-003],
'fd': [3.67019041e-001, 8.66419596e-001, 3.35778823e-002]},
'Ga': {'Z': 31, 'chisq': 0.008110,
'bond_length': [1.61, 1.41, 1.87, 0],
'fa': [6.25528464e-001, 2.05302901e+000, 2.89608120e-001],
'fb': [1.10005650e-001, 2.41095786e+000, 4.78685736e+001],
'fc': [2.07910594e-001, 3.45079617e-001, 6.55634298e-003],
'fd': [3.27807224e-001, 7.43139061e-001, 3.09411369e-002]},
'Ge': {'Z': 32, 'chisq': 0.032198,
'bond_length': [1.57, 1.37, 1.70, 0],
'fa': [5.90952690e-001, 5.39980660e-001, 2.00626188e+000],
'fb': [1.18375976e-001, 7.18937433e+001, 1.39304889e+000],
'fc': [7.49705041e-001, 1.83581347e-001, 9.52190743e-003],
'fd': [6.89943350e+000, 3.64667232e-001, 2.69888650e-002]},
'As': {'Z': 33, 'chisq': 0.034014,
'bond_length': [1.59, 1.39, 1.85, 0],
'fa': [7.77875218e-001, 5.93848150e-001, 1.95918751e+000],
'fb': [1.50733157e-001, 1.42882209e+002, 1.74750339e+000],
'fc': [1.79880226e-001, 8.63267222e-001, 9.59053427e-003],
'fd': [3.31800852e-001, 5.85490274e+000, 2.33777569e-002]},
'Se': {'Z': 34, 'chisq': 0.035703,
'bond_length': [1.60, 1.40, 1.90, 0],
'fa': [9.58390681e-001, 6.03851342e-001, 1.90828931e+000],
'fb': [1.83775557e-001, 1.96819224e+002, 2.15082053e+000],
'fc': [1.73885956e-001, 9.35265145e-001, 8.62254658e-003],
'fd': [3.00006024e-001, 4.92471215e+000, 2.12308108e-002]},
'Br': {'Z': 35, 'chisq': 0.039250,
'bond_length': [1.80, 1.60, 2.10, 0],
'fa': [1.14136170e+000, 5.18118737e-001, 1.85731975e+000],
'fb': [2.18708710e-001, 1.93916682e+002, 2.65755396e+000],
'fc': [1.68217399e-001, 9.75705606e-001, 7.24187871e-003],
'fd': [2.71719918e-001, 4.19482500e+000, 1.99325718e-002]},
'Kr': {'Z': 36, 'chisq': 0.045421,
'bond_length': [2.10, 1.90, 2.02, 0],
'fa': [3.24386970e-001, 1.31732163e+000, 1.79912614e+000],
'fb': [6.31317973e+001, 2.54706036e-001, 3.23668394e+000],
'fc': [4.29961425e-003, 1.00429433e+000, 1.62188197e-001],
'fd': [1.98965610e-002, 3.61094513e+000, 2.45583672e-001]},
'Rb': {'Z': 37, 'chisq': 0.130044,
'bond_length': [2.75, 2.55, 1.70, 0],
'fa': [2.90445351e-001, 2.44201329e+000, 7.69435449e-001],
'fb': [3.68420227e-002, 1.16013332e+000, 1.69591472e+001],
'fc': [1.58687000e+000, 2.81617593e-003, 1.28663830e-001],
'fd': [2.53082574e+000, 1.88577417e-002, 2.10753969e-001]},
'Sr': {'Z': 38, 'chisq': 0.188055,
'bond_length': [2.35, 2.15, 1.70, 0],
'fa': [1.37373086e-002, 1.97548672e+000, 1.59261029e+000],
'fb': [1.87469061e-002, 6.36079230e+000, 2.21992482e-001],
'fc': [1.73263882e-001, 4.66280378e+000, 1.61265063e-003],
'fd': [2.01624958e-001, 2.53027803e+001, 1.53610568e-002]},
'Y': {'Z': 39, 'chisq': 0.174927,
'bond_length': [2.00, 1.80, 1.70, 0],
'fa': [6.75302747e-001, 4.70286720e-001, 2.63497677e+000],
'fb': [6.54331847e-002, 1.06108709e+002, 2.06643540e+000],
'fc': [1.09621746e-001, 9.60348773e-001, 5.28921555e-003],
'fd': [1.93131925e-001, 1.63310938e+000, 1.66083821e-002]},
'Zr': {'Z': 40, 'chisq': 0.072078,
'bond_length': [1.80, 1.60, 1.70, 0],
'fa': [2.64365505e+000, 5.54225147e-001, 7.61376625e-001],
'fb': [2.20202699e+000, 1.78260107e+002, 7.67218745e-002],
'fc': [6.02946891e-003, 9.91630530e-002, 9.56782020e-001],
'fd': [1.55143296e-002, 1.76175995e-001, 1.54330682e+000]},
'Nb': {'Z': 41, 'chisq': 0.011800,
'bond_length': [1.67, 1.47, 1.70, 0],
'fa': [6.59532875e-001, 1.84545854e+000, 1.25584405e+000],
'fb': [8.66145490e-002, 5.94774398e+000, 6.40851475e-001],
'fc': [1.22253422e-001, 7.06638328e-001, 2.62381591e-003],
'fd': [1.66646050e-001, 1.62853268e+000, 8.26257859e-003]},
'Mo': {'Z': 42, 'chisq': 0.008976,
'bond_length': [1.60, 1.40, 1.70, 0],
'fa': [6.10160120e-001, 1.26544000e+000, 1.97428762e+000],
'fb': [9.11628054e-002, 5.06776025e-001, 5.89590381e+000],
'fc': [6.48028962e-001, 2.60380817e-003, 1.13887493e-001],
'fd': [1.46634108e+000, 7.84336311e-003, 1.55114340e-001]},
'Tc': {'Z': 43, 'chisq': 0.023771,
'bond_length': [1.56, 1.36, 1.70, 0],
'fa': [8.55189183e-001, 1.66219641e+000, 1.45575475e+000],
'fb': [1.02962151e-001, 7.64907000e+000, 1.01639987e+000],
'fc': [1.05445664e-001, 7.71657112e-001, 2.20992635e-003],
'fd': [1.42303338e-001, 1.34659349e+000, 7.90358976e-003]},
'Ru': {'Z': 44, 'chisq': 0.010613,
'bond_length': [1.54, 1.34, 1.70, 0],
'fa': [4.70847093e-001, 1.58180781e+000, 2.02419818e+000],
'fb': [9.33029874e-002, 4.52831347e-001, 7.11489023e+000],
'fc': [1.97036257e-003, 6.26912639e-001, 1.02641320e-001],
'fd': [7.56181595e-003, 1.25399858e+000, 1.33786087e-001]},
'Rh': {'Z': 45, 'chisq': 0.012895,
'bond_length': [1.54, 1.34, 1.70, 0],
'fa': [4.20051553e-001, 1.76266507e+000, 2.02735641e+000],
'fb': [9.38882628e-002, 4.64441687e-001, 8.19346046e+000],
'fc': [1.45487176e-003, 6.22809600e-001, 9.91529915e-002],
'fd': [7.82704517e-003, 1.17194153e+000, 1.24532839e-001]},
'Pd': {'Z': 46, 'chisq': 0.009172,
'bond_length': [1.58, 1.38, 1.63, 0],
'fa': [2.10475155e+000, 2.03884487e+000, 1.82067264e-001],
'fb': [8.68606470e+000, 3.78924449e-001, 1.42921634e-001],
'fc': [9.52040948e-002, 5.91445248e-001, 1.13328676e-003],
'fd': [1.17125900e-001, 1.07843808e+000, 7.80252092e-003]},
'Ag': {'Z': 47, 'chisq': 0.006648,
'bond_length': [1.64, 1.44, 1.72, 0],
'fa': [2.07981390e+000, 4.43170726e-001, 1.96515215e+000],
'fb': [9.92540297e+000, 1.04920104e-001, 6.40103839e-001],
'fc': [5.96130591e-001, 4.78016333e-001, 9.46458470e-002],
'fd': [8.89594790e-001, 1.98509407e+000, 1.12744464e-001]},
'Cd': {'Z': 48, 'chisq': 0.005588,
'bond_length': [1.77, 1.57, 1.58, 0],
'fa': [1.63657549e+000, 2.17927989e+000, 7.71300690e-001],
'fb': [1.24540381e+001, 1.45134660e+000, 1.26695757e-001],
'fc': [6.64193880e-001, 7.64563285e-001, 8.61126689e-002],
'fd': [7.77659202e-001, 1.66075210e+000, 1.05728357e-001]},
'In': {'Z': 49, 'chisq': 0.002569,
'bond_length': [1.86, 1.66, 1.93, 0],
'fa': [2.24820632e+000, 1.64706864e+000, 7.88679265e-001],
'fb': [1.51913507e+000, 1.30113424e+001, 1.06128184e-001],
'fc': [8.12579069e-002, 6.68280346e-001, 6.38467475e-001],
'fd': [9.94045620e-002, 1.49742063e+000, 7.18422635e-001]},
'Sn': {'Z': 50, 'chisq': 0.005051,
'bond_length': [1.82, 1.62, 2.17, 0],
'fa': [2.16644620e+000, 6.88691021e-001, 1.92431751e+000],
'fb': [1.13174909e+001, 1.10131285e-001, 6.74464853e-001],
'fc': [5.65359888e-001, 9.18683861e-001, 7.80542213e-002],
'fd': [7.33564610e-001, 1.02310312e+001, 9.31104308e-002]},
'Sb': {'Z': 51, 'chisq': 0.004383,
'bond_length': [1.79, 1.59, 2.20, 0],
'fa': [1.73662114e+000, 9.99871380e-001, 2.13972409e+000],
'fb': [8.84334719e-001, 1.38462121e-001, 1.19666432e+001],
'fc': [5.60566526e-001, 9.93772747e-001, 7.37374982e-002],
'fd': [6.72672880e-001, 8.72330411e+000, 8.78577715e-002]},
'Te': {'Z': 52, 'chisq': 0.004105,
'bond_length': [1.80, 1.60, 2.06, 0],
'fa': [2.09383882e+000, 1.56940519e+000, 1.30941993e+000],
'fb': [1.26856869e+001, 1.21236537e+000, 1.66633292e-001],
'fc': [6.98067804e-002, 1.04969537e+000, 5.55594354e-001],
'fd': [8.30817576e-002, 7.43147857e+000, 6.17487676e-001]},
'I': {'Z': 53, 'chisq': 0.004068,
'bond_length': [1.90, 1.70, 2.15, 0],
'fa': [1.60186925e+000, 1.98510264e+000, 1.48226200e+000],
'fb': [1.95031538e-001, 1.36976183e+001, 1.80304795e+000],
'fc': [5.53807199e-001, 1.11728722e+000, 6.60720847e-002],
'fd': [5.67912340e-001, 6.40879878e+000, 7.86615429e-002]},
'Xe': {'Z': 54, 'chisq': 0.004381,
'bond_length': [2.30, 2.10, 2.16, 0],
'fa': [1.60015487e+000, 1.71644581e+000, 1.84968351e+000],
'fb': [2.92913354e+000, 1.55882990e+001, 2.22525983e-001],
'fc': [6.23813648e-002, 1.21387555e+000, 5.54051946e-001],
'fd': [7.45581223e-002, 5.56013271e+000, 5.21994521e-001]},
'Cs': {'Z': 55, 'chisq': 0.042676,
'bond_length': [2.93, 2.73, 1.70, 0],
'fa': [2.95236854e+000, 4.28105721e-001, 1.89599233e+000],
'fb': [6.01461952e+000, 4.64151246e+001, 1.80109756e-001],
'fc': [5.48012938e-002, 4.70838600e+000, 5.90356719e-001],
'fd': [7.12799633e-002, 4.56702799e+001, 4.70236310e-001]},
'Ba': {'Z': 56, 'chisq': 0.043267,
'bond_length': [2.44, 2.24, 1.70, 0],
'fa': [3.19434243e+000, 1.98289586e+000, 1.55121052e-001],
'fb': [9.27352241e+000, 2.28741632e-001, 3.82000231e-002],
'fc': [6.73222354e-002, 4.48474211e+000, 5.42674414e-001],
'fd': [7.30961745e-002, 2.95703565e+001, 4.08647015e-001]},
'La': {'Z': 57, 'chisq': 0.033249,
'bond_length': [2.08, 1.88, 1.70, 0],
'fa': [2.05036425e+000, 1.42114311e-001, 3.23538151e+000],
'fb': [2.20348417e-001, 3.96438056e-002, 9.56979169e+000],
'fc': [6.34683429e-002, 3.97960586e+000, 5.20116711e-001],
'fd': [6.92443091e-002, 2.53178406e+001, 3.83614098e-001]},
'Ce': {'Z': 58, 'chisq': 0.029355,
'bond_length': [2.02, 1.82, 1.70, 0],
'fa': [3.22990759e+000, 1.57618307e-001, 2.13477838e+000],
'fb': [9.94660135e+000, 4.15378676e-002, 2.40480572e-001],
'fc': [5.01907609e-001, 3.80889010e+000, 5.96625028e-002],
'fd': [3.66252019e-001, 2.43275968e+001, 6.59653503e-002]},
'Pr': {'Z': 59, 'chisq': 0.029725,
'bond_length': [2.03, 1.83, 1.70, 0],
'fa': [1.58189324e-001, 3.18141995e+000, 2.27622140e+000],
'fb': [3.91309056e-002, 1.04139545e+001, 2.81671757e-001],
'fc': [3.97705472e+000, 5.58448277e-002, 4.85207954e-001],
'fd': [2.61872978e+001, 6.30921695e-002, 3.54234369e-001]},
'Nd': {'Z': 60, 'chisq': 0.027597,
'bond_length': [2.02, 1.82, 1.70, 0],
'fa': [1.81379417e-001, 3.17616396e+000, 2.35221519e+000],
'fb': [4.37324793e-002, 1.07842572e+001, 3.05571833e-001],
'fc': [3.83125763e+000, 5.25889976e-002, 4.70090742e-001],
'fd': [2.54745408e+001, 6.02676073e-002, 3.39017003e-001]},
'Pm': {'Z': 61, 'chisq': 0.025208,
'bond_length': [2.01, 1.81, 1.70, 0],
'fa': [1.92986811e-001, 2.43756023e+000, 3.17248504e+000],
'fb': [4.37785970e-002, 3.29336996e-001, 1.11259996e+001],
'fc': [3.58105414e+000, 4.56529394e-001, 4.94812177e-002],
'fd': [2.46709586e+001, 3.24990282e-001, 5.76553100e-002]},
'Sm': {'Z': 62, 'chisq': 0.023540,
'bond_length': [2.00, 1.80, 1.70, 0],
'fa': [2.12002595e-001, 3.16891754e+000, 2.51503494e+000],
'fb': [4.57703608e-002, 1.14536599e+001, 3.55561054e-001],
'fc': [4.44080845e-001, 3.36742101e+000, 4.65652543e-002],
'fd': [3.11953363e-001, 2.40291435e+001, 5.52266819e-002]},
'Eu': {'Z': 63, 'chisq': 0.022204,
'bond_length': [2.24, 2.04, 1.70, 0],
'fa': [2.59355002e+000, 3.16557522e+000, 2.29402652e-001],
'fb': [3.82452612e-001, 1.17675155e+001, 4.76642249e-002],
'fc': [4.32257780e-001, 3.17261920e+000, 4.37958317e-002],
'fd': [2.99719833e-001, 2.34462738e+001, 5.29440680e-002]},
'Gd': {'Z': 64, 'chisq': 0.017492,
'bond_length': [2.00, 1.80, 1.70, 0],
'fa': [3.19144939e+000, 2.55766431e+000, 3.32681934e-001],
'fb': [1.20224655e+001, 4.08338876e-001, 5.85819814e-002],
'fc': [4.14243130e-002, 2.61036728e+000, 4.20526863e-001],
'fd': [5.06771477e-002, 1.99344244e+001, 2.85686240e-001]},
'Tb': {'Z': 65, 'chisq': 0.020036,
'bond_length': [1.98, 1.78, 1.70, 0],
'fa': [2.59407462e-001, 3.16177855e+000, 2.75095751e+000],
'fb': [5.04689354e-002, 1.23140183e+001, 4.38337626e-001],
'fc': [2.79247686e+000, 3.85931001e-002, 4.10881708e-001],
'fd': [2.23797309e+001, 4.87920992e-002, 2.77622892e-001]},
'Dy': {'Z': 66, 'chisq': 0.019351,
'bond_length': [1.97, 1.77, 1.70, 0],
'fa': [3.16055396e+000, 2.82751709e+000, 2.75140255e-001],
'fb': [1.25470414e+001, 4.67899094e-001, 5.23226982e-002],
'fc': [4.00967160e-001, 2.63110834e+000, 3.61333817e-002],
'fd': [2.67614884e-001, 2.19498166e+001, 4.68871497e-002]},
'Ho': {'Z': 67, 'chisq': 0.018720,
'bond_length': [1.98, 1.78, 1.70, 0],
'fa': [2.88642467e-001, 2.90567296e+000, 3.15960159e+000],
'fb': [5.40507687e-002, 4.97581077e-001, 1.27599505e+001],
'fc': [3.91280259e-001, 2.48596038e+000, 3.37664478e-002],
'fd': [2.58151831e-001, 2.15400972e+001, 4.50664323e-002]},
'Er': {'Z': 68, 'chisq': 0.018677,
'bond_length': [1.96, 1.76, 1.70, 0],
'fa': [3.15573213e+000, 3.11519560e-001, 2.97722406e+000],
'fb': [1.29729009e+001, 5.81399387e-002, 5.31213394e-001],
'fc': [3.81563854e-001, 2.40247532e+000, 3.15224214e-002],
'fd': [2.49195776e-001, 2.13627616e+001, 4.33253257e-002]},
'Tm': {'Z': 69, 'chisq': 0.018176,
'bond_length': [1.95, 1.75, 1.70, 0],
'fa': [3.15591970e+000, 3.22544710e-001, 3.05569053e+000],
'fb': [1.31232407e+001, 5.97223323e-002, 5.61876773e-001],
'fc': [2.92845100e-002, 3.72487205e-001, 2.27833695e+000],
'fd': [4.16534255e-002, 2.40821967e-001, 2.10034185e+001]},
'Yb': {'Z': 70, 'chisq': 0.018460,
'bond_length': [2.10, 1.90, 1.70, 0],
'fa': [3.10794704e+000, 3.14091221e+000, 3.75660454e-001],
'fb': [6.06347847e-001, 1.33705269e+001, 7.29814740e-002],
'fc': [3.61901097e-001, 2.45409082e+000, 2.72383990e-002],
'fd': [2.32652051e-001, 2.12695209e+001, 3.99969597e-002]},
'Lu': {'Z': 71, 'chisq': 0.015021,
'bond_length': [1.93, 1.73, 1.70, 0],
'fa': [3.11446863e+000, 5.39634353e-001, 3.06460915e+000],
'fb': [1.38968881e+001, 8.91708508e-002, 6.79919563e-001],
'fc': [2.58563745e-002, 2.13983556e+000, 3.47788231e-001],
'fd': [3.82808522e-002, 1.80078788e+001, 2.22706591e-001]},
'Hf': {'Z': 72, 'chisq': 0.012070,
'bond_length': [1.78, 1.58, 1.70, 0],
'fa': [3.01166899e+000, 3.16284788e+000, 6.33421771e-001],
'fb': [7.10401889e-001, 1.38262192e+001, 9.48486572e-002],
'fc': [3.41417198e-001, 1.53566013e+000, 2.40723773e-002],
'fd': [2.14129678e-001, 1.55298698e+001, 3.67833690e-002]},
'Ta': {'Z': 73, 'chisq': 0.010775,
'bond_length': [1.67, 1.47, 1.70, 0],
'fa': [3.20236821e+000, 8.30098413e-001, 2.86552297e+000],
'fb': [1.38446369e+001, 1.18381581e-001, 7.66369118e-001],
'fc': [2.24813887e-002, 1.40165263e+000, 3.33740596e-001],
'fd': [3.52934622e-002, 1.46148877e+001, 2.05704486e-001]},
'W': {'Z': 74, 'chisq': 0.009479,
'bond_length': [1.61, 1.41, 1.70, 0],
'fa': [9.24906855e-001, 2.75554557e+000, 3.30440060e+000],
'fb': [1.28663377e-001, 7.65826479e-001, 1.34471170e+001],
'fc': [3.29973862e-001, 1.09916444e+000, 2.06498883e-002],
'fd': [1.98218895e-001, 1.35087534e+001, 3.38918459e-002]},
'Re': {'Z': 75, 'chisq': 0.004620,
'bond_length': [1.58, 1.38, 1.70, 0],
'fa': [1.96952105e+000, 1.21726619e+000, 4.10391685e+000],
'fb': [4.98830620e+001, 1.33243809e-001, 1.84396916e+000],
'fc': [2.90791978e-002, 2.30696669e-001, 6.08840299e-001],
'fd': [2.84192813e-002, 1.90968784e-001, 1.37090356e+000]},
'Os': {'Z': 76, 'chisq': 0.003085,
'bond_length': [1.55, 1.35, 1.70, 0],
'fa': [2.06385867e+000, 1.29603406e+000, 3.96920673e+000],
'fb': [4.05671697e+001, 1.46559047e-001, 1.82561596e+000],
'fc': [2.69835487e-002, 2.31083999e-001, 6.30466774e-001],
'fd': [2.84172045e-002, 1.79765184e-001, 1.38911543e+000]},
'Ir': {'Z': 77, 'chisq': 0.003924,
'bond_length': [1.56, 1.36, 1.70, 0],
'fa': [2.21522726e+000, 1.37573155e+000, 3.78244405e+000],
'fb': [3.24464090e+001, 1.60920048e-001, 1.78756553e+000],
'fc': [2.44643240e-002, 2.36932016e-001, 6.48471412e-001],
'fd': [2.82909938e-002, 1.70692368e-001, 1.37928390e+000]},
'Pt': {'Z': 78, 'chisq': 0.003817,
'bond_length': [1.59, 1.39, 1.72, 0],
'fa': [9.84697940e-001, 2.73987079e+000, 3.61696715e+000],
'fb': [1.60910839e-001, 7.18971667e-001, 1.29281016e+001],
'fc': [3.02885602e-001, 2.78370726e-001, 1.52124129e-002],
'fd': [1.70134854e-001, 1.49862703e+000, 2.83510822e-002]},
'Au': {'Z': 79, 'chisq': 0.003143,
'bond_length': [1.64, 1.44, 1.66, 0],
'fa': [9.61263398e-001, 3.69581030e+000, 2.77567491e+000],
'fb': [1.70932277e-001, 1.29335319e+001, 6.89997070e-001],
'fc': [2.95414176e-001, 3.11475743e-001, 1.43237267e-002],
'fd': [1.63525510e-001, 1.39200901e+000, 2.71265337e-002]},
'Hg': {'Z': 80, 'chisq': 0.002717,
'bond_length': [1.77, 1.57, 1.55, 0],
'fa': [1.29200491e+000, 2.75161478e+000, 3.49387949e+000],
'fb': [1.83432865e-001, 9.42368371e-001, 1.46235654e+001],
'fc': [2.77304636e-001, 4.30232810e-001, 1.48294351e-002],
'fd': [1.55110144e-001, 1.28871670e+000, 2.61903834e-002]},
'Tl': {'Z': 81, 'chisq': 0.003492,
'bond_length': [1.92, 1.72, 1.96, 0],
'fa': [3.75964730e+000, 3.21195904e+000, 6.47767825e-001],
'fb': [1.35041513e+001, 6.66330993e-001, 9.22518234e-002],
'fc': [2.76123274e-001, 3.18838810e-001, 1.31668419e-002],
'fd': [1.50312897e-001, 1.12565588e+000, 2.48879842e-002]},
'Pb': {'Z': 82, 'chisq': 0.001158,
'bond_length': [1.95, 1.75, 2.02, 0],
'fa': [1.00795975e+000, 3.09796153e+000, 3.61296864e+000],
'fb': [1.17268427e-001, 8.80453235e-001, 1.47325812e+001],
'fc': [2.62401476e-001, 4.05621995e-001, 1.31812509e-002],
'fd': [1.43491014e-001, 1.04103506e+000, 2.39575415e-002]},
'Bi': {'Z': 83, 'chisq': 0.026436,
'bond_length': [1.90, 1.70, 1.70, 0],
'fa': [1.59826875e+000, 4.38233925e+000, 2.06074719e+000],
'fb': [1.56897471e-001, 2.47094692e+000, 5.72438972e+001],
'fc': [1.94426023e-001, 8.22704978e-001, 2.33226953e-002],
'fd': [1.32979109e-001, 9.56532528e-001, 2.23038435e-002]},
'Po': {'Z': 84, 'chisq': 0.008962,
'bond_length': [1.96, 1.76, 1.70, 0],
'fa': [1.71463223e+000, 2.14115960e+000, 4.37512413e+000],
'fb': [9.79262841e+001, 2.10193717e-001, 3.66948812e+000],
'fc': [2.16216680e-002, 1.97843837e-001, 6.52047920e-001],
'fd': [1.98456144e-002, 1.33758807e-001, 7.80432104e-001]},
'At': {'Z': 85, 'chisq': 0.033776,
'bond_length': [2.00, 1.80, 1.70, 0],
'fa': [1.48047794e+000, 2.09174630e+000, 4.75246033e+000],
'fb': [1.25943919e+002, 1.83803008e-001, 4.19890596e+000],
'fc': [1.85643958e-002, 2.05859375e-001, 7.13540948e-001],
'fd': [1.81383503e-002, 1.33035404e-001, 7.03031938e-001]},
'Rn': {'Z': 86, 'chisq': 0.050132,
'bond_length': [2.40, 2.20, 1.70, 0],
'fa': [6.30022295e-001, 3.80962881e+000, 3.89756067e+000],
'fb': [1.40909762e-001, 3.08515540e+001, 6.51559763e-001],
'fc': [2.40755100e-001, 2.62868577e+000, 3.14285931e-002],
'fd': [1.08899672e-001, 6.42383261e+000, 2.42346699e-002]},
'Fr': {'Z': 87, 'chisq': 0.056720,
'bond_length': [3.00, 2.80, 1.70, 0],
'fa': [5.23288135e+000, 2.48604205e+000, 3.23431354e-001],
'fb': [8.60599536e+000, 3.04543982e-001, 3.87759096e-002],
'fc': [2.55403596e-001, 5.53607228e-001, 5.75278889e-003],
'fd': [1.28717724e-001, 5.36977452e-001, 1.29417790e-002]},
'Ra': {'Z': 88, 'chisq': 0.081498,
'bond_length': [2.46, 2.26, 1.70, 0],
'fa': [1.44192685e+000, 3.55291725e+000, 3.91259586e+000],
'fb': [1.18740873e-001, 1.01739750e+000, 6.31814783e+001],
'fc': [2.16173519e-001, 3.94191605e+000, 4.60422605e-002],
'fd': [9.55806441e-002, 3.50602732e+001, 2.20850385e-002]},
'Ac': {'Z': 89, 'chisq': 0.077643,
'bond_length': [2.09, 1.88, 1.70, 0],
'fa': [1.45864127e+000, 4.18945405e+000, 3.65866182e+000],
'fb': [1.07760494e-001, 8.89090649e+001, 1.05088931e+000],
'fc': [2.08479229e-001, 3.16528117e+000, 5.23892556e-002],
'fd': [9.09335557e-002, 3.13297788e+001, 2.08807697e-002]},
'Th': {'Z': 90, 'chisq': 0.048096,
'bond_length': [2.00, 1.80, 1.70, 0],
'fa': [1.19014064e+000, 2.55380607e+000, 4.68110181e+000],
'fb': [7.73468729e-002, 6.59693681e-001, 1.28013896e+001],
'fc': [2.26121303e-001, 3.58250545e-001, 7.82263950e-003],
'fd': [1.08632194e-001, 4.56765664e-001, 1.62623474e-002]},
'Pa': {'Z': 91, 'chisq': 0.070186,
'bond_length': [1.83, 1.63, 1.70, 0],
'fa': [4.68537504e+000, 2.98413708e+000, 8.91988061e-001],
'fb': [1.44503632e+001, 5.56438592e-001, 6.69512914e-002],
'fc': [2.24825384e-001, 3.04444846e-001, 9.48162708e-003],
'fd': [1.03235396e-001, 4.27255647e-001, 1.77730611e-002]},
'U': {'Z': 92, 'chisq': 0.072478,
'bond_length': [1.76, 1.56, 1.86, 0],
'fa': [4.63343606e+000, 3.18157056e+000, 8.76455075e-001],
'fb': [1.63377267e+001, 5.69517868e-001, 6.88860012e-002],
'fc': [2.21685477e-001, 2.72917100e-001, 1.11737298e-002],
'fd': [9.84254550e-002, 4.09470917e-001, 1.86215410e-002]},
'Np': {'Z': 93, 'chisq': 0.074792,
'bond_length': [1.80, 1.60, 1.70, 0],
'fa': [4.56773888e+000, 3.40325179e+000, 8.61841923e-001],
'fb': [1.90992795e+001, 5.90099634e-001, 7.03204851e-002],
'fc': [2.19728870e-001, 2.38176903e-001, 1.38306499e-002],
'fd': [9.36334280e-002, 3.93554882e-001, 1.94437286e-002]},
'Pu': {'Z': 94, 'chisq': 0.071877,
'bond_length': [1.84, 1.64, 1.70, 0],
'fa': [5.45671123e+000, 1.11687906e-001, 3.30260343e+000],
'fb': [1.01892720e+001, 3.98131313e-002, 3.14622212e-001],
'fc': [1.84568319e-001, 4.93644263e-001, 3.57484743e+000],
'fd': [1.04220860e-001, 4.63080540e-001, 2.19369542e+001]},
'Am': {'Z': 95, 'chisq': 0.062156,
'bond_length': [2.01, 1.81, 1.70, 0],
'fa': [5.38321999e+000, 1.23343236e-001, 3.46469090e+000],
'fb': [1.07289857e+001, 4.15137806e-002, 3.39326208e-001],
'fc': [1.75437132e-001, 3.39800073e+000, 4.69459519e-001],
'fd': [9.98932346e-002, 2.11601535e+001, 4.51996970e-001]},
'Cm': {'Z': 96, 'chisq': 0.050111,
'bond_length': [2.20, 2.00, 1.70, 0],
'fa': [5.38402377e+000, 3.49861264e+000, 1.88039547e-001],
'fb': [1.11211419e+001, 3.56750210e-001, 5.39853583e-002],
'fc': [1.69143137e-001, 3.19595016e+000, 4.64393059e-001],
'fd': [9.60082633e-002, 1.80694389e+001, 4.36318197e-001]},
'Bk': {'Z': 97, 'chisq': 0.044081,
'bond_length': [2.20, 2.00, 1.70, 0],
'fa': [3.66090688e+000, 2.03054678e-001, 5.30697515e+000],
'fb': [3.84420906e-001, 5.48547131e-002, 1.17150262e+001],
'fc': [1.60934046e-001, 3.04808401e+000, 4.43610295e-001],
'fd': [9.21020329e-002, 1.73525367e+001, 4.27132359e-001]},
'Cf': {'Z': 98, 'chisq': 0.041053,
'bond_length': [2.20, 2.00, 1.70, 0],
'fa': [3.94150390e+000, 5.16915345e+000, 1.61941074e-001],
'fb': [4.18246722e-001, 1.25201788e+001, 4.81540117e-002],
'fc': [4.15299561e-001, 2.91761325e+000, 1.51474927e-001],
'fd': [4.24913856e-001, 1.90899693e+001, 8.81568925e-002]}
}
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import Iterator, List
# Third-party imports
import numpy as np
# First-party imports
from gluonts.core.component import validated
from gluonts.transform import DataEntry, FlatMapTransformation, shift_timestamp
class ForkingSequenceSplitter(FlatMapTransformation):
"""Forking sequence splitter."""
@validated()
def __init__(
self,
train_sampler,
enc_len: int,
dec_len: int,
time_series_fields: List[str] = None,
target_in="target",
is_pad_out: str = "is_pad",
start_in: str = "start",
forecast_start_out: str = "forecast_start",
) -> None:
assert enc_len > 0, "The value of `enc_len` should be > 0"
assert dec_len > 0, "The value of `dec_len` should be > 0"
self.train_sampler = train_sampler
self.enc_len = enc_len
self.dec_len = dec_len
self.ts_fields = (
time_series_fields if time_series_fields is not None else []
)
self.target_in = target_in
self.is_pad_out = is_pad_out
self.start_in = start_in
self.forecast_start_out = forecast_start_out
def _past(self, col_name):
return f"past_{col_name}"
def _future(self, col_name):
return f"future_{col_name}"
def flatmap_transform(
self, data: DataEntry, is_train: bool
) -> Iterator[DataEntry]:
dec_len = self.dec_len
slice_cols = self.ts_fields + [self.target_in]
target = data[self.target_in]
if is_train:
if len(target) < self.dec_len:
# We currently cannot handle time series that are shorter than the
# prediction length during training, so we just skip these.
# If we want to include them we would need to pad and to mask
# the loss.
sampling_indices: List[int] = []
else:
sampling_indices = self.train_sampler(
target, 0, len(target) - self.dec_len
)
else:
sampling_indices = [len(target)]
for i in sampling_indices:
pad_length = max(self.enc_len - i, 0)
d = data.copy()
for ts_field in slice_cols:
if i > self.enc_len:
# truncate to past_length
past_piece = d[ts_field][..., i - self.enc_len : i]
elif i < self.enc_len:
pad_block = np.zeros(
d[ts_field].shape[:-1] + (pad_length,)
)
past_piece = np.concatenate(
[pad_block, d[ts_field][..., :i]], axis=-1
)
else:
past_piece = d[ts_field][..., :i]
d[self._past(ts_field)] = np.expand_dims(past_piece, -1)
if is_train and ts_field is self.target_in:
forking_dec_field = np.zeros(
shape=(self.enc_len, self.dec_len)
)
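                    # Row j of this (enc_len, dec_len) array holds the dec_len target
                    # values that immediately follow encoder position j, so each training
                    # example yields enc_len decoder windows ("forks") at once.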
for j in range(self.enc_len):
start_idx = i - self.enc_len + j + 1
if start_idx >= 0:
forking_dec_field[j, :] = d[ts_field][
..., start_idx : start_idx + dec_len
]
d[self._future(ts_field)] = forking_dec_field
del d[ts_field]
pad_indicator = np.zeros(self.enc_len)
if pad_length > 0:
pad_indicator[:pad_length] = 1
d[self._past(self.is_pad_out)] = pad_indicator
d[self.forecast_start_out] = shift_timestamp(d[self.start_in], i)
yield d
|
# Copyright (c) 2015-2020 Avere Systems, Inc. All Rights Reserved.
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root for license information.
'''Abstraction for backend service instance objects
Because the data structure/objects backend services consume and return are not
uniform, ServiceInstance abstracts them and provides a useful and consistent
interface.
Cookbook/examples:
# AWS existing
inst = ServiceInstance(service=aws, instance_id='i-83739423')
# AWS new
i = aws.create_instance(...)
inst = ServiceInstance(aws, i)
# GCE existing
inst = ServiceInstance(service=gce, instance_id='my-node-1')
# GCE new
i = gce.create_instance(...)
inst = ServiceInstance(gce, i)
    # or the .create() constructor, which takes the service plus create_instance() options
srv = aws or gce or azure
inst = ServiceInstance.create(srv, **opts)
# using the ServiceInstance
inst.start()
inst.stop()
inst.restart()
inst.destroy()
inst.shelve()
inst.unshelve()
inst.is_on()
inst.is_off()
inst.is_shelved()
inst.id()
inst.name()
inst.ip()
inst.fqdn()
inst.status()
inst.refresh()
inst.in_use_addresses()
inst.add_address('172.16.16.20')
inst.remove_address('172.16.16.20')
'''
from vFXT.service import vFXTConfigurationException
class ServiceInstance(object): #pylint: disable=useless-object-inheritance
'''Presents service specific instance objects in a general way. This may
or may not be a vFXT (and so is usable for general purpose cloud instances)
The ServiceInstance composes both the backend service object and the
instance object that is returned from the backend service. Every
method delegates to the service interface.
'''
def __init__(self, service=None, instance_id=None, instance=None):
'''Constructor
Arguments:
service (Service object): backend service
instance_id (str, optional): instance ID
instance (obj, optional): instance object as returned from the backend
Either an instance ID or an instance must be provided. If the
instance ID is provided, the instance object is looked up from
the backend.
'''
self.instance_id = instance_id
self.instance = instance
self.service = service
if instance_id and service and not instance:
self.instance = service.get_instance(instance_id)
if not self.instance:
raise vFXTConfigurationException("No such instance: {}".format(instance_id))
if instance and service and not instance_id:
self.instance_id = service.instance_id(self.instance)
if not self.instance:
raise vFXTConfigurationException("An instance ID or instance object must be provided")
@classmethod
def create(cls, service, *args, **kwargs):
'''Create an instance
This delegates to the service.create_instance call. See
documentation there for specific arguments supported by the
backend service.
'''
instance = service.create_instance(*args, **kwargs)
return cls(service, instance=instance)
# delegate to service
def can_stop(self):
'''Some instance configurations cannot be stopped. Check if this is one.'''
return self.service.can_stop(self.instance)
def stop(self):
'''Stop the instance'''
self.service.stop(self.instance)
self.refresh()
def start(self):
'''Start the instance'''
self.service.start(self.instance)
self.refresh()
def restart(self):
'''Restart the instance'''
self.service.restart(self.instance)
self.refresh()
def destroy(self, **options):
'''Destroy the instance'''
self.refresh()
return self.service.destroy(self.instance, **options)
def is_on(self):
'''Return True if the instance is currently on'''
return self.service.is_on(self.instance)
def is_off(self):
'''Return True if the instance is currently off'''
return self.service.is_off(self.instance)
def is_shelved(self):
'''Return True if the instance is currently shelved'''
return self.service.is_shelved(self.instance)
def id(self):
'''The instance ID'''
return self.instance_id
def name(self):
'''The instance name'''
return self.service.name(self.instance)
def ip(self):
'''The primary IP address of the instance'''
return self.service.ip(self.instance)
def fqdn(self):
'''The instance fully qualified domain name'''
return self.service.fqdn(self.instance)
def status(self):
'''The instance status (str)'''
return self.service.status(self.instance)
def refresh(self):
'''Refresh the backend instance object'''
self.instance = self.service.refresh(self.instance)
if not self.instance:
raise vFXTConfigurationException("Failed to refresh, no such instance: {}".format(self.instance_id))
def can_shelve(self):
'''Some instance configurations cannot be shelved. Check if this is one.'''
return self.service.can_shelve(self.instance)
def shelve(self):
'''Shelve the instance'''
self.refresh()
self.service.shelve(self.instance)
self.refresh()
def unshelve(self, **options):
'''Unshelve the instance'''
self.refresh()
self.service.unshelve(self.instance, **options)
self.refresh()
def wait_for_service_checks(self):
'''Wait for any instance service checks (if available)'''
return self.service.wait_for_service_checks(self.instance)
def in_use_addresses(self, category='all'):
'''Get the in use addresses for the instance
Arguments:
category (str): all (default), instance, routes
'''
self.refresh()
return self.service.instance_in_use_addresses(self.instance, category)
def add_address(self, address, **options):
'''Add an address to the instance
Arguments:
address (str): IP address
options (dict): passed to service backend
'''
self.refresh()
self.service.add_instance_address(self.instance, address, **options)
self.refresh()
def remove_address(self, address):
'''Remove an address from the instance
Arguments:
address (str): IP address
'''
self.refresh()
self.service.remove_instance_address(self.instance, address)
self.refresh()
|
import pandas as pd
import numpy as np
import os
import tqdm
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM, Dropout
from sklearn.model_selection import train_test_split
label2int = {
"male": 1,
"female": 0
}
def load_data(vector_length=128):
"""A function to load gender recognition dataset from `data` folder
After the second run, this will load from results/features.npy and results/labels.npy files
as it is much faster!"""
# make sure results folder exists
if not os.path.isdir("results"):
os.mkdir("results")
    # if the features & labels have already been extracted and saved, load them from disk instead
if os.path.isfile("results/features.npy") and os.path.isfile("results/labels.npy"):
X = np.load("results/features.npy")
y = np.load("results/labels.npy")
return X, y
# read dataframe
df = pd.read_csv("balanced-all.csv")
# get total samples
n_samples = len(df)
# get total male samples
n_male_samples = len(df[df['gender'] == 'male'])
# get total female samples
n_female_samples = len(df[df['gender'] == 'female'])
print("Total samples:", n_samples)
print("Total male samples:", n_male_samples)
print("Total female samples:", n_female_samples)
# initialize an empty array for all audio features
X = np.zeros((n_samples, vector_length))
# initialize an empty array for all audio labels (1 for male and 0 for female)
y = np.zeros((n_samples, 1))
for i, (filename, gender) in tqdm.tqdm(enumerate(zip(df['filename'], df['gender'])), "Loading data", total=n_samples):
features = np.load(filename)
X[i] = features
y[i] = label2int[gender]
# save the audio features and labels into files
# so we won't load each one of them next run
np.save("results/features", X)
np.save("results/labels", y)
return X, y
def split_data(X, y, test_size=0.1, valid_size=0.1):
# split training set and testing set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=7)
# split training set and validation set
X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=valid_size, random_state=7)
# return a dictionary of values
return {
"X_train": X_train,
"X_valid": X_valid,
"X_test": X_test,
"y_train": y_train,
"y_valid": y_valid,
"y_test": y_test
}
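# Hedged usage sketch (illustrative only, not part of the original script): the helpers
# above and create_model() below are typically wired together roughly as follows; the
# epoch count and batch size here are assumptions, not values taken from this file.
#
#     X, y = load_data()
#     data = split_data(X, y)
#     model = create_model()
#     model.fit(data["X_train"], data["y_train"],
#               validation_data=(data["X_valid"], data["y_valid"]),
#               epochs=100, batch_size=64)
#     model.evaluate(data["X_test"], data["y_test"])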
def create_model(vector_length=128):
"""5 hidden dense layers from 256 units to 64, not the best model, but not bad."""
model = Sequential()
model.add(Dense(256, input_shape=(vector_length,)))
model.add(Dropout(0.3))
model.add(Dense(256, activation="relu"))
model.add(Dropout(0.3))
model.add(Dense(128, activation="relu"))
model.add(Dropout(0.3))
model.add(Dense(128, activation="relu"))
model.add(Dropout(0.3))
model.add(Dense(64, activation="relu"))
model.add(Dropout(0.3))
# one output neuron with sigmoid activation function, 0 means female, 1 means male
model.add(Dense(1, activation="sigmoid"))
# using binary crossentropy as it's male/female classification (binary)
model.compile(loss="binary_crossentropy", metrics=["accuracy"], optimizer="adam")
# print summary of the model
## model.summary()
    return model
|
from random import randint
from django.contrib.auth import get_user_model
__author__ = "Alex Laird"
__copyright__ = "Copyright 2019, Helium Edu"
__version__ = "1.4.38"
def generate_phone_verification_code():
code = None
while not code:
code = randint(100000, 999999)
        # Ensure the code is not already in use in the database
if get_user_model().objects.phone_verification_code_used(code):
code = None
return code
|
# Parallel code to extract progenitor particles (of any type) from TNG snapshots. Run this script directly; the core logic lives in progenitor_particles.py
import argparse
import h5py
from mpi4py import MPI
import numpy as np
import time
import glob
import os
from codes import progenitor_particles
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
ts = time.asctime()
print('\n Rank =', rank, 'started!', ts, flush=True)
def _get_files(basepath, snap):
"""Get all files whithin each rank """
rank = comm.Get_rank()
size = comm.Get_size()
snap=str(snap).rjust(3,'0')
dir_name = os.path.join(basepath, "snapdir_"+snap)
if not os.path.isdir(dir_name):
        raise OSError('The snapshot directory was not found')
    # Build the list of snapshot files to be examined
    fnames = glob.glob(os.path.join(dir_name, "snap_"+snap+"*.hdf5"))
num_files = len(fnames)
if rank == 0 :
print('num_files = ', num_files)
return fnames
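    # NOTE: as written, every rank receives the full list of snapshot files; the
    # disabled block below sketches how that list could instead be split per rank.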
"""
else:
files_per_rank = int(num_files/size)
#a list of file names for each rank
fnames_rank = fnames[rank*files_per_rank : (rank+1)*files_per_rank]
        # Some ranks get 1 more snapshot file
remained = int(num_files - files_per_rank*size)
print('remained files ', remained)
if rank in range(1,remained+1):
fnames_rank.append(fnames[files_per_rank*size + rank-1 ])
print('Ranks with more files ', rank, fnames_rank)
return fnames_rank
"""
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--PartType', type=int, required=True)
parser.add_argument('--basepath', type=str, required=True,
help='The path to parent basepath for snapshots')
parser.add_argument('--snap', type=int, required=True,
help='Snapshot number')
parser.add_argument('--coord_dir', type=str, required=True,
help='A temporary directory is made within this to save the coordinates of the particles at high z')
parser.add_argument('--savedir', type=str, required=True,
                        help='The directory in which to save the full density map of this cluster')
parser.add_argument('--cluster', type=int, default=0, required=True,
help='The cluster index')
args = parser.parse_args()
fnames = _get_files(basepath=args.basepath, snap = args.snap)
# Call the core code
progenitor_particles.get_part_coord_parallel(MPI=MPI, cluster_ind= args.cluster, basepath = args.basepath, fnames=fnames, coord_dir= args.coord_dir, savedir=args.savedir, PartType=args.PartType)
te = time.asctime()
print('\n Rank =', rank, 'Ended at :', te, flush=True)
    # Make sure the earliest rank waits until all ranks are done
comm.Barrier()
|
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=too-many-arguments
"""
This submodule contains the discrete-variable quantum operations that are the
core parameterized gates.
"""
# pylint:disable=abstract-method,arguments-differ,protected-access,invalid-overridden-method
import functools
import math
from operator import matmul
import numpy as np
import pennylane as qml
from pennylane.operation import AnyWires, Operation
from pennylane.ops.qubit.non_parametric_ops import PauliX, PauliY, PauliZ, Hadamard
from pennylane.operation import expand_matrix
from pennylane.utils import pauli_eigs
from pennylane.wires import Wires
INV_SQRT2 = 1 / math.sqrt(2)
stack_last = functools.partial(qml.math.stack, axis=-1)
class RX(Operation):
r"""
The single qubit X rotation
.. math:: R_x(\phi) = e^{-i\phi\sigma_x/2} = \begin{bmatrix}
\cos(\phi/2) & -i\sin(\phi/2) \\
-i\sin(\phi/2) & \cos(\phi/2)
\end{bmatrix}.
**Details:**
* Number of wires: 1
* Number of parameters: 1
* Number of dimensions per parameter: (0,)
* Gradient recipe: :math:`\frac{d}{d\phi}f(R_x(\phi)) = \frac{1}{2}\left[f(R_x(\phi+\pi/2)) - f(R_x(\phi-\pi/2))\right]`
where :math:`f` is an expectation value depending on :math:`R_x(\phi)`.
Args:
phi (float): rotation angle :math:`\phi`
wires (Sequence[int] or int): the wire the operation acts on
do_queue (bool): Indicates whether the operator should be
immediately pushed into the Operator queue (optional)
id (str or None): String representing the operation (optional)
"""
num_wires = 1
num_params = 1
"""int: Number of trainable parameters that the operator depends on."""
ndim_params = (0,)
"""tuple[int]: Number of dimensions per trainable parameter that the operator depends on."""
basis = "X"
grad_method = "A"
parameter_frequencies = [(1,)]
def generator(self):
return -0.5 * PauliX(wires=self.wires)
def __init__(self, phi, wires, do_queue=True, id=None):
super().__init__(phi, wires=wires, do_queue=do_queue, id=id)
@staticmethod
def compute_matrix(theta): # pylint: disable=arguments-differ
r"""Representation of the operator as a canonical matrix in the computational basis (static method).
The canonical matrix is the textbook matrix representation that does not consider wires.
Implicitly, this assumes that the wires of the operator correspond to the global wire order.
.. seealso:: :meth:`~.RX.matrix`
Args:
theta (tensor_like or float): rotation angle
Returns:
tensor_like: canonical matrix
**Example**
>>> qml.RX.compute_matrix(torch.tensor(0.5))
tensor([[0.9689+0.0000j, 0.0000-0.2474j],
[0.0000-0.2474j, 0.9689+0.0000j]])
"""
c = qml.math.cos(theta / 2)
s = qml.math.sin(theta / 2)
if qml.math.get_interface(theta) == "tensorflow":
c = qml.math.cast_like(c, 1j)
s = qml.math.cast_like(s, 1j)
# The following avoids casting an imaginary quantity to reals when backpropagating
c = (1 + 0j) * c
js = -1j * s
return qml.math.stack([stack_last([c, js]), stack_last([js, c])], axis=-2)
def adjoint(self):
return RX(-self.data[0], wires=self.wires)
def pow(self, z):
return [RX(self.data[0] * z, wires=self.wires)]
def _controlled(self, wire):
CRX(*self.parameters, wires=wire + self.wires)
def single_qubit_rot_angles(self):
# RX(\theta) = RZ(-\pi/2) RY(\theta) RZ(\pi/2)
pi_half = qml.math.ones_like(self.data[0]) * (np.pi / 2)
return [pi_half, self.data[0], -pi_half]
class RY(Operation):
r"""
The single qubit Y rotation
.. math:: R_y(\phi) = e^{-i\phi\sigma_y/2} = \begin{bmatrix}
\cos(\phi/2) & -\sin(\phi/2) \\
\sin(\phi/2) & \cos(\phi/2)
\end{bmatrix}.
**Details:**
* Number of wires: 1
* Number of parameters: 1
* Number of dimensions per parameter: (0,)
* Gradient recipe: :math:`\frac{d}{d\phi}f(R_y(\phi)) = \frac{1}{2}\left[f(R_y(\phi+\pi/2)) - f(R_y(\phi-\pi/2))\right]`
where :math:`f` is an expectation value depending on :math:`R_y(\phi)`.
Args:
phi (float): rotation angle :math:`\phi`
wires (Sequence[int] or int): the wire the operation acts on
do_queue (bool): Indicates whether the operator should be
immediately pushed into the Operator queue (optional)
id (str or None): String representing the operation (optional)
"""
num_wires = 1
num_params = 1
"""int: Number of trainable parameters that the operator depends on."""
ndim_params = (0,)
"""tuple[int]: Number of dimensions per trainable parameter that the operator depends on."""
basis = "Y"
grad_method = "A"
parameter_frequencies = [(1,)]
def generator(self):
return -0.5 * PauliY(wires=self.wires)
def __init__(self, phi, wires, do_queue=True, id=None):
super().__init__(phi, wires=wires, do_queue=do_queue, id=id)
@staticmethod
def compute_matrix(theta): # pylint: disable=arguments-differ
r"""Representation of the operator as a canonical matrix in the computational basis (static method).
The canonical matrix is the textbook matrix representation that does not consider wires.
Implicitly, this assumes that the wires of the operator correspond to the global wire order.
.. seealso:: :meth:`~.RY.matrix`
Args:
theta (tensor_like or float): rotation angle
Returns:
tensor_like: canonical matrix
**Example**
>>> qml.RY.compute_matrix(torch.tensor(0.5))
tensor([[ 0.9689, -0.2474],
[ 0.2474, 0.9689]])
"""
c = qml.math.cos(theta / 2)
s = qml.math.sin(theta / 2)
if qml.math.get_interface(theta) == "tensorflow":
c = qml.math.cast_like(c, 1j)
s = qml.math.cast_like(s, 1j)
# The following avoids casting an imaginary quantity to reals when backpropagating
c = (1 + 0j) * c
s = (1 + 0j) * s
return qml.math.stack([stack_last([c, -s]), stack_last([s, c])], axis=-2)
def adjoint(self):
return RY(-self.data[0], wires=self.wires)
def pow(self, z):
return [RY(self.data[0] * z, wires=self.wires)]
def _controlled(self, wire):
CRY(*self.parameters, wires=wire + self.wires)
def single_qubit_rot_angles(self):
# RY(\theta) = RZ(0) RY(\theta) RZ(0)
return [0.0, self.data[0], 0.0]
class RZ(Operation):
r"""
The single qubit Z rotation
.. math:: R_z(\phi) = e^{-i\phi\sigma_z/2} = \begin{bmatrix}
e^{-i\phi/2} & 0 \\
0 & e^{i\phi/2}
\end{bmatrix}.
**Details:**
* Number of wires: 1
* Number of parameters: 1
* Number of dimensions per parameter: (0,)
* Gradient recipe: :math:`\frac{d}{d\phi}f(R_z(\phi)) = \frac{1}{2}\left[f(R_z(\phi+\pi/2)) - f(R_z(\phi-\pi/2))\right]`
where :math:`f` is an expectation value depending on :math:`R_z(\phi)`.
Args:
phi (float): rotation angle :math:`\phi`
wires (Sequence[int] or int): the wire the operation acts on
do_queue (bool): Indicates whether the operator should be
immediately pushed into the Operator queue (optional)
id (str or None): String representing the operation (optional)
"""
num_wires = 1
num_params = 1
"""int: Number of trainable parameters that the operator depends on."""
ndim_params = (0,)
"""tuple[int]: Number of dimensions per trainable parameter that the operator depends on."""
basis = "Z"
grad_method = "A"
parameter_frequencies = [(1,)]
def generator(self):
return -0.5 * PauliZ(wires=self.wires)
def __init__(self, phi, wires, do_queue=True, id=None):
super().__init__(phi, wires=wires, do_queue=do_queue, id=id)
@staticmethod
def compute_matrix(theta): # pylint: disable=arguments-differ
r"""Representation of the operator as a canonical matrix in the computational basis (static method).
The canonical matrix is the textbook matrix representation that does not consider wires.
Implicitly, this assumes that the wires of the operator correspond to the global wire order.
.. seealso:: :meth:`~.RZ.matrix`
Args:
theta (tensor_like or float): rotation angle
Returns:
tensor_like: canonical matrix
**Example**
>>> qml.RZ.compute_matrix(torch.tensor(0.5))
tensor([[0.9689-0.2474j, 0.0000+0.0000j],
[0.0000+0.0000j, 0.9689+0.2474j]])
"""
if qml.math.get_interface(theta) == "tensorflow":
theta = qml.math.cast_like(theta, 1j)
p = qml.math.exp(-0.5j * theta)
z = qml.math.zeros_like(p)
return qml.math.stack([stack_last([p, z]), stack_last([z, qml.math.conj(p)])], axis=-2)
@staticmethod
def compute_eigvals(theta): # pylint: disable=arguments-differ
r"""Eigenvalues of the operator in the computational basis (static method).
If :attr:`diagonalizing_gates` are specified and implement a unitary :math:`U`,
the operator can be reconstructed as
.. math:: O = U \Sigma U^{\dagger},
where :math:`\Sigma` is the diagonal matrix containing the eigenvalues.
Otherwise, no particular order for the eigenvalues is guaranteed.
.. seealso:: :meth:`~.RZ.eigvals`
Args:
theta (tensor_like or float): rotation angle
Returns:
tensor_like: eigenvalues
**Example**
>>> qml.RZ.compute_eigvals(torch.tensor(0.5))
tensor([0.9689-0.2474j, 0.9689+0.2474j])
"""
if qml.math.get_interface(theta) == "tensorflow":
theta = qml.math.cast_like(theta, 1j)
p = qml.math.exp(-0.5j * theta)
return stack_last([p, qml.math.conj(p)])
def adjoint(self):
return RZ(-self.data[0], wires=self.wires)
def pow(self, z):
return [RZ(self.data[0] * z, wires=self.wires)]
def _controlled(self, wire):
CRZ(*self.parameters, wires=wire + self.wires)
def single_qubit_rot_angles(self):
# RZ(\theta) = RZ(\theta) RY(0) RZ(0)
return [self.data[0], 0.0, 0.0]
class PhaseShift(Operation):
r"""
Arbitrary single qubit local phase shift
.. math:: R_\phi(\phi) = e^{i\phi/2}R_z(\phi) = \begin{bmatrix}
1 & 0 \\
0 & e^{i\phi}
\end{bmatrix}.
**Details:**
* Number of wires: 1
* Number of parameters: 1
* Number of dimensions per parameter: (0,)
* Gradient recipe: :math:`\frac{d}{d\phi}f(R_\phi(\phi)) = \frac{1}{2}\left[f(R_\phi(\phi+\pi/2)) - f(R_\phi(\phi-\pi/2))\right]`
where :math:`f` is an expectation value depending on :math:`R_{\phi}(\phi)`.
Args:
phi (float): rotation angle :math:`\phi`
wires (Sequence[int] or int): the wire the operation acts on
do_queue (bool): Indicates whether the operator should be
immediately pushed into the Operator queue (optional)
id (str or None): String representing the operation (optional)
"""
num_wires = 1
num_params = 1
"""int: Number of trainable parameters that the operator depends on."""
ndim_params = (0,)
"""tuple[int]: Number of dimensions per trainable parameter that the operator depends on."""
basis = "Z"
grad_method = "A"
parameter_frequencies = [(1,)]
def generator(self):
return qml.Projector(np.array([1]), wires=self.wires)
def __init__(self, phi, wires, do_queue=True, id=None):
super().__init__(phi, wires=wires, do_queue=do_queue, id=id)
def label(self, decimals=None, base_label=None, cache=None):
return super().label(decimals=decimals, base_label=base_label or "Rϕ", cache=cache)
@staticmethod
def compute_matrix(phi): # pylint: disable=arguments-differ
r"""Representation of the operator as a canonical matrix in the computational basis (static method).
The canonical matrix is the textbook matrix representation that does not consider wires.
Implicitly, this assumes that the wires of the operator correspond to the global wire order.
.. seealso:: :meth:`~.PhaseShift.matrix`
Args:
phi (tensor_like or float): phase shift
Returns:
tensor_like: canonical matrix
**Example**
>>> qml.PhaseShift.compute_matrix(torch.tensor(0.5))
        tensor([[1.0000+0.0000j, 0.0000+0.0000j],
                [0.0000+0.0000j, 0.8776+0.4794j]])
"""
if qml.math.get_interface(phi) == "tensorflow":
phi = qml.math.cast_like(phi, 1j)
p = qml.math.exp(1j * phi)
z = qml.math.zeros_like(p)
return qml.math.stack([stack_last([qml.math.ones_like(p), z]), stack_last([z, p])], axis=-2)
@staticmethod
def compute_eigvals(phi): # pylint: disable=arguments-differ
r"""Eigenvalues of the operator in the computational basis (static method).
If :attr:`diagonalizing_gates` are specified and implement a unitary :math:`U`,
the operator can be reconstructed as
.. math:: O = U \Sigma U^{\dagger},
where :math:`\Sigma` is the diagonal matrix containing the eigenvalues.
Otherwise, no particular order for the eigenvalues is guaranteed.
.. seealso:: :meth:`~.PhaseShift.eigvals`
Args:
phi (tensor_like or float): phase shift
Returns:
tensor_like: eigenvalues
**Example**
>>> qml.PhaseShift.compute_eigvals(torch.tensor(0.5))
tensor([1.0000+0.0000j, 0.8776+0.4794j])
"""
if qml.math.get_interface(phi) == "tensorflow":
phi = qml.math.cast_like(phi, 1j)
p = qml.math.exp(1j * phi)
return stack_last([qml.math.ones_like(p), p])
@staticmethod
def compute_decomposition(phi, wires):
r"""Representation of the operator as a product of other operators (static method). :
.. math:: O = O_1 O_2 \dots O_n.
.. seealso:: :meth:`~.PhaseShift.decomposition`.
Args:
phi (float): rotation angle :math:`\phi`
wires (Any, Wires): wires that the operator acts on
Returns:
list[Operator]: decomposition into lower level operations
**Example:**
>>> qml.PhaseShift.compute_decomposition(1.234, wires=0)
[RZ(1.234, wires=[0])]
"""
return [RZ(phi, wires=wires)]
def adjoint(self):
return PhaseShift(-self.data[0], wires=self.wires)
def pow(self, z):
return [PhaseShift(self.data[0] * z, wires=self.wires)]
def _controlled(self, wire):
ControlledPhaseShift(*self.parameters, wires=wire + self.wires)
def single_qubit_rot_angles(self):
# PhaseShift(\theta) = RZ(\theta) RY(0) RZ(0)
return [self.data[0], 0.0, 0.0]
class ControlledPhaseShift(Operation):
r"""
A qubit controlled phase shift.
.. math:: CR_\phi(\phi) = \begin{bmatrix}
1 & 0 & 0 & 0 \\
0 & 1 & 0 & 0 \\
0 & 0 & 1 & 0 \\
0 & 0 & 0 & e^{i\phi}
\end{bmatrix}.
.. note:: The first wire provided corresponds to the **control qubit**.
**Details:**
* Number of wires: 2
* Number of parameters: 1
* Number of dimensions per parameter: (0,)
* Gradient recipe: :math:`\frac{d}{d\phi}f(CR_\phi(\phi)) = \frac{1}{2}\left[f(CR_\phi(\phi+\pi/2)) - f(CR_\phi(\phi-\pi/2))\right]`
where :math:`f` is an expectation value depending on :math:`CR_{\phi}(\phi)`.
Args:
phi (float): rotation angle :math:`\phi`
wires (Sequence[int]): the wire the operation acts on
do_queue (bool): Indicates whether the operator should be
immediately pushed into the Operator queue (optional)
id (str or None): String representing the operation (optional)
"""
num_wires = 2
num_params = 1
"""int: Number of trainable parameters that the operator depends on."""
ndim_params = (0,)
"""tuple[int]: Number of dimensions per trainable parameter that the operator depends on."""
basis = "Z"
grad_method = "A"
parameter_frequencies = [(1,)]
def generator(self):
return qml.Projector(np.array([1, 1]), wires=self.wires)
def __init__(self, phi, wires, do_queue=True, id=None):
super().__init__(phi, wires=wires, do_queue=do_queue, id=id)
def label(self, decimals=None, base_label=None, cache=None):
return super().label(decimals=decimals, base_label=base_label or "Rϕ", cache=cache)
@staticmethod
def compute_matrix(phi): # pylint: disable=arguments-differ
r"""Representation of the operator as a canonical matrix in the computational basis (static method).
The canonical matrix is the textbook matrix representation that does not consider wires.
Implicitly, this assumes that the wires of the operator correspond to the global wire order.
        .. seealso:: :meth:`~.ControlledPhaseShift.matrix`
Args:
phi (tensor_like or float): phase shift
Returns:
tensor_like: canonical matrix
**Example**
        >>> qml.ControlledPhaseShift.compute_matrix(torch.tensor(0.5))
tensor([[1.0+0.0j, 0.0+0.0j, 0.0+0.0j, 0.0000+0.0000j],
[0.0+0.0j, 1.0+0.0j, 0.0+0.0j, 0.0000+0.0000j],
[0.0+0.0j, 0.0+0.0j, 1.0+0.0j, 0.0000+0.0000j],
[0.0+0.0j, 0.0+0.0j, 0.0+0.0j, 0.8776+0.4794j]])
"""
if qml.math.get_interface(phi) == "tensorflow":
phi = qml.math.cast_like(phi, 1j)
exp_part = qml.math.exp(1j * phi)
if qml.math.ndim(phi) > 0:
ones = qml.math.ones_like(exp_part)
zeros = qml.math.zeros_like(exp_part)
matrix = [
[ones, zeros, zeros, zeros],
[zeros, ones, zeros, zeros],
[zeros, zeros, ones, zeros],
[zeros, zeros, zeros, exp_part],
]
return qml.math.stack([stack_last(row) for row in matrix], axis=-2)
return qml.math.diag([1, 1, 1, exp_part])
@staticmethod
def compute_eigvals(phi): # pylint: disable=arguments-differ
r"""Eigenvalues of the operator in the computational basis (static method).
If :attr:`diagonalizing_gates` are specified and implement a unitary :math:`U`,
the operator can be reconstructed as
.. math:: O = U \Sigma U^{\dagger},
where :math:`\Sigma` is the diagonal matrix containing the eigenvalues.
Otherwise, no particular order for the eigenvalues is guaranteed.
.. seealso:: :meth:`~.ControlledPhaseShift.eigvals`
Args:
phi (tensor_like or float): phase shift
Returns:
tensor_like: eigenvalues
**Example**
>>> qml.ControlledPhaseShift.compute_eigvals(torch.tensor(0.5))
tensor([1.0000+0.0000j, 1.0000+0.0000j, 1.0000+0.0000j, 0.8776+0.4794j])
"""
if qml.math.get_interface(phi) == "tensorflow":
phi = qml.math.cast_like(phi, 1j)
exp_part = qml.math.exp(1j * phi)
ones = qml.math.ones_like(exp_part)
return stack_last([ones, ones, ones, exp_part])
@staticmethod
def compute_decomposition(phi, wires):
r"""Representation of the operator as a product of other operators (static method). :
.. math:: O = O_1 O_2 \dots O_n.
.. seealso:: :meth:`~.ControlledPhaseShift.decomposition`.
Args:
phi (float): rotation angle :math:`\phi`
wires (Iterable, Wires): wires that the operator acts on
Returns:
list[Operator]: decomposition into lower level operations
**Example:**
>>> qml.ControlledPhaseShift.compute_decomposition(1.234, wires=(0,1))
[PhaseShift(0.617, wires=[0]),
CNOT(wires=[0, 1]),
PhaseShift(-0.617, wires=[1]),
CNOT(wires=[0, 1]),
PhaseShift(0.617, wires=[1])]
"""
decomp_ops = [
qml.PhaseShift(phi / 2, wires=wires[0]),
qml.CNOT(wires=wires),
qml.PhaseShift(-phi / 2, wires=wires[1]),
qml.CNOT(wires=wires),
qml.PhaseShift(phi / 2, wires=wires[1]),
]
return decomp_ops
def adjoint(self):
return ControlledPhaseShift(-self.data[0], wires=self.wires)
def pow(self, z):
return [ControlledPhaseShift(self.data[0] * z, wires=self.wires)]
@property
def control_wires(self):
return Wires(self.wires[0])
CPhase = ControlledPhaseShift
class Rot(Operation):
r"""
Arbitrary single qubit rotation
.. math::
R(\phi,\theta,\omega) = RZ(\omega)RY(\theta)RZ(\phi)= \begin{bmatrix}
e^{-i(\phi+\omega)/2}\cos(\theta/2) & -e^{i(\phi-\omega)/2}\sin(\theta/2) \\
e^{-i(\phi-\omega)/2}\sin(\theta/2) & e^{i(\phi+\omega)/2}\cos(\theta/2)
\end{bmatrix}.
**Details:**
* Number of wires: 1
* Number of parameters: 3
* Number of dimensions per parameter: (0, 0, 0)
* Gradient recipe: :math:`\frac{d}{d\phi}f(R(\phi, \theta, \omega)) = \frac{1}{2}\left[f(R(\phi+\pi/2, \theta, \omega)) - f(R(\phi-\pi/2, \theta, \omega))\right]`
where :math:`f` is an expectation value depending on :math:`R(\phi, \theta, \omega)`.
This gradient recipe applies for each angle argument :math:`\{\phi, \theta, \omega\}`.
.. note::
If the ``Rot`` gate is not supported on the targeted device, PennyLane
will attempt to decompose the gate into :class:`~.RZ` and :class:`~.RY` gates.
Args:
phi (float): rotation angle :math:`\phi`
theta (float): rotation angle :math:`\theta`
omega (float): rotation angle :math:`\omega`
wires (Any, Wires): the wire the operation acts on
do_queue (bool): Indicates whether the operator should be
immediately pushed into the Operator queue (optional)
id (str or None): String representing the operation (optional)
"""
num_wires = 1
num_params = 3
"""int: Number of trainable parameters that the operator depends on."""
ndim_params = (0, 0, 0)
"""tuple[int]: Number of dimensions per trainable parameter that the operator depends on."""
grad_method = "A"
parameter_frequencies = [(1,), (1,), (1,)]
def __init__(self, phi, theta, omega, wires, do_queue=True, id=None):
super().__init__(phi, theta, omega, wires=wires, do_queue=do_queue, id=id)
@staticmethod
def compute_matrix(phi, theta, omega): # pylint: disable=arguments-differ
r"""Representation of the operator as a canonical matrix in the computational basis (static method).
The canonical matrix is the textbook matrix representation that does not consider wires.
Implicitly, this assumes that the wires of the operator correspond to the global wire order.
.. seealso:: :meth:`~.Rot.matrix`
Args:
phi (tensor_like or float): first rotation angle
theta (tensor_like or float): second rotation angle
omega (tensor_like or float): third rotation angle
Returns:
tensor_like: canonical matrix
**Example**
>>> qml.Rot.compute_matrix(torch.tensor(0.1), torch.tensor(0.2), torch.tensor(0.3))
tensor([[ 0.9752-0.1977j, -0.0993+0.0100j],
[ 0.0993+0.0100j, 0.9752+0.1977j]])
"""
# It might be that they are in different interfaces, e.g.,
# Rot(0.2, 0.3, tf.Variable(0.5), wires=0)
# So we need to make sure the matrix comes out having the right type
interface = qml.math._multi_dispatch([phi, theta, omega])
c = qml.math.cos(theta / 2)
s = qml.math.sin(theta / 2)
        # If any of the angles is a TensorFlow tensor, everything has to be cast to complex
if interface == "tensorflow":
phi = qml.math.cast_like(qml.math.asarray(phi, like=interface), 1j)
omega = qml.math.cast_like(qml.math.asarray(omega, like=interface), 1j)
c = qml.math.cast_like(qml.math.asarray(c, like=interface), 1j)
s = qml.math.cast_like(qml.math.asarray(s, like=interface), 1j)
        # The following variable is used to ensure that all terms to be stacked have the same shape
one = qml.math.ones_like(phi) * qml.math.ones_like(omega)
c = c * one
s = s * one
mat = [
[
qml.math.exp(-0.5j * (phi + omega)) * c,
-qml.math.exp(0.5j * (phi - omega)) * s,
],
[
qml.math.exp(-0.5j * (phi - omega)) * s,
qml.math.exp(0.5j * (phi + omega)) * c,
],
]
return qml.math.stack([stack_last(row) for row in mat], axis=-2)
@staticmethod
def compute_decomposition(phi, theta, omega, wires):
r"""Representation of the operator as a product of other operators (static method). :
.. math:: O = O_1 O_2 \dots O_n.
.. seealso:: :meth:`~.Rot.decomposition`.
Args:
phi (float): rotation angle :math:`\phi`
theta (float): rotation angle :math:`\theta`
omega (float): rotation angle :math:`\omega`
wires (Any, Wires): the wire the operation acts on
Returns:
list[Operator]: decomposition into lower level operations
**Example:**
>>> qml.Rot.compute_decomposition(1.2, 2.3, 3.4, wires=0)
[RZ(1.2, wires=[0]), RY(2.3, wires=[0]), RZ(3.4, wires=[0])]
"""
decomp_ops = [
RZ(phi, wires=wires),
RY(theta, wires=wires),
RZ(omega, wires=wires),
]
return decomp_ops
def adjoint(self):
phi, theta, omega = self.parameters
return Rot(-omega, -theta, -phi, wires=self.wires)
def _controlled(self, wire):
CRot(*self.parameters, wires=wire + self.wires)
def single_qubit_rot_angles(self):
return self.data
class MultiRZ(Operation):
r"""
Arbitrary multi Z rotation.
.. math::
MultiRZ(\theta) = \exp(-i \frac{\theta}{2} Z^{\otimes n})
**Details:**
* Number of wires: Any
* Number of parameters: 1
* Number of dimensions per parameter: (0,)
* Gradient recipe: :math:`\frac{d}{d\theta}f(MultiRZ(\theta)) = \frac{1}{2}\left[f(MultiRZ(\theta +\pi/2)) - f(MultiRZ(\theta-\pi/2))\right]`
where :math:`f` is an expectation value depending on :math:`MultiRZ(\theta)`.
.. note::
If the ``MultiRZ`` gate is not supported on the targeted device, PennyLane
will decompose the gate using :class:`~.RZ` and :class:`~.CNOT` gates.
Args:
theta (tensor_like or float): rotation angle :math:`\theta`
wires (Sequence[int] or int): the wires the operation acts on
do_queue (bool): Indicates whether the operator should be
immediately pushed into the Operator queue (optional)
id (str or None): String representing the operation (optional)
"""
num_wires = AnyWires
num_params = 1
"""int: Number of trainable parameters that the operator depends on."""
ndim_params = (0,)
"""tuple[int]: Number of dimensions per trainable parameter that the operator depends on."""
grad_method = "A"
parameter_frequencies = [(1,)]
def __init__(self, theta, wires=None, do_queue=True, id=None):
wires = Wires(wires)
self.hyperparameters["num_wires"] = len(wires)
super().__init__(theta, wires=wires, do_queue=do_queue, id=id)
@staticmethod
def compute_matrix(theta, num_wires): # pylint: disable=arguments-differ
r"""Representation of the operator as a canonical matrix in the computational basis (static method).
The canonical matrix is the textbook matrix representation that does not consider wires.
Implicitly, this assumes that the wires of the operator correspond to the global wire order.
.. seealso:: :meth:`~.MultiRZ.matrix`
Args:
theta (tensor_like or float): rotation angle
num_wires (int): number of wires the rotation acts on
Returns:
tensor_like: canonical matrix
**Example**
>>> qml.MultiRZ.compute_matrix(torch.tensor(0.1), 2)
tensor([[0.9988-0.0500j, 0.0000+0.0000j, 0.0000+0.0000j, 0.0000+0.0000j],
[0.0000+0.0000j, 0.9988+0.0500j, 0.0000+0.0000j, 0.0000+0.0000j],
[0.0000+0.0000j, 0.0000+0.0000j, 0.9988+0.0500j, 0.0000+0.0000j],
[0.0000+0.0000j, 0.0000+0.0000j, 0.0000+0.0000j, 0.9988-0.0500j]])
"""
eigs = qml.math.convert_like(pauli_eigs(num_wires), theta)
if qml.math.get_interface(theta) == "tensorflow":
theta = qml.math.cast_like(theta, 1j)
eigs = qml.math.cast_like(eigs, 1j)
if qml.math.ndim(theta) > 0:
eigvals = [qml.math.exp(-0.5j * t * eigs) for t in theta]
return qml.math.stack([qml.math.diag(eig) for eig in eigvals])
eigvals = qml.math.exp(-0.5j * theta * eigs)
return qml.math.diag(eigvals)
def generator(self):
return -0.5 * functools.reduce(matmul, [qml.PauliZ(w) for w in self.wires])
@staticmethod
def compute_eigvals(theta, num_wires): # pylint: disable=arguments-differ
r"""Eigenvalues of the operator in the computational basis (static method).
If :attr:`diagonalizing_gates` are specified and implement a unitary :math:`U`,
the operator can be reconstructed as
.. math:: O = U \Sigma U^{\dagger},
where :math:`\Sigma` is the diagonal matrix containing the eigenvalues.
Otherwise, no particular order for the eigenvalues is guaranteed.
.. seealso:: :meth:`~.MultiRZ.eigvals`
Args:
theta (tensor_like or float): rotation angle
num_wires (int): number of wires the rotation acts on
Returns:
tensor_like: eigenvalues
**Example**
>>> qml.MultiRZ.compute_eigvals(torch.tensor(0.5), 3)
tensor([0.9689-0.2474j, 0.9689+0.2474j, 0.9689+0.2474j, 0.9689-0.2474j,
0.9689+0.2474j, 0.9689-0.2474j, 0.9689-0.2474j, 0.9689+0.2474j])
"""
eigs = qml.math.convert_like(pauli_eigs(num_wires), theta)
if qml.math.get_interface(theta) == "tensorflow":
theta = qml.math.cast_like(theta, 1j)
eigs = qml.math.cast_like(eigs, 1j)
if qml.math.ndim(theta) > 0:
return qml.math.exp(qml.math.tensordot(-0.5j * theta, eigs, axes=0))
return qml.math.exp(-0.5j * theta * eigs)
@staticmethod
def compute_decomposition(
theta, wires, **kwargs
): # pylint: disable=arguments-differ,unused-argument
r"""Representation of the operator as a product of other operators (static method). :
.. math:: O = O_1 O_2 \dots O_n.
.. seealso:: :meth:`~.MultiRZ.decomposition`.
Args:
theta (float): rotation angle :math:`\theta`
wires (Iterable, Wires): the wires the operation acts on
Returns:
list[Operator]: decomposition into lower level operations
**Example:**
>>> qml.MultiRZ.compute_decomposition(1.2, wires=(0,1))
[CNOT(wires=[1, 0]), RZ(1.2, wires=[0]), CNOT(wires=[1, 0])]
"""
ops = [qml.CNOT(wires=(w0, w1)) for w0, w1 in zip(wires[~0:0:-1], wires[~1::-1])]
ops.append(RZ(theta, wires=wires[0]))
ops += [qml.CNOT(wires=(w0, w1)) for w0, w1 in zip(wires[1:], wires[:~0])]
return ops
def adjoint(self):
return MultiRZ(-self.parameters[0], wires=self.wires)
def pow(self, z):
return [MultiRZ(self.data[0] * z, wires=self.wires)]
class PauliRot(Operation):
r"""
Arbitrary Pauli word rotation.
.. math::
RP(\theta, P) = \exp(-i \frac{\theta}{2} P)
**Details:**
* Number of wires: Any
* Number of parameters: 1
* Number of dimensions per parameter: (0,)
* Gradient recipe: :math:`\frac{d}{d\theta}f(RP(\theta)) = \frac{1}{2}\left[f(RP(\theta +\pi/2)) - f(RP(\theta-\pi/2))\right]`
where :math:`f` is an expectation value depending on :math:`RP(\theta)`.
.. note::
If the ``PauliRot`` gate is not supported on the targeted device, PennyLane
will decompose the gate using :class:`~.RX`, :class:`~.Hadamard`, :class:`~.RZ`
and :class:`~.CNOT` gates.
Args:
theta (float): rotation angle :math:`\theta`
pauli_word (string): the Pauli word defining the rotation
wires (Sequence[int] or int): the wire the operation acts on
do_queue (bool): Indicates whether the operator should be
immediately pushed into the Operator queue (optional)
id (str or None): String representing the operation (optional)
**Example**
>>> dev = qml.device('default.qubit', wires=1)
>>> @qml.qnode(dev)
... def example_circuit():
... qml.PauliRot(0.5, 'X', wires=0)
... return qml.expval(qml.PauliZ(0))
>>> print(example_circuit())
0.8775825618903724
"""
num_wires = AnyWires
num_params = 1
"""int: Number of trainable parameters that the operator depends on."""
ndim_params = (0,)
"""tuple[int]: Number of dimensions per trainable parameter that the operator depends on."""
do_check_domain = False
grad_method = "A"
parameter_frequencies = [(1,)]
_ALLOWED_CHARACTERS = "IXYZ"
_PAULI_CONJUGATION_MATRICES = {
"X": Hadamard.compute_matrix(),
"Y": RX.compute_matrix(np.pi / 2),
"Z": np.array([[1, 0], [0, 1]]),
}
def __init__(self, theta, pauli_word, wires=None, do_queue=True, id=None):
super().__init__(theta, wires=wires, do_queue=do_queue, id=id)
self.hyperparameters["pauli_word"] = pauli_word
if not PauliRot._check_pauli_word(pauli_word):
raise ValueError(
f'The given Pauli word "{pauli_word}" contains characters that are not allowed.'
" Allowed characters are I, X, Y and Z"
)
num_wires = 1 if isinstance(wires, int) else len(wires)
if not len(pauli_word) == num_wires:
raise ValueError(
f"The given Pauli word has length {len(pauli_word)}, length "
f"{num_wires} was expected for wires {wires}"
)
def label(self, decimals=None, base_label=None, cache=None):
r"""A customizable string representation of the operator.
Args:
decimals=None (int): If ``None``, no parameters are included. Else,
specifies how to round the parameters.
base_label=None (str): overwrite the non-parameter component of the label
            cache=None (dict): dictionary that carries information between label calls
in the same drawing
Returns:
str: label to use in drawings
**Example:**
>>> op = qml.PauliRot(0.1, "XYY", wires=(0,1,2))
>>> op.label()
'RXYY'
>>> op.label(decimals=2)
'RXYY\n(0.10)'
>>> op.label(base_label="PauliRot")
'PauliRot\n(0.10)'
"""
pauli_word = self.hyperparameters["pauli_word"]
op_label = base_label or ("R" + pauli_word)
if self.inverse:
op_label += "⁻¹"
# TODO[dwierichs]: Implement a proper label for parameter-broadcasted operators
if decimals is not None and self.batch_size is None:
param_string = f"\n({qml.math.asarray(self.parameters[0]):.{decimals}f})"
op_label += param_string
return op_label
@staticmethod
def _check_pauli_word(pauli_word):
"""Check that the given Pauli word has correct structure.
Args:
pauli_word (str): Pauli word to be checked
Returns:
bool: Whether the Pauli word has correct structure.
"""
return all(pauli in PauliRot._ALLOWED_CHARACTERS for pauli in set(pauli_word))
@staticmethod
def compute_matrix(theta, pauli_word): # pylint: disable=arguments-differ
r"""Representation of the operator as a canonical matrix in the computational basis (static method).
The canonical matrix is the textbook matrix representation that does not consider wires.
Implicitly, this assumes that the wires of the operator correspond to the global wire order.
.. seealso:: :meth:`~.PauliRot.matrix`
Args:
theta (tensor_like or float): rotation angle
pauli_word (str): string representation of Pauli word
Returns:
tensor_like: canonical matrix
**Example**
>>> qml.PauliRot.compute_matrix(0.5, 'X')
[[9.6891e-01+4.9796e-18j 2.7357e-17-2.4740e-01j]
[2.7357e-17-2.4740e-01j 9.6891e-01+4.9796e-18j]]
"""
if not PauliRot._check_pauli_word(pauli_word):
raise ValueError(
f'The given Pauli word "{pauli_word}" contains characters that are not allowed.'
" Allowed characters are I, X, Y and Z"
)
interface = qml.math.get_interface(theta)
if interface == "tensorflow":
theta = qml.math.cast_like(theta, 1j)
# Simplest case is if the Pauli is the identity matrix
if set(pauli_word) == {"I"}:
exp = qml.math.exp(-0.5j * theta)
iden = qml.math.eye(2 ** len(pauli_word), like=theta)
if qml.math.get_interface(theta) == "tensorflow":
iden = qml.math.cast_like(iden, 1j)
if qml.math.ndim(theta) == 0:
return exp * iden
return qml.math.stack([e * iden for e in exp])
# We first generate the matrix excluding the identity parts and expand it afterwards.
# To this end, we have to store on which wires the non-identity parts act
non_identity_wires, non_identity_gates = zip(
*[(wire, gate) for wire, gate in enumerate(pauli_word) if gate != "I"]
)
multi_Z_rot_matrix = MultiRZ.compute_matrix(theta, len(non_identity_gates))
# now we conjugate with Hadamard and RX to create the Pauli string
conjugation_matrix = functools.reduce(
qml.math.kron,
[PauliRot._PAULI_CONJUGATION_MATRICES[gate] for gate in non_identity_gates],
)
if interface == "tensorflow":
conjugation_matrix = qml.math.cast_like(conjugation_matrix, 1j)
# Note: we use einsum with reverse arguments here because it is not multi-dispatched
# and the tensordot containing multi_Z_rot_matrix should decide about the interface
return expand_matrix(
qml.math.einsum(
"...jk,ij->...ik",
qml.math.tensordot(multi_Z_rot_matrix, conjugation_matrix, axes=[[-1], [0]]),
qml.math.conj(conjugation_matrix),
),
non_identity_wires,
list(range(len(pauli_word))),
)
def generator(self):
pauli_word = self.hyperparameters["pauli_word"]
wire_map = {w: i for i, w in enumerate(self.wires)}
return -0.5 * qml.grouping.string_to_pauli_word(pauli_word, wire_map=wire_map)
@staticmethod
def compute_eigvals(theta, pauli_word): # pylint: disable=arguments-differ
r"""Eigenvalues of the operator in the computational basis (static method).
If :attr:`diagonalizing_gates` are specified and implement a unitary :math:`U`,
the operator can be reconstructed as
.. math:: O = U \Sigma U^{\dagger},
where :math:`\Sigma` is the diagonal matrix containing the eigenvalues.
Otherwise, no particular order for the eigenvalues is guaranteed.
.. seealso:: :meth:`~.PauliRot.eigvals`
Returns:
tensor_like: eigenvalues
**Example**
>>> qml.PauliRot.compute_eigvals(torch.tensor(0.5), "X")
tensor([0.9689-0.2474j, 0.9689+0.2474j])
"""
if qml.math.get_interface(theta) == "tensorflow":
theta = qml.math.cast_like(theta, 1j)
# Identity must be treated specially because its eigenvalues are all the same
if set(pauli_word) == {"I"}:
exp = qml.math.exp(-0.5j * theta)
ones = qml.math.ones(2 ** len(pauli_word), like=theta)
if qml.math.get_interface(theta) == "tensorflow":
ones = qml.math.cast_like(ones, 1j)
if qml.math.ndim(theta) == 0:
return exp * ones
return qml.math.tensordot(exp, ones, axes=0)
return MultiRZ.compute_eigvals(theta, len(pauli_word))
@staticmethod
def compute_decomposition(theta, wires, pauli_word):
r"""Representation of the operator as a product of other operators (static method). :
.. math:: O = O_1 O_2 \dots O_n.
.. seealso:: :meth:`~.PauliRot.decomposition`.
Args:
theta (float): rotation angle :math:`\theta`
pauli_word (string): the Pauli word defining the rotation
wires (Iterable, Wires): the wires the operation acts on
Returns:
list[Operator]: decomposition into lower level operations
**Example:**
>>> qml.PauliRot.compute_decomposition(1.2, "XY", wires=(0,1))
[Hadamard(wires=[0]),
RX(1.5707963267948966, wires=[1]),
MultiRZ(1.2, wires=[0, 1]),
Hadamard(wires=[0]),
RX(-1.5707963267948966, wires=[1])]
"""
if isinstance(wires, int): # Catch cases when the wire is passed as a single int.
wires = [wires]
# Check for identity and do nothing
if set(pauli_word) == {"I"}:
return []
active_wires, active_gates = zip(
*[(wire, gate) for wire, gate in zip(wires, pauli_word) if gate != "I"]
)
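        # Rotate the active X and Y factors into the Z basis (Hadamard for X,
        # RX(pi/2) for Y), apply a MultiRZ on the active wires, and undo the basis
        # change afterwards.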
ops = []
for wire, gate in zip(active_wires, active_gates):
if gate == "X":
ops.append(Hadamard(wires=[wire]))
elif gate == "Y":
ops.append(RX(np.pi / 2, wires=[wire]))
ops.append(MultiRZ(theta, wires=list(active_wires)))
for wire, gate in zip(active_wires, active_gates):
if gate == "X":
ops.append(Hadamard(wires=[wire]))
elif gate == "Y":
ops.append(RX(-np.pi / 2, wires=[wire]))
return ops
def adjoint(self):
return PauliRot(-self.parameters[0], self.hyperparameters["pauli_word"], wires=self.wires)
def pow(self, z):
return [PauliRot(self.data[0] * z, self.hyperparameters["pauli_word"], wires=self.wires)]
class CRX(Operation):
r"""
The controlled-RX operator
.. math::
\begin{align}
CR_x(\phi) &=
\begin{bmatrix}
            1 & 0 & 0 & 0 \\
            0 & 1 & 0 & 0\\
            0 & 0 & \cos(\phi/2) & -i\sin(\phi/2)\\
            0 & 0 & -i\sin(\phi/2) & \cos(\phi/2)
\end{bmatrix}.
\end{align}
**Details:**
* Number of wires: 2
* Number of parameters: 1
* Number of dimensions per parameter: (0,)
* Gradient recipe: The controlled-RX operator satisfies a four-term parameter-shift rule
(see Appendix F, https://doi.org/10.1088/1367-2630/ac2cb3):
.. math::
\frac{d}{d\phi}f(CR_x(\phi)) = c_+ \left[f(CR_x(\phi+a)) - f(CR_x(\phi-a))\right] - c_- \left[f(CR_x(\phi+b)) - f(CR_x(\phi-b))\right]
where :math:`f` is an expectation value depending on :math:`CR_x(\phi)`, and
- :math:`a = \pi/2`
- :math:`b = 3\pi/2`
- :math:`c_{\pm} = (\sqrt{2} \pm 1)/{4\sqrt{2}}`
Args:
phi (float): rotation angle :math:`\phi`
wires (Sequence[int]): the wire the operation acts on
do_queue (bool): Indicates whether the operator should be
immediately pushed into the Operator queue (optional)
id (str or None): String representing the operation (optional)
"""
num_wires = 2
num_params = 1
"""int: Number of trainable parameters that the operator depends on."""
ndim_params = (0,)
"""tuple[int]: Number of dimensions per trainable parameter that the operator depends on."""
basis = "X"
grad_method = "A"
parameter_frequencies = [(0.5, 1.0)]
def generator(self):
return -0.5 * qml.Projector(np.array([1]), wires=self.wires[0]) @ qml.PauliX(self.wires[1])
def __init__(self, phi, wires, do_queue=True, id=None):
super().__init__(phi, wires=wires, do_queue=do_queue, id=id)
def label(self, decimals=None, base_label=None, cache=None):
return super().label(decimals=decimals, base_label=base_label or "RX", cache=cache)
@staticmethod
def compute_matrix(theta): # pylint: disable=arguments-differ
r"""Representation of the operator as a canonical matrix in the computational basis (static method).
The canonical matrix is the textbook matrix representation that does not consider wires.
Implicitly, this assumes that the wires of the operator correspond to the global wire order.
.. seealso:: :meth:`~.CRX.matrix`
Args:
theta (tensor_like or float): rotation angle
Returns:
tensor_like: canonical matrix
**Example**
>>> qml.CRX.compute_matrix(torch.tensor(0.5))
tensor([[1.0+0.0j, 0.0+0.0j, 0.0+0.0j, 0.0+0.0j],
[0.0+0.0j, 1.0+0.0j, 0.0+0.0j, 0.0+0.0j],
[0.0+0.0j, 0.0+0.0j, 0.9689+0.0j, 0.0-0.2474j],
[0.0+0.0j, 0.0+0.0j, 0.0-0.2474j, 0.9689+0.0j]])
"""
interface = qml.math.get_interface(theta)
c = qml.math.cos(theta / 2)
s = qml.math.sin(theta / 2)
if interface == "tensorflow":
c = qml.math.cast_like(c, 1j)
s = qml.math.cast_like(s, 1j)
# The following avoids casting an imaginary quantity to reals when backpropagating
c = (1 + 0j) * c
js = -1j * s
ones = qml.math.ones_like(js)
zeros = qml.math.zeros_like(js)
matrix = [
[ones, zeros, zeros, zeros],
[zeros, ones, zeros, zeros],
[zeros, zeros, c, js],
[zeros, zeros, js, c],
]
return qml.math.stack([stack_last(row) for row in matrix], axis=-2)
@staticmethod
def compute_decomposition(phi, wires):
r"""Representation of the operator as a product of other operators (static method). :
.. math:: O = O_1 O_2 \dots O_n.
        .. seealso:: :meth:`~.CRX.decomposition`.
Args:
phi (float): rotation angle :math:`\phi`
wires (Iterable, Wires): the wires the operation acts on
Returns:
list[Operator]: decomposition into lower level operations
**Example:**
>>> qml.CRX.compute_decomposition(1.2, wires=(0,1))
[RZ(1.5707963267948966, wires=[1]),
RY(0.6, wires=[1]),
CNOT(wires=[0, 1]),
RY(-0.6, wires=[1]),
CNOT(wires=[0, 1]),
RZ(-1.5707963267948966, wires=[1])]
"""
pi_half = qml.math.ones_like(phi) * (np.pi / 2)
decomp_ops = [
RZ(pi_half, wires=wires[1]),
RY(phi / 2, wires=wires[1]),
qml.CNOT(wires=wires),
RY(-phi / 2, wires=wires[1]),
qml.CNOT(wires=wires),
RZ(-pi_half, wires=wires[1]),
]
return decomp_ops
def adjoint(self):
return CRX(-self.data[0], wires=self.wires)
def pow(self, z):
return [CRX(self.data[0] * z, wires=self.wires)]
@property
def control_wires(self):
return Wires(self.wires[0])
class CRY(Operation):
r"""
The controlled-RY operator
.. math::
\begin{align}
CR_y(\phi) &=
\begin{bmatrix}
1 & 0 & 0 & 0 \\
0 & 1 & 0 & 0\\
0 & 0 & \cos(\phi/2) & -\sin(\phi/2)\\
0 & 0 & \sin(\phi/2) & \cos(\phi/2)
\end{bmatrix}.
\end{align}
**Details:**
* Number of wires: 2
* Number of parameters: 1
* Number of dimensions per parameter: (0,)
* Gradient recipe: The controlled-RY operator satisfies a four-term parameter-shift rule
(see Appendix F, https://doi.org/10.1088/1367-2630/ac2cb3):
.. math::
\frac{d}{d\phi}f(CR_y(\phi)) = c_+ \left[f(CR_y(\phi+a)) - f(CR_y(\phi-a))\right] - c_- \left[f(CR_y(\phi+b)) - f(CR_y(\phi-b))\right]
where :math:`f` is an expectation value depending on :math:`CR_y(\phi)`, and
- :math:`a = \pi/2`
- :math:`b = 3\pi/2`
- :math:`c_{\pm} = (\sqrt{2} \pm 1)/(4\sqrt{2})`
Args:
phi (float): rotation angle :math:`\phi`
wires (Sequence[int]): the wires the operation acts on
do_queue (bool): Indicates whether the operator should be
immediately pushed into the Operator queue (optional)
id (str or None): String representing the operation (optional)
"""
num_wires = 2
num_params = 1
"""int: Number of trainable parameters that the operator depends on."""
ndim_params = (0,)
"""tuple[int]: Number of dimensions per trainable parameter that the operator depends on."""
basis = "Y"
grad_method = "A"
parameter_frequencies = [(0.5, 1.0)]
def generator(self):
return -0.5 * qml.Projector(np.array([1]), wires=self.wires[0]) @ qml.PauliY(self.wires[1])
def __init__(self, phi, wires, do_queue=True, id=None):
super().__init__(phi, wires=wires, do_queue=do_queue, id=id)
def label(self, decimals=None, base_label=None, cache=None):
return super().label(decimals=decimals, base_label=base_label or "RY", cache=cache)
@staticmethod
def compute_matrix(theta): # pylint: disable=arguments-differ
r"""Representation of the operator as a canonical matrix in the computational basis (static method).
The canonical matrix is the textbook matrix representation that does not consider wires.
Implicitly, this assumes that the wires of the operator correspond to the global wire order.
.. seealso:: :meth:`~.CRY.matrix`
Args:
theta (tensor_like or float): rotation angle
Returns:
tensor_like: canonical matrix
**Example**
>>> qml.CRY.compute_matrix(torch.tensor(0.5))
tensor([[ 1.0000, 0.0000, 0.0000, 0.0000],
[ 0.0000, 1.0000, 0.0000, 0.0000],
[ 0.0000, 0.0000, 0.9689, -0.2474],
[ 0.0000, 0.0000, 0.2474, 0.9689]], dtype=torch.float64)
"""
interface = qml.math.get_interface(theta)
c = qml.math.cos(theta / 2)
s = qml.math.sin(theta / 2)
if interface == "tensorflow":
c = qml.math.cast_like(c, 1j)
s = qml.math.cast_like(s, 1j)
# The following avoids casting an imaginary quantity to reals when backpropagating
c = (1 + 0j) * c
s = (1 + 0j) * s
ones = qml.math.ones_like(s)
zeros = qml.math.zeros_like(s)
matrix = [
[ones, zeros, zeros, zeros],
[zeros, ones, zeros, zeros],
[zeros, zeros, c, -s],
[zeros, zeros, s, c],
]
return qml.math.stack([stack_last(row) for row in matrix], axis=-2)
@staticmethod
def compute_decomposition(phi, wires):
r"""Representation of the operator as a product of other operators (static method). :
.. math:: O = O_1 O_2 \dots O_n.
.. seealso:: :meth:`~.CRY.decomposition`.
Args:
phi (float): rotation angle :math:`\phi`
wires (Iterable, Wires): wires that the operator acts on
Returns:
list[Operator]: decomposition into lower level operations
**Example:**
>>> qml.CRY.compute_decomposition(1.2, wires=(0,1))
[RY(0.6, wires=[1]),
CNOT(wires=[0, 1]),
RY(-0.6, wires=[1]),
CNOT(wires=[0, 1])]
"""
decomp_ops = [
RY(phi / 2, wires=wires[1]),
qml.CNOT(wires=wires),
RY(-phi / 2, wires=wires[1]),
qml.CNOT(wires=wires),
]
return decomp_ops
def adjoint(self):
return CRY(-self.data[0], wires=self.wires)
def pow(self, z):
return [CRY(self.data[0] * z, wires=self.wires)]
@property
def control_wires(self):
return Wires(self.wires[0])
class CRZ(Operation):
r"""
The controlled-RZ operator
.. math::
\begin{align}
CR_z(\phi) &=
\begin{bmatrix}
1 & 0 & 0 & 0 \\
0 & 1 & 0 & 0\\
0 & 0 & e^{-i\phi/2} & 0\\
0 & 0 & 0 & e^{i\phi/2}
\end{bmatrix}.
\end{align}
.. note:: The subscripts of the operations in the formula refer to the wires they act on, e.g. 1 corresponds to the first element in ``wires`` that is the **control qubit**.
**Details:**
* Number of wires: 2
* Number of parameters: 1
* Number of dimensions per parameter: (0,)
* Gradient recipe: The controlled-RZ operator satisfies a four-term parameter-shift rule
(see Appendix F, https://doi.org/10.1088/1367-2630/ac2cb3):
.. math::
\frac{d}{d\phi}f(CR_z(\phi)) = c_+ \left[f(CR_z(\phi+a)) - f(CR_z(\phi-a))\right] - c_- \left[f(CR_z(\phi+b)) - f(CR_z(\phi-b))\right]
where :math:`f` is an expectation value depending on :math:`CR_z(\phi)`, and
- :math:`a = \pi/2`
- :math:`b = 3\pi/2`
- :math:`c_{\pm} = (\sqrt{2} \pm 1)/(4\sqrt{2})`
Args:
phi (float): rotation angle :math:`\phi`
wires (Sequence[int]): the wires the operation acts on
do_queue (bool): Indicates whether the operator should be
immediately pushed into the Operator queue (optional)
id (str or None): String representing the operation (optional)
"""
num_wires = 2
num_params = 1
"""int: Number of trainable parameters that the operator depends on."""
ndim_params = (0,)
"""tuple[int]: Number of dimensions per trainable parameter that the operator depends on."""
basis = "Z"
grad_method = "A"
parameter_frequencies = [(0.5, 1.0)]
def generator(self):
return -0.5 * qml.Projector(np.array([1]), wires=self.wires[0]) @ qml.PauliZ(self.wires[1])
def __init__(self, phi, wires, do_queue=True, id=None):
super().__init__(phi, wires=wires, do_queue=do_queue, id=id)
def label(self, decimals=None, base_label=None, cache=None):
return super().label(decimals=decimals, base_label=base_label or "RZ", cache=cache)
@staticmethod
def compute_matrix(theta): # pylint: disable=arguments-differ
r"""Representation of the operator as a canonical matrix in the computational basis (static method).
The canonical matrix is the textbook matrix representation that does not consider wires.
Implicitly, this assumes that the wires of the operator correspond to the global wire order.
.. seealso:: :meth:`~.CRZ.matrix`
Args:
theta (tensor_like or float): rotation angle
Returns:
tensor_like: canonical matrix
**Example**
>>> qml.CRZ.compute_matrix(torch.tensor(0.5))
tensor([[1.0+0.0j, 0.0+0.0j, 0.0+0.0j, 0.0+0.0j],
[0.0+0.0j, 1.0+0.0j, 0.0+0.0j, 0.0+0.0j],
[0.0+0.0j, 0.0+0.0j, 0.9689-0.2474j, 0.0+0.0j],
[0.0+0.0j, 0.0+0.0j, 0.0+0.0j, 0.9689+0.2474j]])
"""
if qml.math.get_interface(theta) == "tensorflow":
theta = qml.math.cast_like(theta, 1j)
exp_part = qml.math.exp(-1j * theta / 2)
ones = qml.math.ones_like(exp_part)
zeros = qml.math.zeros_like(exp_part)
matrix = [
[ones, zeros, zeros, zeros],
[zeros, ones, zeros, zeros],
[zeros, zeros, exp_part, zeros],
[zeros, zeros, zeros, qml.math.conj(exp_part)],
]
return qml.math.stack([stack_last(row) for row in matrix], axis=-2)
@staticmethod
def compute_eigvals(theta): # pylint: disable=arguments-differ
r"""Eigenvalues of the operator in the computational basis (static method).
If :attr:`diagonalizing_gates` are specified and implement a unitary :math:`U`,
the operator can be reconstructed as
.. math:: O = U \Sigma U^{\dagger},
where :math:`\Sigma` is the diagonal matrix containing the eigenvalues.
Otherwise, no particular order for the eigenvalues is guaranteed.
.. seealso:: :meth:`~.CRZ.eigvals`
Args:
theta (tensor_like or float): rotation angle
Returns:
tensor_like: eigenvalues
**Example**
>>> qml.CRZ.compute_eigvals(torch.tensor(0.5))
tensor([1.0000+0.0000j, 1.0000+0.0000j, 0.9689-0.2474j, 0.9689+0.2474j])
"""
if qml.math.get_interface(theta) == "tensorflow":
theta = qml.math.cast_like(theta, 1j)
exp_part = qml.math.exp(-0.5j * theta)
o = qml.math.ones_like(exp_part)
return stack_last([o, o, exp_part, qml.math.conj(exp_part)])
@staticmethod
def compute_decomposition(phi, wires):
r"""Representation of the operator as a product of other operators (static method). :
.. math:: O = O_1 O_2 \dots O_n.
.. seealso:: :meth:`~.CRZ.decomposition`.
Args:
phi (float): rotation angle :math:`\phi`
wires (Iterable, Wires): wires that the operator acts on
Returns:
list[Operator]: decomposition into lower level operations
**Example:**
>>> qml.CRZ.compute_decomposition(1.2, wires=(0,1))
[PhaseShift(0.6, wires=[1]),
CNOT(wires=[0, 1]),
PhaseShift(-0.6, wires=[1]),
CNOT(wires=[0, 1])]
"""
decomp_ops = [
PhaseShift(phi / 2, wires=wires[1]),
qml.CNOT(wires=wires),
PhaseShift(-phi / 2, wires=wires[1]),
qml.CNOT(wires=wires),
]
return decomp_ops
def adjoint(self):
return CRZ(-self.data[0], wires=self.wires)
def pow(self, z):
return [CRZ(self.data[0] * z, wires=self.wires)]
@property
def control_wires(self):
return Wires(self.wires[0])
class CRot(Operation):
r"""
The controlled-Rot operator
.. math:: CR(\phi, \theta, \omega) = \begin{bmatrix}
1 & 0 & 0 & 0 \\
0 & 1 & 0 & 0\\
0 & 0 & e^{-i(\phi+\omega)/2}\cos(\theta/2) & -e^{i(\phi-\omega)/2}\sin(\theta/2)\\
0 & 0 & e^{-i(\phi-\omega)/2}\sin(\theta/2) & e^{i(\phi+\omega)/2}\cos(\theta/2)
\end{bmatrix}.
.. note:: The first wire provided corresponds to the **control qubit**.
**Details:**
* Number of wires: 2
* Number of parameters: 3
* Number of dimensions per parameter: (0, 0, 0)
* Gradient recipe: The controlled-Rot operator satisfies a four-term parameter-shift rule
(see Appendix F, https://doi.org/10.1088/1367-2630/ac2cb3):
.. math::
\frac{d}{d\mathbf{x}_i}f(CR(\mathbf{x}_i)) = c_+ \left[f(CR(\mathbf{x}_i+a)) - f(CR(\mathbf{x}_i-a))\right] - c_- \left[f(CR(\mathbf{x}_i+b)) - f(CR(\mathbf{x}_i-b))\right]
where :math:`f` is an expectation value depending on :math:`CR(\mathbf{x}_i)`, and
- :math:`\mathbf{x} = (\phi, \theta, \omega)` and :math:`i` is an index into :math:`\mathbf{x}`
- :math:`a = \pi/2`
- :math:`b = 3\pi/2`
- :math:`c_{\pm} = (\sqrt{2} \pm 1)/(4\sqrt{2})`
Args:
phi (float): rotation angle :math:`\phi`
theta (float): rotation angle :math:`\theta`
omega (float): rotation angle :math:`\omega`
wires (Sequence[int]): the wires the operation acts on
do_queue (bool): Indicates whether the operator should be
immediately pushed into the Operator queue (optional)
id (str or None): String representing the operation (optional)
"""
num_wires = 2
num_params = 3
"""int: Number of trainable parameters that the operator depends on."""
ndim_params = (0, 0, 0)
"""tuple[int]: Number of dimensions per trainable parameter that the operator depends on."""
grad_method = "A"
parameter_frequencies = [(0.5, 1.0), (0.5, 1.0), (0.5, 1.0)]
def __init__(self, phi, theta, omega, wires, do_queue=True, id=None):
super().__init__(phi, theta, omega, wires=wires, do_queue=do_queue, id=id)
def label(self, decimals=None, base_label=None, cache=None):
return super().label(decimals=decimals, base_label=base_label or "Rot", cache=cache)
@staticmethod
def compute_matrix(phi, theta, omega): # pylint: disable=arguments-differ
r"""Representation of the operator as a canonical matrix in the computational basis (static method).
The canonical matrix is the textbook matrix representation that does not consider wires.
Implicitly, this assumes that the wires of the operator correspond to the global wire order.
.. seealso:: :meth:`~.CRot.matrix`
Args:
phi(tensor_like or float): first rotation angle
theta (tensor_like or float): second rotation angle
omega (tensor_like or float): third rotation angle
Returns:
tensor_like: canonical matrix
**Example**
>>> qml.CRot.compute_matrix(torch.tensor(0.1), torch.tensor(0.2), torch.tensor(0.3))
tensor([[ 1.0+0.0j, 0.0+0.0j, 0.0+0.0j, 0.0+0.0j],
[ 0.0+0.0j, 1.0+0.0j, 0.0+0.0j, 0.0+0.0j],
[ 0.0+0.0j, 0.0+0.0j, 0.9752-0.1977j, -0.0993+0.0100j],
[ 0.0+0.0j, 0.0+0.0j, 0.0993+0.0100j, 0.9752+0.1977j]])
"""
# It might be that they are in different interfaces, e.g.,
# CRot(0.2, 0.3, tf.Variable(0.5), wires=[0, 1])
# So we need to make sure the matrix comes out having the right type
interface = qml.math._multi_dispatch([phi, theta, omega])
c = qml.math.cos(theta / 2)
s = qml.math.sin(theta / 2)
# If anything is not tensorflow, it has to be cast to complex
if interface == "tensorflow":
phi = qml.math.cast_like(qml.math.asarray(phi, like=interface), 1j)
omega = qml.math.cast_like(qml.math.asarray(omega, like=interface), 1j)
c = qml.math.cast_like(qml.math.asarray(c, like=interface), 1j)
s = qml.math.cast_like(qml.math.asarray(s, like=interface), 1j)
# The following variable ensures that all terms to be stacked have the same shape
one = qml.math.ones_like(phi) * qml.math.ones_like(omega)
c = c * one
s = s * one
o = qml.math.ones_like(c)
z = qml.math.zeros_like(c)
mat = [
[o, z, z, z],
[z, o, z, z],
[
z,
z,
qml.math.exp(-0.5j * (phi + omega)) * c,
-qml.math.exp(0.5j * (phi - omega)) * s,
],
[
z,
z,
qml.math.exp(-0.5j * (phi - omega)) * s,
qml.math.exp(0.5j * (phi + omega)) * c,
],
]
return qml.math.stack([stack_last(row) for row in mat], axis=-2)
@staticmethod
def compute_decomposition(phi, theta, omega, wires):
r"""Representation of the operator as a product of other operators (static method). :
.. math:: O = O_1 O_2 \dots O_n.
.. seealso:: :meth:`~.CRot.decomposition`.
Args:
phi (float): rotation angle :math:`\phi`
theta (float): rotation angle :math:`\theta`
omega (float): rotation angle :math:`\omega`
wires (Iterable, Wires): the wires the operation acts on
Returns:
list[Operator]: decomposition into lower level operations
**Example:**
>>> qml.PhaseShift.compute_decomposition(1.234, wires=0)
[RZ(-1.1, wires=[1]),
CNOT(wires=[0, 1]),
RZ(-2.3, wires=[1]),
RY(-1.15, wires=[1]),
CNOT(wires=[0, 1]),
RY(1.15, wires=[1]),
RZ(3.4, wires=[1])]
"""
decomp_ops = [
RZ((phi - omega) / 2, wires=wires[1]),
qml.CNOT(wires=wires),
RZ(-(phi + omega) / 2, wires=wires[1]),
RY(-theta / 2, wires=wires[1]),
qml.CNOT(wires=wires),
RY(theta / 2, wires=wires[1]),
RZ(omega, wires=wires[1]),
]
return decomp_ops
def adjoint(self):
phi, theta, omega = self.parameters
return CRot(-omega, -theta, -phi, wires=self.wires)
@property
def control_wires(self):
return Wires(self.wires[0])
class U1(Operation):
r"""
U1 gate.
.. math:: U_1(\phi) = e^{i\phi/2}R_z(\phi) = \begin{bmatrix}
1 & 0 \\
0 & e^{i\phi}
\end{bmatrix}.
.. note::
The ``U1`` gate is an alias for the phase shift operation :class:`~.PhaseShift`.
**Details:**
* Number of wires: 1
* Number of parameters: 1
* Number of dimensions per parameter: (0,)
* Gradient recipe: :math:`\frac{d}{d\phi}f(U_1(\phi)) = \frac{1}{2}\left[f(U_1(\phi+\pi/2)) - f(U_1(\phi-\pi/2))\right]`
where :math:`f` is an expectation value depending on :math:`U_1(\phi)`.
Args:
phi (float): rotation angle :math:`\phi`
wires (Sequence[int] or int): the wire the operation acts on
do_queue (bool): Indicates whether the operator should be
immediately pushed into the Operator queue (optional)
id (str or None): String representing the operation (optional)
"""
num_wires = 1
num_params = 1
"""int: Number of trainable parameters that the operator depends on."""
ndim_params = (0,)
"""tuple[int]: Number of dimensions per trainable parameter that the operator depends on."""
grad_method = "A"
parameter_frequencies = [(1,)]
def generator(self):
return qml.Projector(np.array([1]), wires=self.wires)
def __init__(self, phi, wires, do_queue=True, id=None):
super().__init__(phi, wires=wires, do_queue=do_queue, id=id)
@staticmethod
def compute_matrix(phi): # pylint: disable=arguments-differ
r"""Representation of the operator as a canonical matrix in the computational basis (static method).
The canonical matrix is the textbook matrix representation that does not consider wires.
Implicitly, this assumes that the wires of the operator correspond to the global wire order.
.. seealso:: :meth:`~.U1.matrix`
Args:
phi (tensor_like or float): rotation angle
Returns:
tensor_like: canonical matrix
**Example**
>>> qml.U1.compute_matrix(torch.tensor(0.5))
tensor([[1.0000+0.0000j, 0.0000+0.0000j],
[0.0000+0.0000j, 0.8776+0.4794j]])
"""
if qml.math.get_interface(phi) == "tensorflow":
phi = qml.math.cast_like(phi, 1j)
p = qml.math.exp(1j * phi)
z = qml.math.zeros_like(p)
return qml.math.stack([stack_last([qml.math.ones_like(p), z]), stack_last([z, p])], axis=-2)
@staticmethod
def compute_decomposition(phi, wires):
r"""Representation of the operator as a product of other operators (static method). :
.. math:: O = O_1 O_2 \dots O_n.
.. seealso:: :meth:`~.U1.decomposition`.
Args:
phi (float): rotation angle :math:`\phi`
wires (Any, Wires): Wire that the operator acts on.
Returns:
list[Operator]: decomposition into lower level operations
**Example:**
>>> qml.U1.compute_decomposition(1.234, wires=0)
[PhaseShift(1.234, wires=[0])]
"""
return [PhaseShift(phi, wires=wires)]
def adjoint(self):
return U1(-self.data[0], wires=self.wires)
def pow(self, z):
return [U1(self.data[0] * z, wires=self.wires)]
class U2(Operation):
r"""
U2 gate.
.. math::
U_2(\phi, \delta) = \frac{1}{\sqrt{2}}\begin{bmatrix} 1 & -\exp(i \delta)
\\ \exp(i \phi) & \exp(i (\phi + \delta)) \end{bmatrix}
The :math:`U_2` gate is related to the single-qubit rotation :math:`R` (:class:`Rot`) and the
:math:`R_\phi` (:class:`PhaseShift`) gates via the following relation:
.. math::
U_2(\phi, \delta) = R_\phi(\phi+\delta) R(\delta,\pi/2,-\delta)
.. note::
If the ``U2`` gate is not supported on the targeted device, PennyLane
will attempt to decompose the gate into :class:`~.Rot` and :class:`~.PhaseShift` gates.
**Details:**
* Number of wires: 1
* Number of parameters: 2
* Number of dimensions per parameter: (0, 0)
* Gradient recipe: :math:`\frac{d}{d\phi}f(U_2(\phi, \delta)) = \frac{1}{2}\left[f(U_2(\phi+\pi/2, \delta)) - f(U_2(\phi-\pi/2, \delta))\right]`
where :math:`f` is an expectation value depending on :math:`U_2(\phi, \delta)`.
This gradient recipe applies for each angle argument :math:`\{\phi, \delta\}`.
Args:
phi (float): azimuthal angle :math:`\phi`
delta (float): quantum phase :math:`\delta`
wires (Sequence[int] or int): the subsystem the gate acts on
do_queue (bool): Indicates whether the operator should be
immediately pushed into the Operator queue (optional)
id (str or None): String representing the operation (optional)
"""
num_wires = 1
num_params = 2
"""int: Number of trainable parameters that the operator depends on."""
ndim_params = (0, 0)
"""tuple[int]: Number of dimensions per trainable parameter that the operator depends on."""
grad_method = "A"
parameter_frequencies = [(1,), (1,)]
def __init__(self, phi, delta, wires, do_queue=True, id=None):
super().__init__(phi, delta, wires=wires, do_queue=do_queue, id=id)
@staticmethod
def compute_matrix(phi, delta): # pylint: disable=arguments-differ
r"""Representation of the operator as a canonical matrix in the computational basis (static method).
The canonical matrix is the textbook matrix representation that does not consider wires.
Implicitly, this assumes that the wires of the operator correspond to the global wire order.
.. seealso:: :meth:`~.U2.matrix`
Args:
phi (tensor_like or float): azimuthal angle
delta (tensor_like or float): quantum phase
Returns:
tensor_like: canonical matrix
**Example**
>>> qml.U2.compute_matrix(torch.tensor(0.1), torch.tensor(0.2))
tensor([[ 0.7071+0.0000j, -0.6930-0.1405j],
[ 0.7036+0.0706j, 0.6755+0.2090j]])
"""
interface = qml.math._multi_dispatch([phi, delta])
# If anything is not tensorflow, it has to be cast to complex first
if interface == "tensorflow":
phi = qml.math.cast_like(qml.math.asarray(phi, like=interface), 1j)
delta = qml.math.cast_like(qml.math.asarray(delta, like=interface), 1j)
one = qml.math.ones_like(phi) * qml.math.ones_like(delta)
mat = [
[one, -qml.math.exp(1j * delta) * one],
[qml.math.exp(1j * phi) * one, qml.math.exp(1j * (phi + delta))],
]
return INV_SQRT2 * qml.math.stack([stack_last(row) for row in mat], axis=-2)
@staticmethod
def compute_decomposition(phi, delta, wires):
r"""Representation of the operator as a product of other operators (static method).
.. math:: O = O_1 O_2 \dots O_n.
.. seealso:: :meth:`~.U2.decomposition`.
Args:
phi (float): azimuthal angle :math:`\phi`
delta (float): quantum phase :math:`\delta`
wires (Iterable, Wires): the subsystem the gate acts on
Returns:
list[Operator]: decomposition into lower level operations
**Example:**
>>> qml.U2.compute_decomposition(1.23, 2.34, wires=0)
[Rot(2.34, 1.5707963267948966, -2.34, wires=[0]),
PhaseShift(2.34, wires=[0]),
PhaseShift(1.23, wires=[0])]
"""
pi_half = qml.math.ones_like(delta) * (np.pi / 2)
decomp_ops = [
Rot(delta, pi_half, -delta, wires=wires),
PhaseShift(delta, wires=wires),
PhaseShift(phi, wires=wires),
]
return decomp_ops
def adjoint(self):
phi, delta = self.parameters
new_delta = qml.math.mod((np.pi - phi), (2 * np.pi))
new_phi = qml.math.mod((np.pi - delta), (2 * np.pi))
return U2(new_phi, new_delta, wires=self.wires)
class U3(Operation):
r"""
Arbitrary single qubit unitary.
.. math::
U_3(\theta, \phi, \delta) = \begin{bmatrix} \cos(\theta/2) & -\exp(i \delta)\sin(\theta/2) \\
\exp(i \phi)\sin(\theta/2) & \exp(i (\phi + \delta))\cos(\theta/2) \end{bmatrix}
The :math:`U_3` gate is related to the single-qubit rotation :math:`R` (:class:`Rot`) and the
:math:`R_\phi` (:class:`PhaseShift`) gates via the following relation:
.. math::
U_3(\theta, \phi, \delta) = R_\phi(\phi+\delta) R(\delta,\theta,-\delta)
.. note::
If the ``U3`` gate is not supported on the targeted device, PennyLane
will attempt to decompose the gate into :class:`~.PhaseShift` and :class:`~.Rot` gates.
**Details:**
* Number of wires: 1
* Number of parameters: 3
* Number of dimensions per parameter: (0, 0, 0)
* Gradient recipe: :math:`\frac{d}{d\phi}f(U_3(\theta, \phi, \delta)) = \frac{1}{2}\left[f(U_3(\theta+\pi/2, \phi, \delta)) - f(U_3(\theta-\pi/2, \phi, \delta))\right]`
where :math:`f` is an expectation value depending on :math:`U_3(\theta, \phi, \delta)`.
This gradient recipe applies for each angle argument :math:`\{\theta, \phi, \delta\}`.
Args:
theta (float): polar angle :math:`\theta`
phi (float): azimuthal angle :math:`\phi`
delta (float): quantum phase :math:`\delta`
wires (Sequence[int] or int): the subsystem the gate acts on
do_queue (bool): Indicates whether the operator should be
immediately pushed into the Operator queue (optional)
id (str or None): String representing the operation (optional)
"""
num_wires = 1
num_params = 3
"""int: Number of trainable parameters that the operator depends on."""
ndim_params = (0, 0, 0)
"""tuple[int]: Number of dimensions per trainable parameter that the operator depends on."""
grad_method = "A"
parameter_frequencies = [(1,), (1,), (1,)]
def __init__(self, theta, phi, delta, wires, do_queue=True, id=None):
super().__init__(theta, phi, delta, wires=wires, do_queue=do_queue, id=id)
@staticmethod
def compute_matrix(theta, phi, delta): # pylint: disable=arguments-differ
r"""Representation of the operator as a canonical matrix in the computational basis (static method).
The canonical matrix is the textbook matrix representation that does not consider wires.
Implicitly, this assumes that the wires of the operator correspond to the global wire order.
.. seealso:: :meth:`~.U3.matrix`
Args:
theta (tensor_like or float): polar angle
phi (tensor_like or float): azimuthal angle
delta (tensor_like or float): quantum phase
Returns:
tensor_like: canonical matrix
**Example**
>>> qml.U3.compute_matrix(torch.tensor(0.1), torch.tensor(0.2), torch.tensor(0.3))
tensor([[ 0.9988+0.0000j, -0.0477-0.0148j],
[ 0.0490+0.0099j, 0.8765+0.4788j]])
"""
# It might be that they are in different interfaces, e.g.,
# U3(0.2, 0.3, tf.Variable(0.5), wires=0)
# So we need to make sure the matrix comes out having the right type
interface = qml.math._multi_dispatch([theta, phi, delta])
c = qml.math.cos(theta / 2)
s = qml.math.sin(theta / 2)
# If anything is not tensorflow, it has to be cast to complex first
if interface == "tensorflow":
phi = qml.math.cast_like(qml.math.asarray(phi, like=interface), 1j)
delta = qml.math.cast_like(qml.math.asarray(delta, like=interface), 1j)
c = qml.math.cast_like(qml.math.asarray(c, like=interface), 1j)
s = qml.math.cast_like(qml.math.asarray(s, like=interface), 1j)
# The following variable ensures that all terms to be stacked have the same shape
one = qml.math.ones_like(phi) * qml.math.ones_like(delta)
c = c * one
s = s * one
mat = [
[c, -s * qml.math.exp(1j * delta)],
[s * qml.math.exp(1j * phi), c * qml.math.exp(1j * (phi + delta))],
]
return qml.math.stack([stack_last(row) for row in mat], axis=-2)
@staticmethod
def compute_decomposition(theta, phi, delta, wires):
r"""Representation of the operator as a product of other operators (static method).
.. math:: O = O_1 O_2 \dots O_n.
.. seealso:: :meth:`~.U3.decomposition`.
Args:
theta (float): polar angle :math:`\theta`
phi (float): azimuthal angle :math:`\phi`
delta (float): quantum phase :math:`\delta`
wires (Iterable, Wires): the subsystem the gate acts on
Returns:
list[Operator]: decomposition into lower level operations
**Example:**
>>> qml.U3.compute_decomposition(1.23, 2.34, 3.45, wires=0)
[Rot(3.45, 1.23, -3.45, wires=[0]),
PhaseShift(3.45, wires=[0]),
PhaseShift(2.34, wires=[0])]
"""
decomp_ops = [
Rot(delta, theta, -delta, wires=wires),
PhaseShift(delta, wires=wires),
PhaseShift(phi, wires=wires),
]
return decomp_ops
def adjoint(self):
theta, phi, delta = self.parameters
new_delta = qml.math.mod((np.pi - phi), (2 * np.pi))
new_phi = qml.math.mod((np.pi - delta), (2 * np.pi))
return U3(theta, new_phi, new_delta, wires=self.wires)
class IsingXX(Operation):
r"""
Ising XX coupling gate
.. math:: XX(\phi) = \begin{bmatrix}
\cos(\phi / 2) & 0 & 0 & -i \sin(\phi / 2) \\
0 & \cos(\phi / 2) & -i \sin(\phi / 2) & 0 \\
0 & -i \sin(\phi / 2) & \cos(\phi / 2) & 0 \\
-i \sin(\phi / 2) & 0 & 0 & \cos(\phi / 2)
\end{bmatrix}.
**Details:**
* Number of wires: 2
* Number of parameters: 1
* Number of dimensions per parameter: (0,)
* Gradient recipe: :math:`\frac{d}{d\phi}f(XX(\phi)) = \frac{1}{2}\left[f(XX(\phi +\pi/2)) - f(XX(\phi-\pi/2))\right]`
where :math:`f` is an expectation value depending on :math:`XX(\phi)`.
Args:
phi (float): the phase angle
wires (Sequence[int]): the subsystem the gate acts on
do_queue (bool): Indicates whether the operator should be
immediately pushed into the Operator queue (optional)
id (str or None): String representing the operation (optional)
"""
num_wires = 2
num_params = 1
"""int: Number of trainable parameters that the operator depends on."""
ndim_params = (0,)
"""tuple[int]: Number of dimensions per trainable parameter that the operator depends on."""
grad_method = "A"
parameter_frequencies = [(1,)]
def generator(self):
return -0.5 * PauliX(wires=self.wires[0]) @ PauliX(wires=self.wires[1])
def __init__(self, phi, wires, do_queue=True, id=None):
super().__init__(phi, wires=wires, do_queue=do_queue, id=id)
@staticmethod
def compute_matrix(phi): # pylint: disable=arguments-differ
r"""Representation of the operator as a canonical matrix in the computational basis (static method).
The canonical matrix is the textbook matrix representation that does not consider wires.
.. seealso:: :meth:`~.IsingXX.matrix`
Args:
phi (tensor_like or float): phase angle
Returns:
tensor_like: canonical matrix
**Example**
>>> qml.IsingXX.compute_matrix(torch.tensor(0.5))
tensor([[0.9689+0.0000j, 0.0000+0.0000j, 0.0000+0.0000j, 0.0000-0.2474j],
[0.0000+0.0000j, 0.9689+0.0000j, 0.0000-0.2474j, 0.0000+0.0000j],
[0.0000+0.0000j, 0.0000-0.2474j, 0.9689+0.0000j, 0.0000+0.0000j],
[0.0000-0.2474j, 0.0000+0.0000j, 0.0000+0.0000j, 0.9689+0.0000j]],
dtype=torch.complex128)
"""
c = qml.math.cos(phi / 2)
s = qml.math.sin(phi / 2)
if qml.math.get_interface(phi) == "tensorflow":
c = qml.math.cast_like(c, 1j)
s = qml.math.cast_like(s, 1j)
# The following avoids casting an imaginary quantity to reals when backpropagating
c = (1 + 0j) * c
js = -1j * s
z = qml.math.zeros_like(js)
matrix = [
[c, z, z, js],
[z, c, js, z],
[z, js, c, z],
[js, z, z, c],
]
return qml.math.stack([stack_last(row) for row in matrix], axis=-2)
@staticmethod
def compute_decomposition(phi, wires):
r"""Representation of the operator as a product of other operators (static method). :
.. math:: O = O_1 O_2 \dots O_n.
.. seealso:: :meth:`~.IsingXX.decomposition`.
Args:
phi (float): the phase angle
wires (Iterable, Wires): the subsystem the gate acts on
Returns:
list[Operator]: decomposition into lower level operations
**Example:**
>>> qml.IsingXX.compute_decomposition(1.23, wires=(0,1))
[CNOT(wires=[0, 1]), RX(1.23, wires=[0]), CNOT(wires=[0, 1])]
"""
decomp_ops = [
qml.CNOT(wires=wires),
RX(phi, wires=[wires[0]]),
qml.CNOT(wires=wires),
]
return decomp_ops
def adjoint(self):
(phi,) = self.parameters
return IsingXX(-phi, wires=self.wires)
def pow(self, z):
return [IsingXX(self.data[0] * z, wires=self.wires)]
class IsingYY(Operation):
r"""
Ising YY coupling gate
.. math:: \mathtt{YY}(\phi) = \begin{bmatrix}
\cos(\phi / 2) & 0 & 0 & i \sin(\phi / 2) \\
0 & \cos(\phi / 2) & -i \sin(\phi / 2) & 0 \\
0 & -i \sin(\phi / 2) & \cos(\phi / 2) & 0 \\
i \sin(\phi / 2) & 0 & 0 & \cos(\phi / 2)
\end{bmatrix}.
**Details:**
* Number of wires: 2
* Number of parameters: 1
* Number of dimensions per parameter: (0,)
* Gradient recipe: :math:`\frac{d}{d\phi}f(YY(\phi)) = \frac{1}{2}\left[f(YY(\phi +\pi/2)) - f(YY(\phi-\pi/2))\right]`
where :math:`f` is an expectation value depending on :math:`YY(\phi)`.
Args:
phi (float): the phase angle
wires (Sequence[int]): the subsystem the gate acts on
do_queue (bool): Indicates whether the operator should be
immediately pushed into the Operator queue (optional)
id (str or None): String representing the operation (optional)
"""
num_wires = 2
num_params = 1
"""int: Number of trainable parameters that the operator depends on."""
ndim_params = (0,)
"""tuple[int]: Number of dimensions per trainable parameter that the operator depends on."""
grad_method = "A"
parameter_frequencies = [(1,)]
def generator(self):
return -0.5 * PauliY(wires=self.wires[0]) @ PauliY(wires=self.wires[1])
def __init__(self, phi, wires, do_queue=True, id=None):
super().__init__(phi, wires=wires, do_queue=do_queue, id=id)
@staticmethod
def compute_decomposition(phi, wires):
r"""Representation of the operator as a product of other operators (static method). :
.. math:: O = O_1 O_2 \dots O_n.
.. seealso:: :meth:`~.IsingYY.decomposition`.
Args:
phi (float): the phase angle
wires (Iterable, Wires): the subsystem the gate acts on
Returns:
list[Operator]: decomposition into lower level operations
**Example:**
>>> qml.IsingYY.compute_decomposition(1.23, wires=(0,1))
[CY(wires=[0, 1]), RY(1.23, wires=[0]), CY(wires=[0, 1])]
"""
return [
qml.CY(wires=wires),
qml.RY(phi, wires=[wires[0]]),
qml.CY(wires=wires),
]
@staticmethod
def compute_matrix(phi): # pylint: disable=arguments-differ
r"""Representation of the operator as a canonical matrix in the computational basis (static method).
The canonical matrix is the textbook matrix representation that does not consider wires.
Implicitly, this assumes that the wires of the operator correspond to the global wire order.
.. seealso:: :meth:`~.IsingYY.matrix`
Args:
phi (tensor_like or float): phase angle
Returns:
tensor_like: canonical matrix
**Example**
>>> qml.IsingYY.compute_matrix(torch.tensor(0.5))
tensor([[0.9689+0.0000j, 0.0000+0.0000j, 0.0000+0.0000j, 0.0000+0.2474j],
[0.0000+0.0000j, 0.9689+0.0000j, 0.0000-0.2474j, 0.0000+0.0000j],
[0.0000+0.0000j, 0.0000-0.2474j, 0.9689+0.0000j, 0.0000+0.0000j],
[0.0000+0.2474j, 0.0000+0.0000j, 0.0000+0.0000j, 0.9689+0.0000j]])
"""
c = qml.math.cos(phi / 2)
s = qml.math.sin(phi / 2)
if qml.math.get_interface(phi) == "tensorflow":
c = qml.math.cast_like(c, 1j)
s = qml.math.cast_like(s, 1j)
# The following avoids casting an imaginary quantity to reals when backpropagating
c = (1 + 0j) * c
js = 1j * s
z = qml.math.zeros_like(js)
matrix = [
[c, z, z, js],
[z, c, -js, z],
[z, -js, c, z],
[js, z, z, c],
]
return qml.math.stack([stack_last(row) for row in matrix], axis=-2)
def adjoint(self):
(phi,) = self.parameters
return IsingYY(-phi, wires=self.wires)
def pow(self, z):
return [IsingYY(self.data[0] * z, wires=self.wires)]
class IsingZZ(Operation):
r"""
Ising ZZ coupling gate
.. math:: ZZ(\phi) = \begin{bmatrix}
e^{-i \phi / 2} & 0 & 0 & 0 \\
0 & e^{i \phi / 2} & 0 & 0 \\
0 & 0 & e^{i \phi / 2} & 0 \\
0 & 0 & 0 & e^{-i \phi / 2}
\end{bmatrix}.
**Details:**
* Number of wires: 2
* Number of parameters: 1
* Number of dimensions per parameter: (0,)
* Gradient recipe: :math:`\frac{d}{d\phi}f(ZZ(\phi)) = \frac{1}{2}\left[f(ZZ(\phi +\pi/2)) - f(ZZ(\phi-\pi/2))\right]`
where :math:`f` is an expectation value depending on :math:`ZZ(\phi)`.
Args:
phi (float): the phase angle
wires (Sequence[int]): the subsystem the gate acts on
do_queue (bool): Indicates whether the operator should be
immediately pushed into the Operator queue (optional)
id (str or None): String representing the operation (optional)
"""
num_wires = 2
num_params = 1
"""int: Number of trainable parameters that the operator depends on."""
ndim_params = (0,)
"""tuple[int]: Number of dimensions per trainable parameter that the operator depends on."""
grad_method = "A"
parameter_frequencies = [(1,)]
def generator(self):
return -0.5 * PauliZ(wires=self.wires[0]) @ PauliZ(wires=self.wires[1])
def __init__(self, phi, wires, do_queue=True, id=None):
super().__init__(phi, wires=wires, do_queue=do_queue, id=id)
@staticmethod
def compute_decomposition(phi, wires):
r"""Representation of the operator as a product of other operators (static method). :
.. math:: O = O_1 O_2 \dots O_n.
.. seealso:: :meth:`~.IsingZZ.decomposition`.
Args:
phi (float): the phase angle
wires (Iterable, Wires): the subsystem the gate acts on
Returns:
list[Operator]: decomposition into lower level operations
**Example:**
>>> qml.IsingZZ.compute_decomposition(1.23, wires=0)
[CNOT(wires=[0, 1]), RZ(1.23, wires=[1]), CNOT(wires=[0, 1])]
"""
return [
qml.CNOT(wires=wires),
qml.RZ(phi, wires=[wires[1]]),
qml.CNOT(wires=wires),
]
@staticmethod
def compute_matrix(phi): # pylint: disable=arguments-differ
r"""Representation of the operator as a canonical matrix in the computational basis (static method).
The canonical matrix is the textbook matrix representation that does not consider wires.
Implicitly, this assumes that the wires of the operator correspond to the global wire order.
.. seealso:: :meth:`~.IsingZZ.matrix`
Args:
phi (tensor_like or float): phase angle
Returns:
tensor_like: canonical matrix
**Example**
>>> qml.IsingZZ.compute_matrix(torch.tensor(0.5))
tensor([[0.9689-0.2474j, 0.0000+0.0000j, 0.0000+0.0000j, 0.0000+0.0000j],
[0.0000+0.0000j, 0.9689+0.2474j, 0.0000+0.0000j, 0.0000+0.0000j],
[0.0000+0.0000j, 0.0000+0.0000j, 0.9689+0.2474j, 0.0000+0.0000j],
[0.0000+0.0000j, 0.0000+0.0000j, 0.0000+0.0000j, 0.9689-0.2474j]])
"""
if qml.math.get_interface(phi) == "tensorflow":
phi = qml.math.cast_like(phi, 1j)
neg_phase = qml.math.exp(-0.5j * phi)
pos_phase = qml.math.exp(0.5j * phi)
zeros = qml.math.zeros_like(pos_phase)
matrix = [
[neg_phase, zeros, zeros, zeros],
[zeros, pos_phase, zeros, zeros],
[zeros, zeros, pos_phase, zeros],
[zeros, zeros, zeros, neg_phase],
]
return qml.math.stack([stack_last(row) for row in matrix], axis=-2)
@staticmethod
def compute_eigvals(phi): # pylint: disable=arguments-differ
r"""Eigenvalues of the operator in the computational basis (static method).
If :attr:`diagonalizing_gates` are specified and implement a unitary :math:`U`,
the operator can be reconstructed as
.. math:: O = U \Sigma U^{\dagger},
where :math:`\Sigma` is the diagonal matrix containing the eigenvalues.
Otherwise, no particular order for the eigenvalues is guaranteed.
.. seealso:: :meth:`~.IsingZZ.eigvals`
Args:
phi (tensor_like or float): phase angle
Returns:
tensor_like: eigenvalues
**Example**
>>> qml.IsingZZ.compute_eigvals(torch.tensor(0.5))
tensor([0.9689-0.2474j, 0.9689+0.2474j, 0.9689+0.2474j, 0.9689-0.2474j])
"""
if qml.math.get_interface(phi) == "tensorflow":
phi = qml.math.cast_like(phi, 1j)
pos_phase = qml.math.exp(1.0j * phi / 2)
neg_phase = qml.math.exp(-1.0j * phi / 2)
return stack_last([neg_phase, pos_phase, pos_phase, neg_phase])
def adjoint(self):
(phi,) = self.parameters
return IsingZZ(-phi, wires=self.wires)
def pow(self, z):
return [IsingZZ(self.data[0] * z, wires=self.wires)]
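# ---------------------------------------------------------------------------
# Illustrative sketch (added; not part of the library code above): numerically
# check the four-term parameter-shift rule quoted in the CRX docstring against
# a finite-difference estimate. The circuit and the angle are arbitrary choices
# for this demo; qml.device, qml.qnode and qml.expval are standard PennyLane API.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    dev = qml.device("default.qubit", wires=2)

    @qml.qnode(dev)
    def circuit(phi):
        qml.Hadamard(wires=0)  # put the control qubit in superposition
        qml.CRX(phi, wires=[0, 1])
        return qml.expval(qml.PauliZ(1))

    phi = 0.7
    a, b = np.pi / 2, 3 * np.pi / 2
    c_plus = (np.sqrt(2) + 1) / (4 * np.sqrt(2))
    c_minus = (np.sqrt(2) - 1) / (4 * np.sqrt(2))

    # Four-term parameter-shift estimate of d<Z_1>/dphi
    shift_grad = c_plus * (circuit(phi + a) - circuit(phi - a)) - c_minus * (
        circuit(phi + b) - circuit(phi - b)
    )

    # Central finite-difference reference
    eps = 1e-6
    fd_grad = (circuit(phi + eps) - circuit(phi - eps)) / (2 * eps)

    print("parameter-shift gradient  :", shift_grad)
    print("finite-difference gradient:", fd_grad)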
|
import re
from django.db import models
from django.utils import timezone
from common.models import BaseModel
from organization.models import Organization
class ClientIndustry(BaseModel):
"""Client industry model"""
name = models.CharField(max_length=100, null=False)
name_slug = models.CharField(max_length=150, editable=False, null=False)
organization = models.ForeignKey(Organization, null=True, on_delete=models.SET_NULL)
class Meta:
db_table = 'client_industry'
verbose_name_plural = 'Client Industries'
unique_together = ['name', 'name_slug', 'organization']
def save(self, *args, **kwargs):
if self.is_deleted is not True:
self.name_slug = re.sub(r'\W', '-', self.name.lower()) # type: ignore
super(ClientIndustry, self).save(*args, **kwargs)
def delete(self, using=None, keep_parents=False):
self.is_deleted = True
self.deleted_at = timezone.now()
self.save()
def __str__(self):
return self.name
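# Illustrative note (added; not part of the app code): save() above slugifies the
# name by replacing every non-word character with a hyphen, e.g.
#   re.sub(r'\W', '-', "Health Care & Pharma".lower()) == 'health-care---pharma'
# and the slug is only refreshed while the row has not been soft-deleted.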
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here are several helpful packages to load
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../../../input/primaryobjects_voicegender/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
from subprocess import check_output
print(check_output(["ls", "../../../input/primaryobjects_voicegender"]).decode("utf8"))
# Any results you write to the current directory are saved as output.
# ## Import Standard Python Libraries
# In[ ]:
import io, os, sys, types, time, datetime, math, random, requests, subprocess, tempfile
# ## Packages Import
#
# These are all the packages we'll be using. Importing individual libraries makes it easy for us to use them without having to call the parent libraries.
# In[ ]:
# Data Manipulation
import numpy as np
import pandas as pd
# Visualization
import matplotlib.pyplot as plt
import missingno
import seaborn as sns
from pandas.plotting import scatter_matrix
from mpl_toolkits.mplot3d import Axes3D
# Feature Selection and Encoding
from sklearn.feature_selection import RFE, RFECV
from sklearn.svm import SVR
from sklearn.decomposition import PCA
from sklearn.preprocessing import OneHotEncoder, LabelEncoder, label_binarize
# Machine learning
import sklearn.ensemble as ske
from sklearn import datasets, model_selection, tree, preprocessing, metrics, linear_model
from sklearn.svm import LinearSVC
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LinearRegression, LogisticRegression, Ridge, Lasso, SGDClassifier
from sklearn.tree import DecisionTreeClassifier
import tensorflow as tf
# Grid and Random Search
import scipy.stats as st
from scipy.stats import randint as sp_randint
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
# Metrics
from sklearn.metrics import precision_recall_fscore_support, roc_curve, auc
# Managing Warnings
import warnings
warnings.filterwarnings('ignore')
# Plot the Figures Inline
# ## Data Loading
# In[ ]:
dataset_raw = pd.read_csv("../../../input/primaryobjects_voicegender/voice.csv")
# ## Data Exploration - Univariate
#
# When exploring our dataset and its features, we have many options available to us. We can explore each feature individually, or compare pairs of features, finding the correlation between them. Let's start with some simple Univariate (one feature) analysis.
#
# Features can be of multiple types:
# - **Nominal:** is for mutually exclusive, but not ordered, categories.
# - **Ordinal:** is one where the order matters but not the difference between values.
# - **Interval:** is a measurement where the difference between two values is meaningful.
# - **Ratio:** has all the properties of an interval variable, and also has a clear definition of 0.0.
#
# There are multiple ways of manipulating each feature type, but for simplicity, we'll define only two feature types:
# - **Numerical:** any feature that contains numeric values.
# - **Categorical:** any feature that contains categories, or text. A quick way to separate the two buckets with pandas is sketched in the cell below.
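# In[ ]:
# Added illustration (a minimal sketch, not in the original notebook): split the
# columns into the two buckets defined above using pandas dtype selection.
numerical_cols = dataset_raw.select_dtypes(include=[np.number]).columns.tolist()
categorical_cols = dataset_raw.select_dtypes(include=['object']).columns.tolist()
print("Numerical features:", numerical_cols)
print("Categorical features:", categorical_cols)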
# In[ ]:
# Describing all the Numerical Features
dataset_raw.describe()
# In[ ]:
# Describing all the Categorical Features
dataset_raw.describe(include=['O'])
# In[ ]:
# Let's have a quick look at our data
dataset_raw.head()
# In[ ]:
# Let’s plot the distribution of each feature
def plot_distribution(dataset, cols=5, width=20, height=15, hspace=0.2, wspace=0.5):
plt.style.use('seaborn-whitegrid')
fig = plt.figure(figsize=(width,height))
fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=wspace, hspace=hspace)
rows = math.ceil(float(dataset.shape[1]) / cols)
for i, column in enumerate(dataset.columns):
ax = fig.add_subplot(rows, cols, i + 1)
ax.set_title(column)
if dataset.dtypes[column] == object:
g = sns.countplot(y=column, data=dataset)
substrings = [s.get_text()[:18] for s in g.get_yticklabels()]
g.set(yticklabels=substrings)
plt.xticks(rotation=25)
else:
g = sns.distplot(dataset[column])
plt.xticks(rotation=25)
plot_distribution(dataset_raw, cols=3, width=20, height=20, hspace=0.45, wspace=0.5)
# In[ ]:
# How many missing values are there in our dataset?
missingno.matrix(dataset_raw, figsize = (30,5))
# In[ ]:
missingno.bar(dataset_raw, sort='ascending', figsize = (30,5))
# # Feature Cleaning, Engineering, and Imputation
#
# **Cleaning:**
# To clean our data, we'll need to work with:
#
# - **Missing values:** Either omit elements from a dataset that contain missing values or impute them (fill them in).
# - **Special values:** Numeric variables are endowed with several formalized special values including ±Inf, NA and NaN. Calculations involving special values often result in special values, and need to be handled/cleaned.
# - **Outliers:** They should be detected, but not necessarily removed. Their inclusion in the analysis is a statistical decision.
# - **Obvious inconsistencies:** A person's age cannot be negative, a man cannot be pregnant and an under-aged person cannot possess a driver's license. Find the inconsistencies and plan for them.
#
# **Engineering:**
# There are multiple techniques for feature engineering:
# - **Decompose:** Converting 2014-09-20T20:45:40Z into categorical attributes like hour_of_the_day, part_of_day, etc.
# - **Discretisation:** We can choose to discretise some of the continuous variables we have, as some algorithms will perform faster on discretised data. We are going to do both, and compare the results of the ML algorithms on the discretised and non-discretised datasets. We'll call these datasets:
#
# - dataset_bin => where Continuous variables are Discretised
# - dataset_con => where Continuous variables are Continuous
#
# - **Reframe Numerical Quantities:** Changing from grams to kg, and losing detail might be both wanted and efficient for calculation
# - **Feature Crossing:** Creating new features as a combination of existing features. Could be multiplying numerical features, or combining categorical variables. This is a great way to add domain expertise knowledge to the dataset.
#
# **Imputation:**
# We can impute missing values in a number of different ways:
# - **Hot-Deck:** The technique finds the first missing value and uses the cell value immediately prior to it to impute the missing value.
# - **Cold-Deck:** Selects donors from another dataset to complete missing data.
# - **Mean-substitution:** Another imputation technique involves replacing any missing value with the mean of that variable for all other cases, which has the benefit of not changing the sample mean for that variable. A minimal pandas sketch of this follows in the next cell.
# - **Regression:** A regression model is estimated to predict observed values of a variable based on other variables, and that model is then used to impute values in cases where that variable is missing.
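# In[ ]:
# Added illustration (a minimal sketch, not in the original notebook): mean
# substitution with pandas. The missingno plots above suggest this dataset is
# complete, so we first punch a few artificial holes into a copy of one column
# purely to demonstrate the mechanics.
imputation_demo = dataset_raw.copy()
holes = imputation_demo.sample(5, random_state=0).index
imputation_demo.loc[holes, 'meanfreq'] = np.nan
print("Missing before imputation:", imputation_demo['meanfreq'].isna().sum())
imputation_demo['meanfreq'] = imputation_demo['meanfreq'].fillna(imputation_demo['meanfreq'].mean())
print("Missing after imputation: ", imputation_demo['meanfreq'].isna().sum())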
# In[ ]:
# To perform our data analysis, let's create new dataframes.
dataset_bin = pd.DataFrame() # To contain our dataframe with our discretised continuous variables
dataset_con = pd.DataFrame() # To contain our dataframe with our continuous variables
# ### Feature Label
#
# This is the feature we are trying to predict. We'll change the string to a binary 0/1, with 1 signifying male.
# In[ ]:
# Let's fix the Class Feature
dataset_raw.loc[dataset_raw['label'] == 'male', 'label'] = 1
dataset_raw.loc[dataset_raw['label'] == 'female', 'label'] = 0
dataset_bin['label'] = dataset_raw['label']
dataset_con['label'] = dataset_raw['label']
# In[ ]:
plt.style.use('seaborn-whitegrid')
fig = plt.figure(figsize=(20,1))
sns.countplot(y="label", data=dataset_bin)
# ### Feature: meanfreq
#
# We will use the Pandas Cut function to bin the data in equally sized buckets. We will also add our original feature to the dataset_con dataframe.
# In[ ]:
dataset_bin['meanfreq'] = pd.cut(dataset_raw['meanfreq'], 10) # discretised
dataset_con['meanfreq'] = dataset_raw['meanfreq'] # non-discretised
# In[ ]:
plt.style.use('seaborn-whitegrid')
fig = plt.figure(figsize=(20,5))
plt.subplot(1, 2, 1)
sns.countplot(y="meanfreq", data=dataset_bin)
plt.subplot(1, 2, 2)
sns.distplot(dataset_con.loc[dataset_con['label'] == 1]['meanfreq'], kde_kws={"label": "male"})
sns.distplot(dataset_con.loc[dataset_con['label'] == 0]['meanfreq'], kde_kws={"label": "female"})
# ### Feature: sd
#
# We will use the Pandas Cut function to bin the data in equally sized buckets. We will also add our original feature to the dataset_con dataframe.
# In[ ]:
dataset_bin['sd'] = pd.cut(dataset_raw['sd'], 10) # discretised
dataset_con['sd'] = dataset_raw['sd'] # non-discretised
# In[ ]:
plt.style.use('seaborn-whitegrid')
fig = plt.figure(figsize=(20,5))
plt.subplot(1, 2, 1)
sns.countplot(y="sd", data=dataset_bin)
plt.subplot(1, 2, 2)
sns.distplot(dataset_con.loc[dataset_con['label'] == 1]['sd'], kde_kws={"label": "male"})
sns.distplot(dataset_con.loc[dataset_con['label'] == 0]['sd'], kde_kws={"label": "female"})
# ### Feature: median
#
# We will use the Pandas Cut function to bin the data in equally sized buckets. We will also add our original feature to the dataset_con dataframe.
# In[ ]:
dataset_bin['median'] = pd.cut(dataset_raw['median'], 10) # discretised
dataset_con['median'] = dataset_raw['median'] # non-discretised
# In[ ]:
plt.style.use('seaborn-whitegrid')
fig = plt.figure(figsize=(20,5))
plt.subplot(1, 2, 1)
sns.countplot(y="median", data=dataset_bin)
plt.subplot(1, 2, 2)
sns.distplot(dataset_con.loc[dataset_con['label'] == 1]['median'], kde_kws={"label": "male"})
sns.distplot(dataset_con.loc[dataset_con['label'] == 0]['median'], kde_kws={"label": "female"})
# ### Feature: Q25
#
# We will use the Pandas Cut function to bin the data in equally sized buckets. We will also add our original feature to the dataset_con dataframe.
# In[ ]:
dataset_bin['Q25'] = pd.cut(dataset_raw['Q25'], 10) # discretised
dataset_con['Q25'] = dataset_raw['Q25'] # non-discretised
plt.style.use('seaborn-whitegrid')
fig = plt.figure(figsize=(20,5))
plt.subplot(1, 2, 1)
sns.countplot(y="Q25", data=dataset_bin)
plt.subplot(1, 2, 2)
sns.distplot(dataset_con.loc[dataset_con['label'] == 1]['Q25'], kde_kws={"label": "male"})
sns.distplot(dataset_con.loc[dataset_con['label'] == 0]['Q25'], kde_kws={"label": "female"})
# ### Feature: Q75
#
# We will use the Pandas Cut function to bin the data in equally sized buckets. We will also add our original feature to the dataset_con dataframe.
# In[ ]:
dataset_bin['Q75'] = pd.cut(dataset_raw['Q75'], 10) # discretised
dataset_con['Q75'] = dataset_raw['Q75'] # non-discretised
plt.style.use('seaborn-whitegrid')
fig = plt.figure(figsize=(20,5))
plt.subplot(1, 2, 1)
sns.countplot(y="Q75", data=dataset_bin)
plt.subplot(1, 2, 2)
sns.distplot(dataset_con.loc[dataset_con['label'] == 1]['Q75'], kde_kws={"label": "male"})
sns.distplot(dataset_con.loc[dataset_con['label'] == 0]['Q75'], kde_kws={"label": "female"})
# ### Feature: IQR
#
# We will use the Pandas Cut function to bin the data in equally sized buckets. We will also add our original feature to the dataset_con dataframe.
# In[ ]:
dataset_bin['IQR'] = pd.cut(dataset_raw['IQR'], 10) # discretised
dataset_con['IQR'] = dataset_raw['IQR'] # non-discretised
plt.style.use('seaborn-whitegrid')
fig = plt.figure(figsize=(20,5))
plt.subplot(1, 2, 1)
sns.countplot(y="IQR", data=dataset_bin)
plt.subplot(1, 2, 2)
sns.distplot(dataset_con.loc[dataset_con['label'] == 1]['IQR'], kde_kws={"label": "male"})
sns.distplot(dataset_con.loc[dataset_con['label'] == 0]['IQR'], kde_kws={"label": "female"})
# ### Feature: skew
#
# We will use the Pandas Cut function to bin the data in equally sized buckets. We will also add our original feature to the dataset_con dataframe.
# In[ ]:
dataset_bin['skew'] = pd.cut(dataset_raw['skew'], 10) # discretised
dataset_con['skew'] = dataset_raw['skew'] # non-discretised
plt.style.use('seaborn-whitegrid')
fig = plt.figure(figsize=(20,5))
plt.subplot(1, 2, 1)
sns.countplot(y="skew", data=dataset_bin)
plt.subplot(1, 2, 2)
sns.distplot(dataset_con.loc[dataset_con['label'] == 1]['skew'], kde_kws={"label": "male"})
sns.distplot(dataset_con.loc[dataset_con['label'] == 0]['skew'], kde_kws={"label": "female"})
# ### Feature: kurt
#
# We will use the Pandas Cut function to bin the data in equally sized buckets. We will also add our original feature to the dataset_con dataframe.
# In[ ]:
dataset_bin['kurt'] = pd.cut(dataset_raw['kurt'], 10) # discretised
dataset_con['kurt'] = dataset_raw['kurt'] # non-discretised
plt.style.use('seaborn-whitegrid')
fig = plt.figure(figsize=(20,5))
plt.subplot(1, 2, 1)
sns.countplot(y="kurt", data=dataset_bin)
plt.subplot(1, 2, 2)
sns.distplot(dataset_con.loc[dataset_con['label'] == 1]['kurt'], kde_kws={"label": "male"})
sns.distplot(dataset_con.loc[dataset_con['label'] == 0]['kurt'], kde_kws={"label": "female"})
# ### Feature: sp.ent
#
# We will use the Pandas Cut function to bin the data in equally sized buckets. We will also add our original feature to the dataset_con dataframe.
# In[ ]:
dataset_bin['sp.ent'] = pd.cut(dataset_raw['sp.ent'], 10) # discretised
dataset_con['sp.ent'] = dataset_raw['sp.ent'] # non-discretised
plt.style.use('seaborn-whitegrid')
fig = plt.figure(figsize=(20,5))
plt.subplot(1, 2, 1)
sns.countplot(y="sp.ent", data=dataset_bin)
plt.subplot(1, 2, 2)
sns.distplot(dataset_con.loc[dataset_con['label'] == 1]['sp.ent'], kde_kws={"label": "male"})
sns.distplot(dataset_con.loc[dataset_con['label'] == 0]['sp.ent'], kde_kws={"label": "female"})
# ### Feature: sfm
#
# We will use the Pandas Cut function to bin the data in equally sized buckets. We will also add our original feature to the dataset_con dataframe.
# In[ ]:
dataset_bin['sfm'] = pd.cut(dataset_raw['sfm'], 10) # discretised
dataset_con['sfm'] = dataset_raw['sfm'] # non-discretised
plt.style.use('seaborn-whitegrid')
fig = plt.figure(figsize=(20,5))
plt.subplot(1, 2, 1)
sns.countplot(y="sfm", data=dataset_bin)
plt.subplot(1, 2, 2)
sns.distplot(dataset_con.loc[dataset_con['label'] == 1]['sfm'], kde_kws={"label": "male"})
sns.distplot(dataset_con.loc[dataset_con['label'] == 0]['sfm'], kde_kws={"label": "female"})
# ### Feature: mode
#
# We will use the Pandas Cut function to bin the data in equally sized buckets. We will also add our original feature to the dataset_con dataframe.
# In[ ]:
dataset_bin['mode'] = pd.cut(dataset_raw['mode'], 10) # discretised
dataset_con['mode'] = dataset_raw['mode'] # non-discretised
plt.style.use('seaborn-whitegrid')
fig = plt.figure(figsize=(20,5))
plt.subplot(1, 2, 1)
sns.countplot(y="mode", data=dataset_bin)
plt.subplot(1, 2, 2)
sns.distplot(dataset_con.loc[dataset_con['label'] == 1]['mode'], kde_kws={"label": "male"})
sns.distplot(dataset_con.loc[dataset_con['label'] == 0]['mode'], kde_kws={"label": "female"})
# ### Feature: centroid
#
# We will use the Pandas Cut function to bin the data in equally sized buckets. We will also add our original feature to the dataset_con dataframe.
# In[ ]:
dataset_bin['centroid'] = pd.cut(dataset_raw['centroid'], 10) # discretised
dataset_con['centroid'] = dataset_raw['centroid'] # non-discretised
plt.style.use('seaborn-whitegrid')
fig = plt.figure(figsize=(20,5))
plt.subplot(1, 2, 1)
sns.countplot(y="centroid", data=dataset_bin)
plt.subplot(1, 2, 2)
sns.distplot(dataset_con.loc[dataset_con['label'] == 1]['centroid'], kde_kws={"label": "male"})
sns.distplot(dataset_con.loc[dataset_con['label'] == 0]['centroid'], kde_kws={"label": "female"})
# ### Feature: meanfun
#
# We will use the Pandas Cut function to bin the data in equally sized buckets. We will also add our original feature to the dataset_con dataframe.
# In[ ]:
dataset_bin['meanfun'] = pd.cut(dataset_raw['meanfun'], 10) # discretised
dataset_con['meanfun'] = dataset_raw['meanfun'] # non-discretised
plt.style.use('seaborn-whitegrid')
fig = plt.figure(figsize=(20,5))
plt.subplot(1, 2, 1)
sns.countplot(y="meanfun", data=dataset_bin)
plt.subplot(1, 2, 2)
sns.distplot(dataset_con.loc[dataset_con['label'] == 1]['meanfun'], kde_kws={"label": "male"})
sns.distplot(dataset_con.loc[dataset_con['label'] == 0]['meanfun'], kde_kws={"label": "female"})
# ### Feature: minfun
#
# We will use the Pandas Cut function to bin the data in equally sized buckets. We will also add our original feature to the dataset_con dataframe.
# In[ ]:
dataset_bin['minfun'] = pd.cut(dataset_raw['minfun'], 10) # discretised
dataset_con['minfun'] = dataset_raw['minfun'] # non-discretised
plt.style.use('seaborn-whitegrid')
fig = plt.figure(figsize=(20,5))
plt.subplot(1, 2, 1)
sns.countplot(y="minfun", data=dataset_bin)
plt.subplot(1, 2, 2)
sns.distplot(dataset_con.loc[dataset_con['label'] == 1]['minfun'], kde_kws={"label": "male"})
sns.distplot(dataset_con.loc[dataset_con['label'] == 0]['minfun'], kde_kws={"label": "female"})
# ### Feature: maxfun
#
# We will use the Pandas Cut function to bin the data in equally sized buckets. We will also add our original feature to the dataset_con dataframe.
# In[ ]:
dataset_bin['maxfun'] = pd.cut(dataset_raw['maxfun'], 10) # discretised
dataset_con['maxfun'] = dataset_raw['maxfun'] # non-discretised
plt.style.use('seaborn-whitegrid')
fig = plt.figure(figsize=(20,5))
plt.subplot(1, 2, 1)
sns.countplot(y="maxfun", data=dataset_bin)
plt.subplot(1, 2, 2)
sns.distplot(dataset_con.loc[dataset_con['label'] == 1]['maxfun'], kde_kws={"label": "male"})
sns.distplot(dataset_con.loc[dataset_con['label'] == 0]['maxfun'], kde_kws={"label": "female"})
# ### Feature: meandom
#
# We will use the Pandas Cut function to bin the data in equally sized buckets. We will also add our original feature to the dataset_con dataframe.
# In[ ]:
dataset_bin['meandom'] = pd.cut(dataset_raw['meandom'], 10) # discretised
dataset_con['meandom'] = dataset_raw['meandom'] # non-discretised
plt.style.use('seaborn-whitegrid')
fig = plt.figure(figsize=(20,5))
plt.subplot(1, 2, 1)
sns.countplot(y="meandom", data=dataset_bin)
plt.subplot(1, 2, 2)
sns.distplot(dataset_con.loc[dataset_con['label'] == 1]['meandom'], kde_kws={"label": "male"})
sns.distplot(dataset_con.loc[dataset_con['label'] == 0]['meandom'], kde_kws={"label": "female"})
# ### Feature: mindom
#
# We will use the Pandas Cut function to bin the data in equally sized buckets. We will also add our original feature to the dataset_con dataframe.
# In[ ]:
dataset_bin['mindom'] = pd.cut(dataset_raw['mindom'], 10) # discretised
dataset_con['mindom'] = dataset_raw['mindom'] # non-discretised
plt.style.use('seaborn-whitegrid')
fig = plt.figure(figsize=(20,5))
plt.subplot(1, 2, 1)
sns.countplot(y="mindom", data=dataset_bin)
plt.subplot(1, 2, 2)
sns.distplot(dataset_con.loc[dataset_con['label'] == 1]['mindom'], kde_kws={"label": "male"})
sns.distplot(dataset_con.loc[dataset_con['label'] == 0]['mindom'], kde_kws={"label": "female"})
# ### Feature: maxdom
#
# We will use the Pandas Cut function to bin the data in equally sized buckets. We will also add our original feature to the dataset_con dataframe.
# In[ ]:
dataset_bin['maxdom'] = pd.cut(dataset_raw['maxdom'], 10) # discretised
dataset_con['maxdom'] = dataset_raw['maxdom'] # non-discretised
plt.style.use('seaborn-whitegrid')
fig = plt.figure(figsize=(20,5))
plt.subplot(1, 2, 1)
sns.countplot(y="maxdom", data=dataset_bin)
plt.subplot(1, 2, 2)
sns.distplot(dataset_con.loc[dataset_con['label'] == 1]['maxdom'], kde_kws={"label": "male"})
sns.distplot(dataset_con.loc[dataset_con['label'] == 0]['maxdom'], kde_kws={"label": "female"})
# ### Feature: dfrange
#
# We will use the Pandas Cut function to bin the data in equally sized buckets. We will also add our original feature to the dataset_con dataframe.
# In[ ]:
dataset_bin['dfrange'] = pd.cut(dataset_raw['dfrange'], 10) # discretised
dataset_con['dfrange'] = dataset_raw['dfrange'] # non-discretised
plt.style.use('seaborn-whitegrid')
fig = plt.figure(figsize=(20,5))
plt.subplot(1, 2, 1)
sns.countplot(y="dfrange", data=dataset_bin)
plt.subplot(1, 2, 2)
sns.distplot(dataset_con.loc[dataset_con['label'] == 1]['dfrange'], kde_kws={"label": "male"})
sns.distplot(dataset_con.loc[dataset_con['label'] == 0]['dfrange'], kde_kws={"label": "female"})
# ### Feature: modindx
#
# We will use the Pandas Cut function to bin the data in equally sized buckets. We will also add our original feature to the dataset_con dataframe.
# In[ ]:
dataset_bin['modindx'] = pd.cut(dataset_raw['modindx'], 10) # discretised
dataset_con['modindx'] = dataset_raw['modindx'] # non-discretised
plt.style.use('seaborn-whitegrid')
fig = plt.figure(figsize=(20,5))
plt.subplot(1, 2, 1)
sns.countplot(y="modindx", data=dataset_bin)
plt.subplot(1, 2, 2)
sns.distplot(dataset_con.loc[dataset_con['label'] == 1]['modindx'], kde_kws={"label": "male"})
sns.distplot(dataset_con.loc[dataset_con['label'] == 0]['modindx'], kde_kws={"label": "female"})
# ## Bi-variate Analysis
#
# So far, we have analysed all features individually. Let's now start combining some of these features to obtain further insight into the interactions between them.
# In[ ]:
# Interaction between pairs of features.
#todo select some features
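#
# As a minimal illustrative sketch for the TODO above, the cell below draws a pair plot of a few features coloured by label. The chosen columns (meanfun, IQR, sfm) are an arbitrary subset, assumed to have been added to dataset_con in the earlier feature cells; they are not a tuned selection.
# In[ ]:
sns.pairplot(dataset_con, hue='label', vars=['meanfun', 'IQR', 'sfm'])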
# ## Feature Encoding
#
# Remember that Machine Learning algorithms perform Linear Algebra on Matrices, which means all features need to have numeric values. The process of converting categorical features into numeric values is called encoding. Let's perform both One-Hot and Label encoding.
# In[ ]:
# One-Hot Encode all binned feature columns (everything except the label) before Machine Learning
one_hot_cols = dataset_bin.columns.tolist()
one_hot_cols.remove('label')
dataset_bin_enc = pd.get_dummies(dataset_bin, columns=one_hot_cols)
dataset_bin_enc.head()
# In[ ]:
# Label Encode all columns (including the label)
dataset_con_enc = dataset_con.apply(LabelEncoder().fit_transform)
dataset_con_enc.head()
# ## Feature Reduction / Selection
#
# Once we have our features ready to use, we might find that the number of features is too large for our machine learning algorithms to run in a reasonable timeframe. There are a number of options available to us for feature reduction and feature selection.
#
# - **Dimensionality Reduction:**
# - **Principal Component Analysis (PCA):** Principal component analysis (PCA) is a statistical procedure that uses an orthogonal transformation to convert a set of observations of possibly correlated variables into a set of values of linearly uncorrelated variables called principal components. This transformation is defined in such a way that the first principal component has the largest possible variance (that is, accounts for as much of the variability in the data as possible), and each succeeding component in turn has the highest variance possible under the constraint that it is orthogonal to the preceding components.
# - **Singular Value Decomposition (SVD):** SVD is a factorization of a real or complex matrix. It is the generalization of the eigendecomposition of a positive semidefinite normal matrix (for example, a symmetric matrix with positive eigenvalues) to any m×n matrix via an extension of the polar decomposition. It has many useful applications in signal processing and statistics.
#
#
# - **Feature Importance/Relevance:**
# - **Filter Methods:** Filter-type methods select features based only on general metrics, such as their correlation with the variable to predict. Filter methods suppress the least interesting variables; the remaining variables are then used in a classification or regression model. These methods are particularly efficient in computation time and robust to overfitting. A minimal sketch of a filter method appears after this list.
# - **Wrapper Methods:** Wrapper methods evaluate subsets of variables, which, unlike filter approaches, allows them to detect possible interactions between variables. Their two main disadvantages are the increased risk of overfitting when the number of observations is insufficient, and the significant computation time when the number of variables is large.
# - **Embedded Methods:** Embedded methods try to combine the advantages of both previous methods. A learning algorithm takes advantage of its own variable selection process and performs feature selection and classification simultaneously.
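#
# As referenced in the filter-methods bullet above, the cell below is a minimal sketch of a univariate filter method. It is shown for illustration only and is not applied to the datasets used in the rest of this notebook; the score function (f_classif) and k=10 are assumptions rather than tuned choices.
# In[ ]:
from sklearn.feature_selection import SelectKBest, f_classif
X_fs = dataset_con_enc.drop('label', axis=1)
y_fs = dataset_con_enc['label']
selector = SelectKBest(score_func=f_classif, k=min(10, X_fs.shape[1]))
selector.fit(X_fs, y_fs)
# Rank the features by their univariate ANOVA F-score
pd.Series(selector.scores_, index=X_fs.columns).sort_values(ascending=False).head(10)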
# ### Feature Correlation
#
# Correlation is a measure of how much two random variables change together. Features should be uncorrelated with each other and highly correlated to the feature we're trying to predict.
# In[ ]:
# Create a correlation plot of both datasets.
plt.style.use('seaborn-whitegrid')
fig = plt.figure(figsize=(25,10))
plt.subplot(1, 2, 1)
# Generate a mask for the upper triangle
mask = np.zeros_like(dataset_bin_enc.corr(), dtype=bool)
mask[np.triu_indices_from(mask)] = True
sns.heatmap(dataset_bin_enc.corr(), mask=mask, vmax=1.0, square=True)
plt.subplot(1, 2, 2)
mask = np.zeros_like(dataset_con_enc.corr(), dtype=bool)
mask[np.triu_indices_from(mask)] = True
sns.heatmap(dataset_con_enc.corr(), mask=mask, vmax=1.0, square=True)
# ### Feature Importance
#
# Random forest consists of a number of decision trees. Every node in the decision trees is a condition on a single feature, designed to split the dataset into two so that similar response values end up in the same set. The measure based on which the (locally) optimal condition is chosen is called impurity. When training a tree, it can be computed how much each feature decreases the weighted impurity in a tree. For a forest, the impurity decrease from each feature can be averaged and the features are ranked according to this measure. This is the feature importance measure exposed in sklearn’s Random Forest implementations.
# In[ ]:
# Using Random Forest to gain an insight on Feature Importance
clf = RandomForestClassifier()
clf.fit(dataset_con_enc.drop('label', axis=1), dataset_con_enc['label'])
plt.style.use('seaborn-whitegrid')
importance = clf.feature_importances_
importance = pd.DataFrame(importance, index=dataset_con_enc.drop('label', axis=1).columns, columns=["Importance"])
importance.sort_values(by='Importance', ascending=True).plot(kind='barh', figsize=(20,len(importance)/2))
# ### PCA
#
# Principal component analysis (PCA) is a statistical procedure that uses an orthogonal transformation to convert a set of observations of possibly correlated variables into a set of values of linearly uncorrelated variables called principal components. This transformation is defined in such a way that the first principal component has the largest possible variance (that is, accounts for as much of the variability in the data as possible), and each succeeding component in turn has the highest variance possible under the constraint that it is orthogonal to the preceding components.
#
# We can use PCA to reduce the number of features to use in our ML algorithms, and graphing the variance gives us an idea of how many features we really need to represent our dataset fully.
# In[ ]:
# Calculating PCA for both datasets, and graphing the Variance for each feature, per dataset
std_scale = preprocessing.StandardScaler().fit(dataset_bin_enc.drop('label', axis=1))
X = std_scale.transform(dataset_bin_enc.drop('label', axis=1))
pca1 = PCA(n_components=len(dataset_bin_enc.columns)-1)
fit1 = pca1.fit(X)
std_scale = preprocessing.StandardScaler().fit(dataset_con_enc.drop('label', axis=1))
X = std_scale.transform(dataset_con_enc.drop('label', axis=1))
pca2 = PCA(n_components=len(dataset_con_enc.columns)-2)
fit2 = pca2.fit(X)
# Graphing the variance per feature
plt.style.use('seaborn-whitegrid')
plt.figure(figsize=(25,7))
plt.subplot(1, 2, 1)
plt.xlabel('PCA Feature')
plt.ylabel('Variance')
plt.title('PCA for Discretised Dataset')
plt.bar(range(0, fit1.explained_variance_ratio_.size), fit1.explained_variance_ratio_)
plt.subplot(1, 2, 2)
plt.xlabel('PCA Feature')
plt.ylabel('Variance')
plt.title('PCA for Continuous Dataset')
plt.bar(range(0, fit2.explained_variance_ratio_.size), fit2.explained_variance_ratio_)
# In[ ]:
# PCA's components graphed in 2D and 3D
# Apply Scaling
std_scale = preprocessing.StandardScaler().fit(dataset_con_enc.drop('label', axis=1))
X = std_scale.transform(dataset_con_enc.drop('label', axis=1))
y = dataset_con_enc['label']
# Formatting
target_names = [0,1]
colors = ['navy','darkorange']
lw = 2
alpha = 0.3
# 2 Components PCA
plt.style.use('seaborn-whitegrid')
plt.figure(2, figsize=(20, 8))
plt.subplot(1, 2, 1)
pca = PCA(n_components=2)
X_r = pca.fit(X).transform(X)
for color, i, target_name in zip(colors, [0, 1], target_names):
plt.scatter(X_r[y == i, 0], X_r[y == i, 1],color=color,alpha=alpha,lw=lw,label=target_name)
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.title('First two PCA directions')
# 3 Components PCA
ax = plt.subplot(1, 2, 2, projection='3d')
pca = PCA(n_components=3)
X_reduced = pca.fit(X).transform(X)
for color, i, target_name in zip(colors, [0, 1], target_names):
ax.scatter(X_reduced[y == i, 0], X_reduced[y == i, 1], X_reduced[y == i, 2],color=color,alpha=alpha,lw=lw,label=target_name)
plt.legend(loc='best', shadow=False, scatterpoints=1)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.set_ylabel("2nd eigenvector")
ax.set_zlabel("3rd eigenvector")
# rotate the axes
ax.view_init(30, 10)
# ### Recursive Feature Elimination
#
# Feature ranking with recursive feature elimination and cross-validated selection of the best number of features.
# In[ ]:
# Calculating RFE for non-discretised dataset, and graphing the Importance for each feature, per dataset
selector1 = RFECV(LogisticRegression(), step=1, cv=5, n_jobs=1)
selector1 = selector1.fit(dataset_con_enc.drop('label', axis=1).values, dataset_con_enc['label'].values)
print("Feature Ranking For Non-Discretised: %s" % selector1.ranking_)
print("Optimal number of features : %d" % selector1.n_features_)
# Plot number of features VS. cross-validation scores
plt.style.use('seaborn-whitegrid')
plt.figure(figsize=(20,5))
plt.xlabel("Number of features selected - Non-Discretised")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(selector1.grid_scores_) + 1), selector1.grid_scores_)
# Feature space could be subsetted like so (left commented out here, since we decide below not to apply feature selection):
# dataset_con_enc = dataset_con_enc[dataset_con_enc.columns[np.insert(selector1.support_, 0, True)]]
# ## Selecting Dataset
#
# We now have two datasets to choose from to apply our ML algorithms. The one-hot-encoded, and the label-encoded. For now, we have decided not to use feature reduction or selection algorithms.
# In[ ]:
selected_dataset = dataset_con_enc
# In[ ]:
selected_dataset.head(2)
# ## Splitting Data into Training and Testing Datasets
#
# We now need to split the data into training and testing datasets.
# In[ ]:
from sklearn.model_selection import train_test_split
X = selected_dataset.drop(['label'], axis=1)
y = selected_dataset['label'].astype('int64')
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
# ## Machine Learning Algorithms
# ### Data Review
#
# Let's take one last peek at our data before we start running the Machine Learning algorithms.
# In[ ]:
X_train.shape
# In[ ]:
X_train.head()
# In[ ]:
y_train.head()
# In[ ]:
random.seed(1)
# ### Algorithms
#
# From here, we will be running the following algorithms.
#
# - KNN
# - Logistic Regression
# - Random Forest
# - Naive Bayes
# - Stochastic Gradient Descent
# - Linear SVC
# - Decision Tree
# - Gradient Boosted Trees
#
# Because there's a great deal of repetitiveness in the code for each, we'll create a custom function to do this.
#
# For some algorithms, we have also chosen to run a Random Hyperparameter search, to select the best hyperparameters for a given algorithm.
# In[ ]:
# calculate the fpr and tpr for all thresholds of the classification
def plot_roc_curve(y_test, preds):
fpr, tpr, threshold = metrics.roc_curve(y_test, preds)
roc_auc = metrics.auc(fpr, tpr)
plt.title('Receiver Operating Characteristic')
plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc)
plt.legend(loc = 'lower right')
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([-0.01, 1.01])
plt.ylim([-0.01, 1.01])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
print()
# In[ ]:
# Function that runs the requested algorithm and returns the accuracy metrics
def fit_ml_algo(algo, X_train, y_train, X_test, cv):
# One Pass
model = algo.fit(X_train, y_train)
test_pred = model.predict(X_test)
if (isinstance(algo, (LogisticRegression,KNeighborsClassifier,GaussianNB,DecisionTreeClassifier,RandomForestClassifier,GradientBoostingClassifier))):
probs = model.predict_proba(X_test)[:,1]
else:
probs = "Not Available"
    acc = round(model.score(X_test, y_test) * 100, 2)  # note: y_test is taken from the notebook's global scope
# CV
train_pred = model_selection.cross_val_predict(algo,X_train,y_train,cv=cv,n_jobs = -1)
acc_cv = round(metrics.accuracy_score(y_train, train_pred) * 100, 2)
return train_pred, test_pred, acc, acc_cv, probs
# In[ ]:
# Logistic Regression - Random Search for Hyperparameters
# Utility function to report best scores
def report(results, n_top=5):
for i in range(1, n_top + 1):
candidates = np.flatnonzero(results['rank_test_score'] == i)
for candidate in candidates:
print("Model with rank: {0}".format(i))
print("Mean validation score: {0:.3f} (std: {1:.3f})".format(results['mean_test_score'][candidate],results['std_test_score'][candidate]))
print("Parameters: {0}".format(results['params'][candidate]))
print("")
# Specify parameters and distributions to sample from
param_dist = {'penalty': ['l2', 'l1'],'class_weight': [None, 'balanced'],'C': np.logspace(-20, 20, 10000),'intercept_scaling': np.logspace(-20, 20, 10000)}
# Run Randomized Search
n_iter_search = 10
lrc = LogisticRegression()
random_search = RandomizedSearchCV(lrc,param_distributions=param_dist,n_iter=n_iter_search)
start = time.time()
random_search.fit(X_train, y_train)
print("RandomizedSearchCV took %.2f seconds for %d candidates"
" parameter settings." % ((time.time() - start), n_iter_search))
report(random_search.cv_results_)
# In[ ]:
# Logistic Regression
start_time = time.time()
train_pred_log, test_pred_log, acc_log, acc_cv_log, probs_log = fit_ml_algo(LogisticRegression(),X_train,y_train,X_test,10)
log_time = (time.time() - start_time)
print("Accuracy: %s" % acc_log)
print("Accuracy CV 10-Fold: %s" % acc_cv_log)
print("Running Time: %s" % datetime.timedelta(seconds=log_time))
# In[ ]:
print(metrics.classification_report(y_train, train_pred_log))
# In[ ]:
print(metrics.classification_report(y_test, test_pred_log))
# In[ ]:
plot_roc_curve(y_test, probs_log)
# In[ ]:
# k-Nearest Neighbors
start_time = time.time()
train_pred_knn, test_pred_knn, acc_knn, acc_cv_knn, probs_knn = fit_ml_algo(KNeighborsClassifier(n_neighbors = 3),X_train,y_train,X_test,10)
knn_time = (time.time() - start_time)
print("Accuracy: %s" % acc_knn)
print("Accuracy CV 10-Fold: %s" % acc_cv_knn)
print("Running Time: %s" % datetime.timedelta(seconds=knn_time))
# In[ ]:
print(metrics.classification_report(y_train, train_pred_knn))
# In[ ]:
print(metrics.classification_report(y_test, test_pred_knn))
# In[ ]:
plot_roc_curve(y_test, probs_knn)
# In[ ]:
# Gaussian Naive Bayes
start_time = time.time()
train_pred_gaussian, test_pred_gaussian, acc_gaussian, acc_cv_gaussian, probs_gau = fit_ml_algo(GaussianNB(),X_train,y_train,X_test,10)
gaussian_time = (time.time() - start_time)
print("Accuracy: %s" % acc_gaussian)
print("Accuracy CV 10-Fold: %s" % acc_cv_gaussian)
print("Running Time: %s" % datetime.timedelta(seconds=gaussian_time))
# In[ ]:
print(metrics.classification_report(y_train, train_pred_gaussian))
# In[ ]:
print(metrics.classification_report(y_test, test_pred_gaussian))
# In[ ]:
plot_roc_curve(y_test, probs_gau)
# In[ ]:
# Linear SVC
start_time = time.time()
train_pred_svc, test_pred_svc, acc_linear_svc, acc_cv_linear_svc, _ = fit_ml_algo(LinearSVC(),X_train,y_train,X_test,10)
linear_svc_time = (time.time() - start_time)
print("Accuracy: %s" % acc_linear_svc)
print("Accuracy CV 10-Fold: %s" % acc_cv_linear_svc)
print("Running Time: %s" % datetime.timedelta(seconds=linear_svc_time))
# In[ ]:
print(metrics.classification_report(y_train, train_pred_svc))
# In[ ]:
print(metrics.classification_report(y_test, test_pred_svc))
# In[ ]:
# Stochastic Gradient Descent
start_time = time.time()
train_pred_sgd, test_pred_sgd, acc_sgd, acc_cv_sgd, _ = fit_ml_algo(SGDClassifier(),X_train,y_train,X_test,10)
sgd_time = (time.time() - start_time)
print("Accuracy: %s" % acc_sgd)
print("Accuracy CV 10-Fold: %s" % acc_cv_sgd)
print("Running Time: %s" % datetime.timedelta(seconds=sgd_time))
# In[ ]:
print(metrics.classification_report(y_train, train_pred_sgd))
# In[ ]:
print(metrics.classification_report(y_test, test_pred_sgd))
# In[ ]:
# Decision Tree Classifier
start_time = time.time()
train_pred_dt, test_pred_dt, acc_dt, acc_cv_dt, probs_dt = fit_ml_algo(DecisionTreeClassifier(),X_train,y_train,X_test,10)
dt_time = (time.time() - start_time)
print("Accuracy: %s" % acc_dt)
print("Accuracy CV 10-Fold: %s" % acc_cv_dt)
print("Running Time: %s" % datetime.timedelta(seconds=dt_time))
# In[ ]:
print(metrics.classification_report(y_train, train_pred_dt))
# In[ ]:
print(metrics.classification_report(y_test, test_pred_dt))
# In[ ]:
plot_roc_curve(y_test, probs_dt)
# In[ ]:
# Random Forest Classifier - Random Search for Hyperparameters
# Utility function to report best scores
def report(results, n_top=5):
for i in range(1, n_top + 1):
candidates = np.flatnonzero(results['rank_test_score'] == i)
for candidate in candidates:
print("Model with rank: {0}".format(i))
print("Mean validation score: {0:.3f} (std: {1:.3f})".format(results['mean_test_score'][candidate],results['std_test_score'][candidate]))
print("Parameters: {0}".format(results['params'][candidate]))
print("")
# Specify parameters and distributions to sample from
param_dist = {"max_depth": [10, None],"max_features": sp_randint(1, 7),"min_samples_split": sp_randint(2, 20),"min_samples_leaf": sp_randint(1, 11),"bootstrap": [True, False],"criterion": ["gini", "entropy"]}
# Run Randomized Search
n_iter_search = 10
rfc = RandomForestClassifier(n_estimators=10)
random_search = RandomizedSearchCV(rfc,param_distributions=param_dist,n_iter=n_iter_search)
start = time.time()
random_search.fit(X_train, y_train)
print("RandomizedSearchCV took %.2f seconds for %d candidates"
" parameter settings." % ((time.time() - start), n_iter_search))
report(random_search.cv_results_)
# In[ ]:
# Random Forest Classifier
start_time = time.time()
rfc = RandomForestClassifier(n_estimators=10,min_samples_leaf=2,min_samples_split=17,criterion='gini',max_features=8)
train_pred_rf, test_pred_rf, acc_rf, acc_cv_rf, probs_rf = fit_ml_algo(rfc,X_train,y_train,X_test,10)
rf_time = (time.time() - start_time)
print("Accuracy: %s" % acc_rf)
print("Accuracy CV 10-Fold: %s" % acc_cv_rf)
print("Running Time: %s" % datetime.timedelta(seconds=rf_time))
# In[ ]:
print(metrics.classification_report(y_train, train_pred_rf))
# In[ ]:
print(metrics.classification_report(y_test, test_pred_rf))
# In[ ]:
plot_roc_curve(y_test, probs_rf)
# In[ ]:
# Gradient Boosting Trees
start_time = time.time()
train_pred_gbt, test_pred_gbt, acc_gbt, acc_cv_gbt, probs_gbt = fit_ml_algo(GradientBoostingClassifier(),X_train,y_train,X_test,10)
gbt_time = (time.time() - start_time)
print("Accuracy: %s" % acc_gbt)
print("Accuracy CV 10-Fold: %s" % acc_cv_gbt)
print("Running Time: %s" % datetime.timedelta(seconds=gbt_time))
# In[ ]:
print(metrics.classification_report(y_train, train_pred_gbt))
# In[ ]:
print(metrics.classification_report(y_test, test_pred_gbt))
# In[ ]:
plot_roc_curve(y_test, probs_gbt)
# ## Ranking Results
#
# Let's rank the results for all the algorithms we have used
# In[ ]:
models = pd.DataFrame({'Model': ['KNN', 'Logistic Regression','Random Forest', 'Naive Bayes','Stochastic Gradient Descent', 'Linear SVC','Decision Tree', 'Gradient Boosting Trees'],'Score':[acc_knn,acc_log,acc_rf,acc_gaussian,acc_sgd,acc_linear_svc,acc_dt,acc_gbt]})
models.sort_values(by='Score', ascending=False)
# In[ ]:
models = pd.DataFrame({'Model': ['KNN', 'Logistic Regression','Random Forest', 'Naive Bayes','Stochastic Gradient Descent', 'Linear SVC','Decision Tree', 'Gradient Boosting Trees'],'Score':[acc_cv_knn,acc_cv_log,acc_cv_rf,acc_cv_gaussian,acc_cv_sgd,acc_cv_linear_svc,acc_cv_dt,acc_cv_gbt]})
models.sort_values(by='Score', ascending=False)
# In[ ]:
plt.style.use('seaborn-whitegrid')
fig = plt.figure(figsize=(10,10))
models =['KNN','Logistic Regression','Random Forest','Naive Bayes','Decision Tree','Gradient Boosting Trees']
probs =[probs_knn,probs_log,probs_rf,probs_gau,probs_dt,probs_gbt]
colors =['blue','green','red','cyan','magenta','yellow',]
plt.title('Receiver Operating Characteristic')
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([-0.01, 1.01])
plt.ylim([-0.01, 1.01])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
def plot_roc_curves(y_test, prob, model):
fpr, tpr, threshold = metrics.roc_curve(y_test, prob)
roc_auc = metrics.auc(fpr, tpr)
plt.plot(fpr, tpr, 'b', label = model + ' AUC = %0.2f' % roc_auc, color=colors[i])
plt.legend(loc = 'lower right')
for i, model in list(enumerate(models)):
plot_roc_curves(y_test, probs[i], models[i])
print()
# ## Tensorflow - Logistic Regression
#
# Reference: https://www.tensorflow.org/tutorials/wide
# ### Converting Data into Tensors
# When building a TF.Learn model, the input data is specified by means of an Input Builder function. This builder function will not be called until it is later passed to TF.Learn methods such as fit and evaluate. The purpose of this function is to construct the input data, which is represented in the form of tf.Tensors or tf.SparseTensors. In more detail, the Input Builder function returns the following as a pair:
#
# - feature_cols: A dict from feature column names to Tensors or SparseTensors.
# - label: A Tensor containing the label column.
# In[ ]:
df1 = pd.DataFrame(dataset_con.dtypes, columns=['Continuous Type'])
df2 = pd.DataFrame(dataset_bin.dtypes, columns=['Discretised Type'])
pd.concat([df1, df2], axis=1).transpose()
# In[ ]:
# Selecting the Continuous Dataset
LABEL_COLUMN = "label"
dataset_con[LABEL_COLUMN] = dataset_con["label"].astype(int)
CONTINUOUS_COLUMNS = dataset_con.select_dtypes(include=[np.number]).columns.tolist()
CATEGORICAL_COLUMNS =[]
# In[ ]:
# Missing Values
missingno.matrix(dataset_con, figsize = (30,5))
# In[ ]:
# Splitting the Training and Test data sets
train = dataset_con.loc[0:2900,:]
test = dataset_con.loc[2901:,:]  # .loc slicing is inclusive, so start at 2901 to avoid overlapping with train
# In[ ]:
# Dropping rows with Missing Values
train = train.dropna(axis=0)
test = test.dropna(axis=0)
# In[ ]:
# Converting Dataframes into Tensors
def input_fn(df):
# Creates a dictionary mapping from each continuous feature column name (k) to
# the values of that column stored in a constant Tensor.
continuous_cols = {k: tf.constant(df[k].values) for k in CONTINUOUS_COLUMNS}
# Creates a dictionary mapping from each categorical feature column name (k)
# to the values of that column stored in a tf.SparseTensor.
categorical_cols = {k: tf.SparseTensor(indices=[[i, 0] for i in range(df[k].size)],values=df[k].values,dense_shape=[df[k].size, 1]) for k in CATEGORICAL_COLUMNS}
# Merges the two dictionaries into one.
d = continuous_cols.copy()
d.update(categorical_cols)
feature_cols = d
# Converts the label column into a constant Tensor.
label = tf.constant(df[LABEL_COLUMN].values)
# Returns the feature columns and the label.
return feature_cols, label
def train_input_fn():
return input_fn(train)
def eval_input_fn():
return input_fn(test)
# ### Base Categorical Feature Columns
# To define a feature column for a categorical feature, we can create a SparseColumn using the TF.Learn API. If you know the set of all possible feature values of a column and there are only a few of them, you can use sparse_column_with_keys. Each key in the list will get assigned an auto-incremental ID starting from 0. If we don't know the set of possible values in advance, we can use sparse_column_with_hash_bucket instead:
# In[ ]:
# Listing categorical classes for reference
train.select_dtypes(include=[np.object]).columns.tolist()
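#
# For reference only: this dataset has no categorical features, so CATEGORICAL_COLUMNS stays empty. If it did have one, a sparse (categorical) column could be declared with the TF.Learn API as sketched below. The column name 'speaker_region' and its keys are invented for illustration and are not part of this dataset, so these columns are not added to the model later.
# In[ ]:
speaker_region_keys = tf.contrib.layers.sparse_column_with_keys(column_name="speaker_region", keys=["north", "south"])
speaker_region_hashed = tf.contrib.layers.sparse_column_with_hash_bucket("speaker_region", hash_bucket_size=100)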
# ### Base Continuous Feature Columns
# Similarly, we can define a RealValuedColumn for each continuous feature column that we want to use in the model:
# In[ ]:
train.select_dtypes(include=[np.number]).columns.tolist()
# In[ ]:
#IQR sfm meanfun minfun maxfun mindom maxdom dfrange
IQR = tf.contrib.layers.real_valued_column("IQR")
sfm = tf.contrib.layers.real_valued_column("sfm")
meanfun = tf.contrib.layers.real_valued_column("meanfun")
minfun = tf.contrib.layers.real_valued_column("minfun")
maxfun = tf.contrib.layers.real_valued_column("maxfun")
mindom = tf.contrib.layers.real_valued_column("mindom")
maxdom = tf.contrib.layers.real_valued_column("maxdom")
dfrange = tf.contrib.layers.real_valued_column("dfrange")
# ### Defining The Logistic Regression Model
#
# After processing the input data and defining all the feature columns, we're now ready to put them all together and build a Logistic Regression model.
# In[ ]:
model_dir = tempfile.mkdtemp()
m = tf.contrib.learn.LinearClassifier(feature_columns=[IQR,sfm,meanfun,minfun,maxfun,mindom,maxdom,dfrange],model_dir=model_dir)
# ### Training and Evaluating Our Model
#
# After adding all the features to the model, now let's look at how to actually train the model. Training a model is just a one-liner using the TF.Learn API:
# In[ ]:
m.fit(input_fn=train_input_fn, steps=200)
# In[ ]:
results = m.evaluate(input_fn=eval_input_fn, steps=1)
for key in sorted(results):
print("%s: %s" % (key, results[key]))
# In[ ]:
|
import cv2 as cv
import numpy as np
IMAGE = cv.imread(r'D:\@Semester 06\Digital Image Processing\Lab\Manuals\Figures\lab7\_img1.png', 0)  # raw string so the backslashes are not treated as escape sequences
cv.imshow('Original Image', IMAGE)
cv.waitKey()
cv.destroyAllWindows()
print(IMAGE.shape)
def globalAdaptiveThreshold(_img):
size = np.shape(_img) # Find img size
rows = size[0]
cols = size[1]
newImg = np.zeros([rows, cols], dtype=np.uint8) # New img
    group1, group2 = [], []  # Lists to store groups (chained assignment would make them the same list)
prevThreshold = int(np.mean(_img)) # Initial threshold
epsilon = 8
for r in range(0, rows): # Initial thresholding
for c in range(0, cols):
if _img[r][c] < prevThreshold:
group1.append(_img[r][c])
else:
group2.append(_img[r][c])
m1 = sum(group1)/len(group1) # Find mean of each group
m2 = sum(group2)/len(group2)
nextThreshold = (m1 + m2)/2 # Find new threshold
while abs(nextThreshold-prevThreshold) >= epsilon: # Keep going if condition is not satisfied.
        group1, group2 = [], []  # Clear groups (keep them as separate lists)
for r in range(0, rows): # Again threshold
for c in range(0, cols):
if _img[r][c] < nextThreshold:
group1.append(_img[r][c])
else:
group2.append(_img[r][c])
prevThreshold = nextThreshold
m1 = sum(group1) / len(group1) # Find mean of each group
m2 = sum(group2) / len(group2)
nextThreshold = (m1 + m2) / 2
    for r in range(0, rows): # Thresholding on the basis of the final threshold once the condition is met.
for c in range(0, cols):
newImg[r][c] = 0 if _img[r][c] < nextThreshold else 255
print(newImg.shape)
cv.imshow('Global Adaptive Thresholding', newImg) # Show image
cv.waitKey(0)
cv.destroyAllWindows()
globalAdaptiveThreshold(IMAGE)
|
import logging
from twilio.rest import Client
from twilio.base.exceptions import TwilioException
class TwilioSender:
def __init__(self, filename):
        with open(filename) as f:
            account_sid, auth_token, from_phone_num, my_phone_num = f.readlines()
        account_sid = account_sid.strip()
        auth_token = auth_token.strip()
        self.from_phone_num = from_phone_num.strip()
        self.my_phone_num = my_phone_num.strip()
self.client = Client(account_sid, auth_token)
def send_message(self, msg):
try:
message = self.client.messages.create(body=msg, from_=self.from_phone_num, to=self.my_phone_num)
logging.debug(message.sid)
except TwilioException as ex:
logging.warning(f"Twilio exception: {ex}")
|
'''Test the Collector cidr methods '''
import ipaddress
import spectreapi
def test_collector(server):
'''Make sure we can get collectors'''
collector = server.get_collector_by_name('RodSerling')
assert collector.name == 'RodSerling', "Collectors should have names"
def test_get_targets(server):
'''Make sure we can get targets from that collector'''
collector = server.get_collector_by_name('RodSerling')
targets = collector.get_target_cidrs()
assert ipaddress.ip_network('10.201.0.7') in targets, "CIDR should be in RodSerling targets"
def test_add_targets_append(server):
'''Add targets to a collector'''
collector = server.get_collector_by_name('RodSerling')
collector.set_target_cidrs(ipaddress.ip_network('10.0.0.1'), append=True)
collector.set_target_cidrs(ipaddress.ip_network('10.0.0.2'), append=True)
targets = collector.get_target_cidrs()
assert ipaddress.ip_network('10.0.0.1') in targets, "CIDR should be in RodSerling targets"
assert ipaddress.ip_network('10.0.0.2') in targets, "CIDR should be in RodSerling targets"
def test_add_targets_overwrite(server):
'''Make sure we can overwrite a target list'''
collector = server.get_collector_by_name('RodSerling')
collector.set_target_cidrs(ipaddress.ip_network('10.201.0.7'))
targets = collector.get_target_cidrs()
assert ipaddress.ip_network('10.0.0.1') not in targets, "CIDR should not be in targets"
def test_invalid_cidr_type(server):
'''Make sure we throw exceptions as expected'''
collector = server.get_collector_by_name('RodSerling')
try:
collector._get_cidrs('foo')
except spectreapi.InvalidArgument:
assert True, "_get_cidrs('foo') raised an InvalidArgument"
return
assert False, "_get_cidrs('foo') should have raised an InvalidArgument"
def test_list_cidr(server):
collector = server.get_collector_by_name('RodSerling')
    cidr_list = ['192.168.1.1/32', ipaddress.ip_network('192.168.1.2')]
    results = collector.set_avoid_cidrs(cidr_list)
assert results.ok
def test_delete_cidr(server):
collector = server.get_collector_by_name('RodSerling')
add_list = ['192.168.1.3/32', ipaddress.ip_network('192.168.1.4'), '192.168.1.5/32']
results = collector.set_avoid_cidrs(add_list)
assert results.ok
delete_list = ['192.168.1.3/32', '192.168.1.5/32']
results = collector.delete_avoid_cidrs(delete_list)
assert results.ok
avoid_list = collector.get_avoid_cidrs()
assert ipaddress.ip_network('192.168.1.3') not in avoid_list, "CIDR should not be in RodSerling avoids"
assert ipaddress.ip_network('192.168.1.5') not in avoid_list, "CIDR should not be in RodSerling avoids"
assert ipaddress.ip_network('192.168.1.4') in avoid_list, "CIDR should be in RodSerling avoids"
def test_delete_zone_cidr(server):
zone = server.get_zone_by_name('Twilight')
add_list = ['192.168.1.3/32', ipaddress.ip_network('192.168.1.4'), '192.168.1.5/32']
results = zone.set_avoid_cidrs(add_list)
assert results.ok
delete_list = ['192.168.1.3/32', '192.168.1.5/32']
results = zone.delete_avoid_cidrs(delete_list)
assert results.ok
avoid_list = zone.get_avoid_cidrs()
assert ipaddress.ip_network('192.168.1.3') not in avoid_list, "CIDR should not be in RodSerling avoids"
assert ipaddress.ip_network('192.168.1.5') not in avoid_list, "CIDR should not be in RodSerling avoids"
assert ipaddress.ip_network('192.168.1.4') in avoid_list, "CIDR should be in RodSerling avoids"
|
import decimal
import os
from contextlib import contextmanager
from django.test import TestCase
from django.core.exceptions import ImproperlyConfigured
from mock import patch
from configurations.values import (Value, BooleanValue, IntegerValue,
FloatValue, DecimalValue, ListValue,
TupleValue, SingleNestedTupleValue,
SingleNestedListValue, SetValue,
DictValue, URLValue, EmailValue, IPValue,
RegexValue, PathValue, SecretValue,
DatabaseURLValue, EmailURLValue,
CacheURLValue, BackendsValue,
CastingMixin, SearchURLValue,
setup_value)
@contextmanager
def env(**kwargs):
with patch.dict(os.environ, clear=True, **kwargs):
yield
class FailingCasterValue(CastingMixin, Value):
caster = 'non.existing.caster'
class ValueTests(TestCase):
def test_value_with_default(self):
value = Value('default', environ=False)
self.assertEqual(type(value), type('default'))
self.assertEqual(value, 'default')
self.assertEqual(str(value), 'default')
def test_value_with_default_and_late_binding(self):
value = Value('default', environ=False, late_binding=True)
self.assertEqual(type(value), Value)
with env(DJANGO_TEST='override'):
self.assertEqual(value.setup('TEST'), 'default')
value = Value(environ_name='TEST')
self.assertEqual(type(value), type('override'))
self.assertEqual(value, 'override')
self.assertEqual(str(value), 'override')
self.assertEqual('{0}'.format(value), 'override')
self.assertEqual('%s' % value, 'override')
value = Value(environ_name='TEST', late_binding=True)
self.assertEqual(type(value), Value)
self.assertEqual(value.value, 'override')
self.assertEqual(str(value), 'override')
self.assertEqual('{0}'.format(value), 'override')
self.assertEqual('%s' % value, 'override')
self.assertEqual(repr(value), repr('override'))
@patch.dict(os.environ, clear=True, DJANGO_TEST='override')
def test_env_var(self):
value = Value('default')
self.assertEqual(value.setup('TEST'), 'override')
self.assertEqual(str(value), 'override')
self.assertNotEqual(value.setup('TEST'), value.default)
self.assertEqual(value.to_python(os.environ['DJANGO_TEST']),
value.setup('TEST'))
def test_value_reuse(self):
value1 = Value('default')
value2 = Value(value1)
self.assertEqual(value1.setup('TEST1'), 'default')
self.assertEqual(value2.setup('TEST2'), 'default')
with env(DJANGO_TEST1='override1', DJANGO_TEST2='override2'):
self.assertEqual(value1.setup('TEST1'), 'override1')
self.assertEqual(value2.setup('TEST2'), 'override2')
def test_value_var_equal(self):
value1 = Value('default')
value2 = Value('default')
self.assertEqual(value1, value2)
self.assertTrue(value1 in ['default'])
def test_env_var_prefix(self):
with patch.dict(os.environ, clear=True, ACME_TEST='override'):
value = Value('default', environ_prefix='ACME')
self.assertEqual(value.setup('TEST'), 'override')
with patch.dict(os.environ, clear=True, TEST='override'):
value = Value('default', environ_prefix='')
self.assertEqual(value.setup('TEST'), 'override')
with patch.dict(os.environ, clear=True, ACME_TEST='override'):
value = Value('default', environ_prefix='ACME_')
self.assertEqual(value.setup('TEST'), 'override')
def test_boolean_values_true(self):
value = BooleanValue(False)
for truthy in value.true_values:
with env(DJANGO_TEST=truthy):
self.assertTrue(value.setup('TEST'))
def test_boolean_values_faulty(self):
self.assertRaises(ValueError, BooleanValue, 'false')
def test_boolean_values_false(self):
value = BooleanValue(True)
for falsy in value.false_values:
with env(DJANGO_TEST=falsy):
self.assertFalse(value.setup('TEST'))
def test_boolean_values_nonboolean(self):
value = BooleanValue(True)
with env(DJANGO_TEST='nonboolean'):
self.assertRaises(ValueError, value.setup, 'TEST')
def test_boolean_values_assign_false_to_another_booleanvalue(self):
value1 = BooleanValue(False)
value2 = BooleanValue(value1)
self.assertFalse(value1.setup('TEST1'))
self.assertFalse(value2.setup('TEST2'))
def test_integer_values(self):
value = IntegerValue(1)
with env(DJANGO_TEST='2'):
self.assertEqual(value.setup('TEST'), 2)
with env(DJANGO_TEST='noninteger'):
self.assertRaises(ValueError, value.setup, 'TEST')
def test_float_values(self):
value = FloatValue(1.0)
with env(DJANGO_TEST='2.0'):
self.assertEqual(value.setup('TEST'), 2.0)
with env(DJANGO_TEST='noninteger'):
self.assertRaises(ValueError, value.setup, 'TEST')
def test_decimal_values(self):
value = DecimalValue(decimal.Decimal(1))
with env(DJANGO_TEST='2'):
self.assertEqual(value.setup('TEST'), decimal.Decimal(2))
with env(DJANGO_TEST='nondecimal'):
self.assertRaises(ValueError, value.setup, 'TEST')
def test_failing_caster(self):
self.assertRaises(ImproperlyConfigured, FailingCasterValue)
def test_list_values_default(self):
value = ListValue()
with env(DJANGO_TEST='2,2'):
self.assertEqual(value.setup('TEST'), ['2', '2'])
with env(DJANGO_TEST='2, 2 ,'):
self.assertEqual(value.setup('TEST'), ['2', '2'])
with env(DJANGO_TEST=''):
self.assertEqual(value.setup('TEST'), [])
def test_list_values_separator(self):
value = ListValue(separator=':')
with env(DJANGO_TEST='/usr/bin:/usr/sbin:/usr/local/bin'):
self.assertEqual(value.setup('TEST'),
['/usr/bin', '/usr/sbin', '/usr/local/bin'])
def test_List_values_converter(self):
value = ListValue(converter=int)
with env(DJANGO_TEST='2,2'):
self.assertEqual(value.setup('TEST'), [2, 2])
value = ListValue(converter=float)
with env(DJANGO_TEST='2,2'):
self.assertEqual(value.setup('TEST'), [2.0, 2.0])
def test_list_values_custom_converter(self):
value = ListValue(converter=lambda x: x * 2)
with env(DJANGO_TEST='2,2'):
self.assertEqual(value.setup('TEST'), ['22', '22'])
def test_list_values_converter_exception(self):
value = ListValue(converter=int)
with env(DJANGO_TEST='2,b'):
self.assertRaises(ValueError, value.setup, 'TEST')
def test_tuple_values_default(self):
value = TupleValue()
with env(DJANGO_TEST='2,2'):
self.assertEqual(value.setup('TEST'), ('2', '2'))
with env(DJANGO_TEST='2, 2 ,'):
self.assertEqual(value.setup('TEST'), ('2', '2'))
with env(DJANGO_TEST=''):
self.assertEqual(value.setup('TEST'), ())
def test_single_nested_list_values_default(self):
value = SingleNestedListValue()
with env(DJANGO_TEST='2,3;4,5'):
expected = [['2', '3'], ['4', '5']]
self.assertEqual(value.setup('TEST'), expected)
with env(DJANGO_TEST='2;3;4;5'):
expected = [['2'], ['3'], ['4'], ['5']]
self.assertEqual(value.setup('TEST'), expected)
with env(DJANGO_TEST='2,3,4,5'):
expected = [['2', '3', '4', '5']]
self.assertEqual(value.setup('TEST'), expected)
with env(DJANGO_TEST='2, 3 , ; 4 , 5 ; '):
expected = [['2', '3'], ['4', '5']]
self.assertEqual(value.setup('TEST'), expected)
with env(DJANGO_TEST=''):
self.assertEqual(value.setup('TEST'), [])
def test_single_nested_list_values_separator(self):
value = SingleNestedListValue(seq_separator=':')
with env(DJANGO_TEST='2,3:4,5'):
self.assertEqual(value.setup('TEST'), [['2', '3'], ['4', '5']])
def test_single_nested_list_values_converter(self):
value = SingleNestedListValue(converter=int)
with env(DJANGO_TEST='2,3;4,5'):
self.assertEqual(value.setup('TEST'), [[2, 3], [4, 5]])
def test_single_nested_list_values_converter_default(self):
value = SingleNestedListValue([['2', '3'], ['4', '5']], converter=int)
self.assertEqual(value.value, [[2, 3], [4, 5]])
def test_single_nested_tuple_values_default(self):
value = SingleNestedTupleValue()
with env(DJANGO_TEST='2,3;4,5'):
expected = (('2', '3'), ('4', '5'))
self.assertEqual(value.setup('TEST'), expected)
with env(DJANGO_TEST='2;3;4;5'):
expected = (('2',), ('3',), ('4',), ('5',))
self.assertEqual(value.setup('TEST'), expected)
with env(DJANGO_TEST='2,3,4,5'):
expected = (('2', '3', '4', '5'),)
self.assertEqual(value.setup('TEST'), expected)
with env(DJANGO_TEST='2, 3 , ; 4 , 5 ; '):
expected = (('2', '3'), ('4', '5'))
self.assertEqual(value.setup('TEST'), expected)
with env(DJANGO_TEST=''):
self.assertEqual(value.setup('TEST'), ())
def test_single_nested_tuple_values_separator(self):
value = SingleNestedTupleValue(seq_separator=':')
with env(DJANGO_TEST='2,3:4,5'):
self.assertEqual(value.setup('TEST'), (('2', '3'), ('4', '5')))
def test_single_nested_tuple_values_converter(self):
value = SingleNestedTupleValue(converter=int)
with env(DJANGO_TEST='2,3;4,5'):
self.assertEqual(value.setup('TEST'), ((2, 3), (4, 5)))
def test_single_nested_tuple_values_converter_default(self):
value = SingleNestedTupleValue((('2', '3'), ('4', '5')), converter=int)
self.assertEqual(value.value, ((2, 3), (4, 5)))
def test_set_values_default(self):
value = SetValue()
with env(DJANGO_TEST='2,2'):
self.assertEqual(value.setup('TEST'), set(['2', '2']))
with env(DJANGO_TEST='2, 2 ,'):
self.assertEqual(value.setup('TEST'), set(['2', '2']))
with env(DJANGO_TEST=''):
self.assertEqual(value.setup('TEST'), set())
def test_dict_values_default(self):
value = DictValue()
with env(DJANGO_TEST='{2: 2}'):
self.assertEqual(value.setup('TEST'), {2: 2})
expected = {2: 2, '3': '3', '4': [1, 2, 3]}
with env(DJANGO_TEST="{2: 2, '3': '3', '4': [1, 2, 3]}"):
self.assertEqual(value.setup('TEST'), expected)
with env(DJANGO_TEST="""{
2: 2,
'3': '3',
'4': [1, 2, 3],
}"""):
self.assertEqual(value.setup('TEST'), expected)
with env(DJANGO_TEST=''):
self.assertEqual(value.setup('TEST'), {})
with env(DJANGO_TEST='spam'):
self.assertRaises(ValueError, value.setup, 'TEST')
def test_email_values(self):
value = EmailValue('[email protected]')
with env(DJANGO_TEST='[email protected]'):
self.assertEqual(value.setup('TEST'), '[email protected]')
with env(DJANGO_TEST='spam'):
self.assertRaises(ValueError, value.setup, 'TEST')
def test_url_values(self):
value = URLValue('http://eggs.spam')
with env(DJANGO_TEST='http://spam.eggs'):
self.assertEqual(value.setup('TEST'), 'http://spam.eggs')
with env(DJANGO_TEST='httb://spam.eggs'):
self.assertRaises(ValueError, value.setup, 'TEST')
def test_url_values_with_no_default(self):
value = URLValue() # no default
with env(DJANGO_TEST='http://spam.eggs'):
self.assertEqual(value.setup('TEST'), 'http://spam.eggs')
def test_url_values_with_wrong_default(self):
self.assertRaises(ValueError, URLValue, 'httb://spam.eggs')
def test_ip_values(self):
value = IPValue('0.0.0.0')
with env(DJANGO_TEST='127.0.0.1'):
self.assertEqual(value.setup('TEST'), '127.0.0.1')
with env(DJANGO_TEST='::1'):
self.assertEqual(value.setup('TEST'), '::1')
with env(DJANGO_TEST='spam.eggs'):
self.assertRaises(ValueError, value.setup, 'TEST')
def test_regex_values(self):
value = RegexValue('000--000', regex=r'\d+--\d+')
with env(DJANGO_TEST='123--456'):
self.assertEqual(value.setup('TEST'), '123--456')
with env(DJANGO_TEST='123456'):
self.assertRaises(ValueError, value.setup, 'TEST')
def test_path_values_with_check(self):
value = PathValue()
with env(DJANGO_TEST='/'):
self.assertEqual(value.setup('TEST'), '/')
with env(DJANGO_TEST='~/'):
self.assertEqual(value.setup('TEST'), os.path.expanduser('~'))
with env(DJANGO_TEST='/does/not/exist'):
self.assertRaises(ValueError, value.setup, 'TEST')
def test_path_values_no_check(self):
value = PathValue(check_exists=False)
with env(DJANGO_TEST='/'):
self.assertEqual(value.setup('TEST'), '/')
with env(DJANGO_TEST='~/spam/eggs'):
self.assertEqual(value.setup('TEST'),
os.path.join(os.path.expanduser('~'),
'spam', 'eggs'))
with env(DJANGO_TEST='/does/not/exist'):
self.assertEqual(value.setup('TEST'), '/does/not/exist')
def test_secret_value(self):
# no default allowed, only environment values are
self.assertRaises(ValueError, SecretValue, 'default')
value = SecretValue()
self.assertRaises(ValueError, value.setup, 'TEST')
with env(DJANGO_SECRET_KEY='123'):
self.assertEqual(value.setup('SECRET_KEY'), '123')
value = SecretValue(environ_name='FACEBOOK_API_SECRET',
environ_prefix=None,
late_binding=True)
self.assertRaises(ValueError, value.setup, 'TEST')
with env(FACEBOOK_API_SECRET='123'):
self.assertEqual(value.setup('TEST'), '123')
def test_database_url_value(self):
value = DatabaseURLValue()
self.assertEqual(value.default, {})
with env(DATABASE_URL='sqlite://'):
self.assertEqual(value.setup('DATABASE_URL'), {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'HOST': '',
'NAME': ':memory:',
'PASSWORD': '',
'PORT': '',
'USER': '',
}})
def test_database_url_additional_args(self):
def mock_database_url_caster(self, url, engine=None):
return { 'URL': url, 'ENGINE': engine }
with patch('configurations.values.DatabaseURLValue.caster', mock_database_url_caster):
value = DatabaseURLValue(engine='django_mysqlpool.backends.mysqlpool')
with env(DATABASE_URL='sqlite://'):
self.assertEqual(value.setup('DATABASE_URL'), {
'default': {
'URL': 'sqlite://',
'ENGINE': 'django_mysqlpool.backends.mysqlpool'
}
})
def test_email_url_value(self):
value = EmailURLValue()
self.assertEqual(value.default, {})
with env(EMAIL_URL='smtps://[email protected]:[email protected]:587'):
self.assertEqual(value.setup('EMAIL_URL'), {
'EMAIL_BACKEND': 'django.core.mail.backends.smtp.EmailBackend',
'EMAIL_FILE_PATH': '',
'EMAIL_HOST': 'smtp.example.com',
'EMAIL_HOST_PASSWORD': 'password',
'EMAIL_HOST_USER': '[email protected]',
'EMAIL_PORT': 587,
'EMAIL_USE_TLS': True})
with env(EMAIL_URL='console://'):
self.assertEqual(value.setup('EMAIL_URL'), {
'EMAIL_BACKEND': 'django.core.mail.backends.console.EmailBackend',
'EMAIL_FILE_PATH': '',
'EMAIL_HOST': None,
'EMAIL_HOST_PASSWORD': None,
'EMAIL_HOST_USER': None,
'EMAIL_PORT': None,
'EMAIL_USE_TLS': False})
with env(EMAIL_URL='smtps://[email protected]:[email protected]:wrong'):
self.assertRaises(ValueError, value.setup, 'TEST')
def test_cache_url_value(self):
cache_setting = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': 'host:6379:1'
}
}
cache_url = 'redis://user@host:6379/1'
value = CacheURLValue(cache_url)
self.assertEqual(value.default, cache_setting)
value = CacheURLValue()
self.assertEqual(value.default, {})
with env(CACHE_URL='redis://user@host:6379/1'):
self.assertEqual(value.setup('CACHE_URL'), cache_setting)
with env(CACHE_URL='wrong://user@host:port/1'):
with self.assertRaises(Exception) as cm:
value.setup('TEST')
self.assertEqual(cm.exception.args[0], 'Unknown backend: "wrong"')
with env(CACHE_URL='redis://user@host:port/1'):
with self.assertRaises(ValueError) as cm:
value.setup('TEST')
self.assertEqual(cm.exception.args[0],
"Cannot interpret cache URL value 'redis://user@host:port/1'")
def test_search_url_value(self):
value = SearchURLValue()
self.assertEqual(value.default, {})
with env(SEARCH_URL='elasticsearch://127.0.0.1:9200/index'):
self.assertEqual(value.setup('SEARCH_URL'), {
'default': {
'ENGINE': 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine',
'URL': 'http://127.0.0.1:9200',
'INDEX_NAME': 'index',
}})
def test_backend_list_value(self):
backends = ['django.middleware.common.CommonMiddleware']
value = BackendsValue(backends)
self.assertEqual(value.setup('TEST'), backends)
backends = ['non.existing.Backend']
self.assertRaises(ValueError, BackendsValue, backends)
def test_tuple_value(self):
value = TupleValue(None)
self.assertEqual(value.default, ())
self.assertEqual(value.value, ())
value = TupleValue((1, 2))
self.assertEqual(value.default, (1, 2))
self.assertEqual(value.value, (1, 2))
def test_set_value(self):
value = SetValue()
self.assertEqual(value.default, set())
self.assertEqual(value.value, set())
value = SetValue([1, 2])
self.assertEqual(value.default, set([1, 2]))
self.assertEqual(value.value, set([1, 2]))
def test_setup_value(self):
class Target(object):
pass
value = EmailURLValue()
with env(EMAIL_URL='smtps://[email protected]:[email protected]:587'):
setup_value(Target, 'EMAIL', value)
self.assertEqual(Target.EMAIL, {
'EMAIL_BACKEND': 'django.core.mail.backends.smtp.EmailBackend',
'EMAIL_FILE_PATH': '',
'EMAIL_HOST': 'smtp.example.com',
'EMAIL_HOST_PASSWORD': 'password',
'EMAIL_HOST_USER': '[email protected]',
'EMAIL_PORT': 587,
'EMAIL_USE_TLS': True
})
self.assertEqual(Target.EMAIL_BACKEND, 'django.core.mail.backends.smtp.EmailBackend')
self.assertEqual(Target.EMAIL_FILE_PATH, '')
self.assertEqual(Target.EMAIL_HOST, 'smtp.example.com')
self.assertEqual(Target.EMAIL_HOST_PASSWORD, 'password')
self.assertEqual(Target.EMAIL_HOST_USER, '[email protected]')
self.assertEqual(Target.EMAIL_PORT, 587)
self.assertEqual(Target.EMAIL_USE_TLS, True)
|
__author__ = "Andy Dustman <[email protected]>"
version_info = (1,3,6,'final',1)
__version__ = "1.3.6"
|
# Distributed under the MIT License.
# See LICENSE.txt for details.
import unittest
from spectre import Spectral
from spectre import DataStructures
from spectre import Interpolation
from numpy.polynomial.legendre import Legendre
import numpy as np
class TestRegularGrid(unittest.TestCase):
# arbitrary polynomial functions of low order for exact interpolation
def polynomial(self, coords):
dim = len(coords)
if dim == 1:
x = coords[0]
return x**2 + x + 1.
elif dim == 2:
x, y = coords
return 2. * x**2 + y**2 + y + x + 2.
elif dim == 3:
x, y, z = coords
return 3. * x**2 + 2. * y**2 + z**2 + 2. * y + z + x + 2.
else:
raise ValueError(
"Coordinates must have shape (dim, N) where dim is 1, 2, or 3."
)
def generate_gauss_nodes(self, num_points):
return Legendre.basis(num_points).roots()
def generate_gauss_lobatto_nodes(self, num_points):
nodes = Legendre.basis(num_points - 1).deriv().roots()
return np.concatenate(([-1], nodes, [1]))
def logical_coordinates(self, mesh):
"""
creates a uniform mesh of shape (dim, num_points) with the
requested quadrature
"""
if mesh.quadrature()[0] == Spectral.Quadrature.Gauss:
nodes = self.generate_gauss_nodes(mesh.extents(0))
elif mesh.quadrature()[0] == Spectral.Quadrature.GaussLobatto:
nodes = self.generate_gauss_lobatto_nodes(mesh.extents(0))
else:
raise ValueError(
"Only Gauss or GaussLobatto are implemented quadratures")
grid_points = np.meshgrid(*(mesh.dim * (nodes, )))
return np.stack(grid_points, 0).reshape(mesh.dim, -1)
def test_regular_grid(self):
for dim in range(1, 4):
Mesh = [Spectral.Mesh1D, Spectral.Mesh2D, Spectral.Mesh3D][dim - 1]
RegularGrid = [
Interpolation.RegularGrid1D, Interpolation.RegularGrid2D,
Interpolation.RegularGrid3D
][dim - 1]
for quadrature in [
Spectral.Quadrature.Gauss, Spectral.Quadrature.GaussLobatto
]:
for num_points in range(3, 10):
source_mesh = Mesh(num_points, Spectral.Basis.Legendre,
quadrature)
target_mesh = Mesh(num_points + 2, Spectral.Basis.Legendre,
quadrature)
interpolant = RegularGrid(source_mesh, target_mesh)
source_coords = self.logical_coordinates(source_mesh)
target_coords = self.logical_coordinates(target_mesh)
initial_data = self.polynomial(source_coords)
target_data = self.polynomial(target_coords)
interpolated_data = interpolant.interpolate(
DataStructures.DataVector(initial_data))
self.assertTrue(
np.allclose(target_data, interpolated_data, 1e-14,
1e-14))
if __name__ == '__main__':
unittest.main()
|
from itertools import permutations
def possible_permutations(sequence):
for per in permutations(sequence):
yield list(per)
'''
Create a generator function called possible_permutations() which should receive a list and return lists with all possible permutations between its elements.
'''
[print(n) for n in possible_permutations([1, 2, 3])] |
import puzzleinput
adapters = sorted(puzzleinput.numbers)
adapters.append(adapters[-1]+3)
one_diffs = 0
three_diffs = 0
jolts = 0
for adapter in adapters:
difference = adapter - jolts
if difference <= 3:
jolts += difference
if difference == 1:
one_diffs += 1
if difference == 3:
three_diffs += 1
print(one_diffs * three_diffs)
|
import os
import subprocess
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
client_address = "http://localhost:5000/vat.html?actor=client"
server_address = "http://localhost:5000/vat.html?actor=server&name=server"
print("client", os.getcwd())
def retry_until(until, retries=1000):
r = -1
while not until() and r < retries:
if retries != 0:
r += 1
time.sleep(0.1)
def setup():
if not os.path.exists("build"):
os.mkdir("build")
subprocess.call("webpack", shell=True)
subprocess.call("cp src/*.html src/*.py src/*.gif build/", shell=True)
subprocess.call("python3 src/runserver.py &", shell=True)
retry_until(lambda: os.path.exists("server.pid"))
print("Server started, running driver")
options = webdriver.ChromeOptions()
options.add_argument("--js-flags=--harmony")
options.add_argument("--auto-open-devtools-for-tabs")
driver = webdriver.Chrome(chrome_options=options)
scr = "window.open('" + server_address + "')"
print("scr", scr)
time.sleep(0.5)
driver.execute_script(scr)
time.sleep(0.5)
scr2 = "window.open('" + client_address + "')"
driver.execute_script(scr2)
driver.save_screenshot("screenshot.png")
print("Loaded:", repr(driver.title), driver.current_url)
print("--------")
print(driver.page_source)
print("--------")
return driver
def quit(driver=None):
if driver is not None:
print("killing driver")
driver.quit()
if os.path.exists("server.pid"):
print("killing server")
with open("server.pid") as pid:
cmd = "kill " + pid.read()
subprocess.call(cmd, shell=True)
retry_until(lambda: not os.path.exists("server.pid"))
def runforever():
"""Keep running as long as the server is alive.
"""
print("waiting for server to die (press control-c to exit)")
try:
retry_until(lambda: not os.path.exists("server.pid"), 0)
except KeyboardInterrupt:
print("\nKeyboardInterrupt, exiting driver")
quit(driver)
driver = None
try:
driver = setup()
except KeyboardInterrupt:
pass
if driver is not None:
runforever()
else:
quit()
|
from kafka import KafkaProducer
import json
import sys
import time
class ProducerServer(KafkaProducer):
def __init__(self, input_file, topic, **kwargs):
super().__init__(**kwargs)
self.input_file = input_file
self.topic = topic
    # TODO: we're generating dummy data
def generate_data(self):
with open(self.input_file) as f:
for item in self.iterload(f): # Load JSON items from the input_file, one item at a time
message = self.dict_to_binary(item)
# TODO send the correct data
self.send(self.topic, message)
time.sleep(1)
# TODO fill this in to return the json dictionary to binary
def dict_to_binary(self, json_dict):
return json.dumps(json_dict).encode('utf-8')
def iterload(self, file):
buffer = ""
dec = json.JSONDecoder()
for line in file:
line = line.strip(" \n\r\t")
if line == "[":
buffer = ""
continue
buffer = buffer + line
            if line in ("},", "}"):  # closing brace of a top-level item (assumes a flat array of flat objects)
                # raw_decode returns (obj, end_index); yield only the parsed dict
                yield dec.raw_decode(buffer)[0]
buffer = "" |
from collections import defaultdict
from functools import lru_cache
from .constants import (
MAX_SQUARE,
MIN_SQUARE,
MAX_INT,
NORTH,
EAST,
SOUTH,
WEST,
NE,
SE,
SW,
NW,
)
from .types import (
DIRECTIONS,
EMPTY,
UNIVERSE,
Bitboard,
Color,
PieceType,
Square,
Squares,
Files,
Ranks,
)
from .utils import rank_mask, file_mask, diag_mask, antidiag_mask
from typing import Tuple
NOT_A = ~Files.A
NOT_AB = ~Bitboard(Files.A | Files.B)
NOT_H = ~Files.H
NOT_GH = ~Bitboard(Files.G | Files.H)
# One steps
n_one = lambda s: (s << NORTH)
s_one = lambda s: (s >> NORTH)
n_two = lambda s: (s << 16)
s_two = lambda s: (s >> 16)
e_one = lambda s: (s << EAST & NOT_A)
w_one = lambda s: (s >> EAST & NOT_H)
ne_one = lambda s: (s << 9 & NOT_A)
nw_one = lambda s: (s << 7 & NOT_H)
se_one = lambda s: (s >> 7 & NOT_A)
sw_one = lambda s: (s >> 9 & NOT_H)
STEPS = [
n_one,
ne_one,
e_one,
se_one,
s_one,
sw_one,
w_one,
nw_one,
]
def ring(s: Square) -> Bitboard:
_bb = Bitboard(1 << s)
bb = Bitboard(0)
for step in STEPS:
bb |= step(_bb)
return bb
def relative_second_rank_bb(color: Color) -> Bitboard:
return Ranks.RANK_2 if color == Color.WHITE else Ranks.RANK_7
def relative_third_rank_bb(color: Color) -> Bitboard:
return Ranks.RANK_3 if color == Color.WHITE else Ranks.RANK_6
def relative_fourth_rank_bb(color: Color) -> Bitboard:
return Ranks.RANK_4 if color == Color.WHITE else Ranks.RANK_5
def relative_eigth_rank_bb(color: Color) -> Bitboard:
return Ranks.RANK_8 if color == Color.WHITE else Ranks.RANK_1
def relative_south(color: Color, bb: Bitboard) -> Bitboard:
return bb >> 8 if color == Color.WHITE else bb << 8
def square_below(color: Color, s: Square) -> int:
if not s:
return None
return Square(s - 8) if color == Color.WHITE else Square(s + 8)
def square_above(color: Color, s: Square) -> int:
return Square(s + 8) if color == Color.WHITE else Square(s - 8)
def relative_rook_squares(color: Color, short: bool = True) -> Tuple[Square]:
"""Return the rook squares from, to by color and castling type"""
if color == Color.WHITE:
return (
(Squares.H1._value_, Squares.F1._value_)
if short
else (Squares.A1._value_, Squares.D1._value_)
)
elif color == Color.BLACK:
return (
(Squares.H8._value_, Squares.F8._value_)
if short
else (Squares.A8._value_, Squares.D8._value_)
)
def knight_attacks(s: int) -> int:
return knights_attack_mask(1 << s)
def knights_attack_mask(knights: int) -> int:
s = knights
return (
((s << 17) & NOT_A)
| ((s << 15) & NOT_H)
| ((s << 10) & NOT_AB)
| ((s << 6) & NOT_GH)
| ((s >> 17) & NOT_H)
| ((s >> 15) & NOT_A)
| ((s >> 10) & NOT_GH)
| ((s >> 6) & NOT_AB)
)
def white_pawns_all_attack_mask(pawns: int) -> int:
return ne_one(pawns) | nw_one(pawns)
def white_pawns_single_attack_mask(pawns: int) -> int:
return ne_one(pawns) ^ nw_one(pawns)
def white_pawns_double_attack_mask(pawns: int) -> int:
return ne_one(pawns) & nw_one(pawns)
def black_pawns_all_attack_mask(pawns: int) -> int:
return se_one(pawns) | sw_one(pawns)
def black_pawns_single_attack_mask(pawns: int) -> int:
return se_one(pawns) ^ sw_one(pawns)
def black_pawns_double_attack_mask(pawns: int) -> int:
return se_one(pawns) & sw_one(pawns)
def white_pawns_single_push(pawns: int) -> int:
return n_one(pawns)
def white_pawns_double_push(pawns: int) -> int:
return n_two(pawns)
def black_pawns_single_push(pawns: int) -> int:
return s_one(pawns)
def black_pawns_double_push(pawns: int) -> int:
return s_two(pawns)
def king_attacks(s: int):
s = 1 << s
return (
n_one(s)
| ne_one(s)
| e_one(s)
| se_one(s)
| s_one(s)
| sw_one(s)
| w_one(s)
| nw_one(s)
)
rank_mask_ex = lambda s: (1 << s) ^ rank_mask(s)
file_mask_ex = lambda s: (1 << s) ^ file_mask(s)
diag_mask_ex = lambda s: (1 << s) ^ diag_mask(s)
antidiag_mask_ex = lambda s: (1 << s) ^ antidiag_mask(s)
ROOK_ATTACKS = [Bitboard(rank_mask_ex(s) | file_mask_ex(s)) for s in range(64)]
BISHOP_ATTACKS = [Bitboard(diag_mask_ex(s) | antidiag_mask_ex(s)) for s in range(64)]
QUEEN_ATTACKS = [Bitboard(BISHOP_ATTACKS[s] | ROOK_ATTACKS[s]) for s in range(64)]
KNIGHT_ATTACKS = [Bitboard(knight_attacks(s)) for s in range(64)]
KING_ATTACKS = [Bitboard(king_attacks(s)) for s in range(64)]
PAWN_ATTACKS = [
white_pawns_all_attack_mask,
black_pawns_all_attack_mask,
]
PAWN_SINGLE_PUSHES = [
white_pawns_single_push,
black_pawns_single_push,
]
PAWN_DOUBLE_PUSHES = [
white_pawns_double_push,
black_pawns_double_push,
]
PAWN_SINGLE_ATTACKS = [
white_pawns_single_attack_mask,
black_pawns_single_attack_mask,
]
PAWN_DOUBLE_ATTACKS = [
    white_pawns_double_attack_mask,
    black_pawns_double_attack_mask,
]
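# --- Example (illustrative, not part of the original module) --------------------
# A minimal, self-contained sketch of the knight-attack technique used above,
# re-implemented on plain ints so it runs without the package's Bitboard/Files
# types. The square index (28 = e4) and the expected attack count are only
# illustrative assumptions.
if __name__ == "__main__":
    FILE_A = 0x0101010101010101
    FILE_B = FILE_A << 1
    FILE_G = FILE_A << 6
    FILE_H = FILE_A << 7
    not_a, not_ab = ~FILE_A, ~(FILE_A | FILE_B)
    not_h, not_gh = ~FILE_H, ~(FILE_G | FILE_H)
    def plain_knight_attacks(square: int) -> int:
        # Same shift-and-mask pattern as knights_attack_mask(), trimmed to 64 bits.
        s = 1 << square
        return 0xFFFFFFFFFFFFFFFF & (
            ((s << 17) & not_a)
            | ((s << 15) & not_h)
            | ((s << 10) & not_ab)
            | ((s << 6) & not_gh)
            | ((s >> 17) & not_h)
            | ((s >> 15) & not_a)
            | ((s >> 10) & not_gh)
            | ((s >> 6) & not_ab)
        )
    # A knight on e4 (square 28) attacks exactly 8 squares.
    assert bin(plain_knight_attacks(28)).count("1") == 8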
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author : YongJie-Xie
@Contact : [email protected]
@DateTime : 0000-00-00 00:00
@Description : Test script for the database operation class
@FileName : test_database.py
@License : MIT License
@ProjectName : Py3Scripts
@Software : PyCharm
@Version : 1.1
"""
import MySQLdb
from MySQLdb.cursors import SSCursor
from basic import Logger, DEBUG, MySQLDatabase
logger = Logger('test_database', level=DEBUG)
database = MySQLDatabase(
host='xieyongjie.cn', port=3306, username='test', password='test', database='test',
creator=MySQLdb, cursor_class=SSCursor, logger=logger
)
@logger.info('=' * 120)
def create_table(table, columns_info):
"""
Execute operation: CREATE TABLE IF NOT EXISTS `tmp_test_script`
(`a1` varchar(255) NULL, `b2` varchar(255) NULL, `c3` varchar(255) NULL);
Execute params: None
"""
    logger.info('Creating table: `%s`', table)
    rowcount = database.create_table(table, columns_info)
    logger.info('Create table result: %s', rowcount)
@logger.info('=' * 120)
def insert_one(table, columns, params):
"""
Execute operation: INSERT INTO `tmp_test_script` (`a1`, `b2`, `c3`) VALUE (%s, %s, %s);
Execute params: ('1', '2', '3')
"""
    logger.info('Inserting single row: `%s` --> %s', table, params)
    rowcount = database.insert_one(table, columns, params)
    logger.info('Insert single row result: %s', rowcount)
@logger.info('=' * 120)
def insert_all(table, columns, seq_params):
"""
Executemany operation: INSERT INTO `tmp_test_script` (`a1`, `b2`, `c3`) VALUES (%s, %s, %s);
Executemany seq_params: [('4', '5', '6'), ('7', '8', '9')]
"""
    logger.info('Inserting multiple rows: `%s` --> %s', table, seq_params)
    rowcount = database.insert_all(table, columns, seq_params)
    logger.info('Insert multiple rows result: %s', rowcount)
@logger.info('=' * 120)
def select_one(table, columns):
"""
Execute operation: SELECT `a1`, `b2`, `c3` FROM `tmp_test_script`;
Execute params: None
"""
    logger.info('Selecting rows one by one: `%s`', table)
    for row in database.select_one(table, columns):
        logger.info('Row-by-row select result: %s', row)
@logger.info('=' * 120)
def update(table, values, columns, params):
"""
Execute operation: UPDATE `tmp_test_script` SET `a1` = %s, `b2` = %s, `c3` = %s
WHERE `a1` = %s AND `b2` = %s AND `c3` = %s;
Execute params: ('-1', '-2', '-3', '1', '2', '3')
"""
    logger.info('Updating rows: `%s` --> %s >>> %s', table, dict(zip(columns, params)), dict(values.items()))
    rowcount = database.update(table, values, columns, params)
    logger.info('Update result: %s', rowcount)
@logger.info('=' * 120)
def select_many(table, columns, size=2):
"""
Execute operation: SELECT `a1`, `b2`, `c3` FROM `tmp_test_script`;
Execute params: None
"""
    logger.info('Selecting rows in batches: `%s`', table)
    for row in database.select_many(table, columns, size=size):
        logger.info('Batch select result: %s', row)
@logger.info('=' * 120)
def delete(table, columns, params):
"""
Execute operation: DELETE FROM `tmp_test_script` WHERE `a1` = %s AND `b2` = %s AND `c3` = %s;
Execute params: ('4', '5', '6')
"""
    logger.info('Deleting rows: `%s` --> %s', table, params)
    rowcount = database.delete(table, columns, params)
    logger.info('Delete result: %s', rowcount)
@logger.info('=' * 120)
def select_all(table, columns):
"""
Execute operation: SELECT `a1`, `b2`, `c3` FROM `tmp_test_script`;
Execute params: None
"""
    logger.info('Selecting all rows: `%s`', table)
    rows = database.select_all(table, columns)
    logger.info('Select all result: %s', rows)
@logger.info('=' * 120)
def count(table, column):
"""
Execute operation: SELECT COUNT(`a1`) FROM `tmp_test_script`;
Execute params: None
"""
    logger.info('Counting table: `%s`', table)
    rowcount = database.count(table, column)
    logger.info('Count result: %s', rowcount)
@logger.info('=' * 120)
def drop_table(table):
"""
Execute operation: DROP TABLE IF EXISTS `tmp_test_script`;
Execute params: None
"""
    logger.info('Dropping table: `%s`', table)
    rowcount = database.drop_table(table)
    logger.info('Drop table result: %s', rowcount)
def main():
table = 'tmp_test_script'
columns = ('a1', 'b2', 'c3')
columns_info = {'a1': 'varchar(255) NULL', 'b2': 'varchar(255) NULL', 'c3': 'varchar(255) NULL'}
params_123 = ('1', '2', '3')
params_456 = ('4', '5', '6')
params_789 = ('7', '8', '9')
params_n123 = ('-1', '-2', '-3')
create_table(table=table, columns_info=columns_info)
insert_one(table=table, columns=columns, params=params_123)
insert_all(table=table, columns=columns, seq_params=[params_456, params_789])
select_one(table=table, columns=columns)
update(table=table, values=dict(zip(columns, params_n123)), columns=columns, params=params_123)
select_many(table=table, columns=columns)
delete(table=table, columns=columns, params=params_456)
select_all(table=table, columns=columns)
count(table=table, column=columns[0])
drop_table(table=table)
if __name__ == '__main__':
main()
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Functions in this file rely on depot_tools being checked out as a sibling
# of infra.git.
import logging
import os
import re
import subprocess
BASE_DIR = os.path.dirname(
os.path.dirname(
os.path.dirname(
os.path.dirname(os.path.realpath(__file__)))))
def parse_revinfo(revinfo):
"""Parse the output of "gclient revinfo -a"
Args:
revinfo (str): string containing gclient stdout.
  Returns:
    revinfo_d (dict): <directory>: {'source_url': <url>, 'revision': <revision>}
  """
revision_expr = re.compile('(.*)@([^@]*)')
revinfo_d = {}
for line in revinfo.splitlines():
if ':' not in line:
continue
# TODO: this fails when the file name contains a colon.
path, line = line.split(':', 1)
if '@' in line:
url, revision = revision_expr.match(line).groups()
revision = revision.strip()
else:
      # No '@' in the line: the whole line is the URL and the revision is unknown.
url, revision = line.strip(), None
path = path.strip()
url = url.strip()
revinfo_d[path] = {'source_url': url, 'revision': revision}
return revinfo_d
def get_revinfo(cwd=None): # pragma: no cover
"""Call gclient to get the list of all revisions actually checked out on disk.
gclient is expected to be under depot_tools/ sibling to infra/.
  If gclient can't be found or fails to run, returns {}.
Args:
cwd (str): working directory where to run gclient. If None, use the
current working directory.
Returns:
    revinfo (dict): keys are local paths, values are dicts with keys
      'source_url' and 'revision'. The latter can be a git SHA1 or an svn
      revision.
"""
cmd = [os.path.join(BASE_DIR, 'depot_tools', 'gclient'), 'revinfo', '-a']
logging.debug('Running: %s', ' '.join(cmd))
revinfo = ''
try:
revinfo = subprocess.check_output(cmd, cwd=cwd)
except (subprocess.CalledProcessError, OSError):
logging.exception('Command failed to run: %s', ' '.join(cmd))
return parse_revinfo(revinfo)
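# --- Example (illustrative, not part of the original module) --------------------
# A small self-check of parse_revinfo() on hypothetical "gclient revinfo -a"
# output; the paths, URL and revision below are made up for demonstration.
if __name__ == '__main__':
  sample = (
      'infra/: https://chromium.googlesource.com/infra/infra.git@deadbeef\n'
      'infra/appengine: https://example.googlesource.com/foo.git\n')
  parsed = parse_revinfo(sample)
  assert parsed['infra/'] == {
      'source_url': 'https://chromium.googlesource.com/infra/infra.git',
      'revision': 'deadbeef'}
  assert parsed['infra/appengine'] == {
      'source_url': 'https://example.googlesource.com/foo.git',
      'revision': None}
  print(parsed)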
|
def test_warning_to_error_translation():
import warnings
statement = """\
def wrong1():
a = 1
b = 2
global a
global b
"""
with warnings.catch_warnings():
warnings.filterwarnings("error", module="<test string>")
try:
compile(statement, '<test string>', 'exec')
except SyntaxError as err:
assert err.lineno is not None
assert err.filename is not None
assert err.offset is not None
assert err.msg is not None
|
import pandas as pd
import pickle
from sklearn.model_selection import GridSearchCV, KFold
from sklearn.preprocessing import MinMaxScaler
from sklearn.svm import SVR
pd.options.mode.chained_assignment = None
# Load the entire set
df = pd.read_csv('../data/dataset_processed.csv', index_col=0)
target = df['Enthalpy(kcal)']
features = df[df.columns[2:]]
# Define search space
Cs = [1, 10, 100, 500, 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000]
epsilons = [0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5]
# Setup the grid to be searched over
param_grid = dict(C=Cs, epsilon=epsilons)
# Define grid search
grid_search = GridSearchCV(SVR(kernel='rbf', gamma='auto'), param_grid, cv=KFold(n_splits=10, shuffle=True, random_state=9),
n_jobs=19, verbose=1, scoring='neg_mean_squared_error')
# Convert the features and target to numpy arrays
x_train = features.values
y_train = target.values
# Apply min max normalization
scaler = MinMaxScaler().fit(x_train)
x_train = scaler.transform(x_train)
# Find best parameters
grid_search.fit(x_train, y_train)
print(grid_search.best_params_)
print(grid_search.best_score_)
# Retrain model with best parameters found from grid search
best_params = grid_search.best_params_
model = SVR(kernel='rbf', gamma='auto', C=best_params['C'], epsilon=best_params['epsilon'])
model.fit(x_train, y_train)
# save the model
filename = '../models/final_SVR_model.sav'
pickle.dump(model, open(filename, 'wb'))
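# A minimal sketch (commented out, `new_samples` is a hypothetical array) of how
# the pickled model saved above could be reloaded for inference later; new inputs
# must be scaled with the same MinMaxScaler that was fitted on the training data.
# with open(filename, 'rb') as f:
#     loaded_model = pickle.load(f)
# predictions = loaded_model.predict(scaler.transform(new_samples))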
|
import tensorflow as tf
def layer(input_layer, num_next_neurons, is_output=False):
    # Fully connected layer: hidden layers use a 1/sqrt(fan_in) uniform init and
    # ReLU, output layers use a small (+/-3e-3) uniform init with no activation.
num_prev_neurons = int(input_layer.shape[1])
shape = [num_prev_neurons, num_next_neurons]
if is_output:
weight_init = tf.random_uniform_initializer(minval=-3e-3, maxval=3e-3)
bias_init = tf.random_uniform_initializer(minval=-3e-3, maxval=3e-3)
else:
# 1/sqrt(f)
fan_in_init = 1 / num_prev_neurons ** 0.5
weight_init = tf.random_uniform_initializer(minval=-fan_in_init, maxval=fan_in_init)
bias_init = tf.random_uniform_initializer(minval=-fan_in_init, maxval=fan_in_init)
weights = tf.get_variable("weights", shape, initializer=weight_init)
biases = tf.get_variable("biases", [num_next_neurons], initializer=bias_init)
dot = tf.matmul(input_layer, weights) + biases
if is_output:
return dot
relu = tf.nn.relu(dot)
return relu
def layer_goal_nn(input_layer, num_next_neurons, is_output=False):
    # Same as layer(), except the output layer also uses the 1/sqrt(fan_in) init.
num_prev_neurons = int(input_layer.shape[1])
shape = [num_prev_neurons, num_next_neurons]
fan_in_init = 1 / num_prev_neurons ** 0.5
weight_init = tf.random_uniform_initializer(minval=-fan_in_init, maxval=fan_in_init)
bias_init = tf.random_uniform_initializer(minval=-fan_in_init, maxval=fan_in_init)
weights = tf.get_variable("weights", shape, initializer=weight_init)
biases = tf.get_variable("biases", [num_next_neurons], initializer=bias_init)
dot = tf.matmul(input_layer, weights) + biases
if is_output:
return dot
relu = tf.nn.relu(dot)
return relu
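# --- Example (illustrative, assumes a TensorFlow 1.x graph-mode environment) ----
# Minimal sketch of stacking the helpers above into a small two-layer network;
# the input width (24), layer sizes and scope names are arbitrary assumptions.
if __name__ == '__main__':
    state = tf.placeholder(tf.float32, [None, 24], name='state')
    with tf.variable_scope('hidden_1'):
        h1 = layer(state, 64)
    with tf.variable_scope('output'):
        out = layer(h1, 4, is_output=True)
    print(out)  # Tensor of shape (?, 4)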
|
import numpy as np
class LinearRegression:
def __init__(self, fit_intercept: bool=True):
self.fit_intercept = fit_intercept
def fit(self, X:np.ndarray, Y:np.ndarray):
if X.shape[0] != Y.shape[0]:
raise ValueError(f"Y must have shape {(X.shape[0],)}")
copy_X = np.insert(X, 0, 1, axis=1) if self.fit_intercept else X.copy()
copy_XT = np.transpose(copy_X)
w = np.matmul(np.linalg.inv(np.matmul(copy_XT, copy_X)), np.matmul(copy_XT, Y))
if self.fit_intercept:
self.intercept = w[0]
self.coef = w[1:]
else:
self.coef = w
self.intercept = 0
return self
def predict(self, X:np.ndarray):
if X.shape[1] != self.coef.shape[0]:
raise ValueError(f"X must have shape {(X.shape[0],self.coef.shape[0])}")
return np.dot(X,self.coef) + self.intercept
def score(self, X:np.ndarray, Y:np.ndarray):
return 1 - np.sum((self.predict(X) - Y)**2) / np.sum((Y - np.mean(Y))**2)
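# --- Example (illustrative, data below is made up) -------------------------------
# Minimal sketch showing the normal-equation fit above recovering a known line
# y = 2*x + 1 from a handful of points.
if __name__ == '__main__':
    X = np.array([[0.0], [1.0], [2.0], [3.0]])
    Y = np.array([1.0, 3.0, 5.0, 7.0])
    model = LinearRegression().fit(X, Y)
    print(model.coef, model.intercept)          # ~[2.0] ~1.0
    print(model.predict(np.array([[4.0]])))     # ~[9.0]
    print(model.score(X, Y))                    # ~1.0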
|
"""
MetaGenScope-CLI is used to upload data sets to the MetaGenScope web platform.
"""
from setuptools import find_packages, setup
dependencies = [
'click',
'requests',
'configparser',
'pandas',
'datasuper==0.9.0',
]
dependency_links = [
'git+https://github.com/dcdanko/DataSuper.git@develop#egg=datasuper-0.9.0',
]
setup(
name='metagenscope',
version='0.1.1',
url='https://github.com/bchrobot/python-metagenscope',
license='MIT',
author='Benjamin Chrobot',
author_email='[email protected]',
description='MetaGenScope-CLI is used to upload data sets to the MetaGenScope web platform.',
long_description=__doc__,
packages=find_packages(exclude=['tests']),
include_package_data=True,
zip_safe=False,
platforms='any',
install_requires=dependencies,
dependency_links=dependency_links,
entry_points={
'console_scripts': [
'metagenscope = metagenscope_cli.cli:main',
],
},
classifiers=[
# As from http://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX',
'Operating System :: MacOS',
'Operating System :: Unix',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
|
from django.db import models
# Create your models here.
class Session_data(models.Model):
session_id = models.CharField(max_length=200, primary_key=True)
usr_data = models.TextField()
total_img = models.IntegerField()
spp = models.IntegerField()
lock = models.IntegerField() |
'''
Copyright (C) 2018 Intel Corporation

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions
and limitations under the License.

SPDX-License-Identifier: Apache-2.0
'''
import threading
import logging
import logging.handlers
import sys
class Logger:
LOGLEVELS = {"CRITICAL": 45,
"ERROR": 35,
"WARNING": 25,
"INFO": 15,
"DEBUG": 5,
"NOTSET": 0
}
_loginst = None
_loglock = threading.Lock()
def __init__(self, module, format, loglevel="DEBUG", tag=None):
"""Log module init method"""
self._formatter = logging.Formatter(format, '%Y-%m-%d %H:%M:%S')
self._logobj = logging.getLogger(module)
self._level = Logger.LOGLEVELS[loglevel]
self._logobj.setLevel(Logger.LOGLEVELS[loglevel])
writer = logging.StreamHandler(sys.stdout)
writer.setLevel(Logger.LOGLEVELS[loglevel])
writer.setFormatter(self._formatter)
self._logobj.addHandler(writer)
@staticmethod
def getlogger(module="OAT-L",
format="[%(asctime)s - %(levelname)s] %(message)s"):
"""Get logger instance"""
if Logger._loginst is None:
Logger._loglock.acquire()
if Logger._loginst is None:
Logger._loginst = Logger(module, format)
else:
pass
Logger._loglock.release()
return Logger._loginst
def debug(self, message=None):
"""Log message of Debug"""
if message is not None:
self._logobj.debug(message)
def info(self, message=None):
"""Log message of user info"""
if message is not None:
self._logobj.info(message)
def warning(self, message=None):
"""Log message of warning"""
if message is not None:
self._logobj.warning(message)
def error(self, message=None):
"""Log message of error"""
if message is not None:
self._logobj.error(message)
def critical(self, message=None):
"""Log message of critical error"""
if message is not None:
self._logobj.critical(message)
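# --- Example (illustrative, not part of the original module) ---------------------
# Minimal sketch of how the singleton accessor above is typically used; the
# message text is arbitrary.
if __name__ == '__main__':
    log = Logger.getlogger()
    log.info("logger initialised")
    log.debug("debug details go here")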
|
"""
Module for managing the climate within a room.
* It reads/listens to a temperature address on the KNX bus.
* It manages and sends the desired setpoint to the KNX bus.
Modified by Haifeng for KTS smart solution in Guohao Changfeng Residence
- Air conditioner: temperature, target_temperature, operation_mode, fan_mode, on_off
- Floor heating: temperature, target_temperature, on_off
"""
from xknx.exceptions import CouldNotParseTelegram, DeviceIllegalValue
from xknx.knx import GroupAddress
from xknx.devices.device import Device
from xknx.devices.remote_value_temp import RemoteValueTemp
from xknx.devices.remote_value_1count import RemoteValue1Count
from xknx.devices.remote_value_switch import RemoteValueSwitch
class KTSClimate(Device):
"""Class for managing the climate."""
# pylint: disable=too-many-instance-attributes,invalid-name
DEFAULT_TARGET_TEMPERATURE_STEP = 0.5
DEFAULT_TARGET_TEMPERATURE_MAX = 35
DEFAULT_TARGET_TEMPERATURE_MIN = 5
def __init__(self,
xknx,
name,
group_address_temperature=None,
group_address_target_temperature=None,
group_address_target_temperature_state=None,
target_temperature_step=DEFAULT_TARGET_TEMPERATURE_STEP,
target_temperature_max=DEFAULT_TARGET_TEMPERATURE_MAX,
target_temperature_min=DEFAULT_TARGET_TEMPERATURE_MIN,
group_address_operation_mode=None,
group_address_operation_mode_state=None,
group_address_fan_mode=None,
group_address_fan_mode_state=None,
group_address_on_off=None,
group_address_on_off_state=None,
device_updated_cb=None):
"""Initialize Climate class."""
# pylint: disable=too-many-arguments, too-many-locals, too-many-branches, too-many-statements
super(KTSClimate, self).__init__(xknx, name, device_updated_cb)
self.temperature = RemoteValueTemp(
xknx,
group_address_state=group_address_temperature,
device_name=self.name,
after_update_cb=self.after_update)
self.target_temperature = RemoteValueTemp(
xknx,
group_address_target_temperature,
group_address_target_temperature_state,
device_name=self.name,
after_update_cb=self.after_update)
self.target_temperature_step = target_temperature_step
self.target_temperature_max = target_temperature_max
self.target_temperature_min = target_temperature_min
self.supports_on_off = group_address_on_off is not None or \
group_address_on_off_state is not None
self.on = RemoteValueSwitch(
xknx,
group_address_on_off,
group_address_on_off_state,
self.name,
self.after_update)
self.supports_operation_mode = \
group_address_operation_mode is not None or \
group_address_operation_mode_state is not None
self.operation_mode = RemoteValue1Count(
xknx,
group_address_operation_mode,
group_address_operation_mode_state,
device_name=self.name,
after_update_cb=self.after_update)
self.supports_fan_mode = \
group_address_fan_mode is not None or \
group_address_fan_mode_state is not None
self.fan_mode = RemoteValue1Count(
xknx,
group_address_fan_mode,
group_address_fan_mode_state,
device_name=self.name,
after_update_cb=self.after_update)
@classmethod
def from_config(cls, xknx, name, config):
"""Initialize object from configuration structure."""
# pylint: disable=too-many-locals
group_address_temperature = \
config.get('group_address_temperature')
group_address_target_temperature = \
config.get('group_address_target_temperature')
group_address_target_temperature_state = \
config.get('group_address_target_temperature_state')
target_temperature_step = config.get('target_temperature_step')
target_temperature_max = config.get('target_temperature_max')
target_temperature_min = config.get('target_temperature_min')
group_address_operation_mode = \
config.get('group_address_operation_mode')
group_address_operation_mode_state = \
config.get('group_address_operation_mode_state')
group_address_fan_mode = \
config.get('group_address_fan_mode')
group_address_fan_mode_state = \
config.get('group_address_fan_mode_state')
group_address_on_off = \
config.get('group_address_on_off')
group_address_on_off_state = \
config.get('group_address_on_off_state')
return cls(xknx,
name,
group_address_temperature=group_address_temperature,
group_address_target_temperature=group_address_target_temperature,
group_address_target_temperature_state=group_address_target_temperature_state,
target_temperature_step=target_temperature_step,
target_temperature_max=target_temperature_max,
target_temperature_min=target_temperature_min,
group_address_operation_mode=group_address_operation_mode,
group_address_operation_mode_state=group_address_operation_mode_state,
group_address_fan_mode=group_address_fan_mode,
group_address_fan_mode_state=group_address_fan_mode_state,
group_address_on_off=group_address_on_off,
group_address_on_off_state=group_address_on_off_state)
def has_group_address(self, group_address):
"""Test if device has given group address."""
return self.temperature.has_group_address(group_address) or \
self.target_temperature.has_group_address(group_address) or \
self.operation_mode.has_group_address(group_address) or \
self.fan_mode.has_group_address(group_address) or \
self.on.has_group_address(group_address)
@property
def is_on(self):
"""Return power status."""
#return bool(self.on.value and self.on.value.value == 1)
return self.on.value
async def turn_on(self):
"""Set power status to on."""
await self.on.on()
async def turn_off(self):
"""Set power status to off."""
await self.on.off()
async def set_target_temperature(self, target_temperature):
"""Send target temperature to KNX bus."""
# send/broadcast new target temperature and set internally
await self.target_temperature.set(target_temperature)
async def set_operation_mode(self, operation_mode):
"""Set the operation mode of a thermostat. Send new operation_mode to BUS and update internal state."""
if not self.supports_operation_mode:
raise DeviceIllegalValue("operation mode not supported", operation_mode)
if operation_mode == 'Cool':
await self.operation_mode.set(1)
if operation_mode == 'Heat':
await self.operation_mode.set(4)
if operation_mode == 'Fan':
await self.operation_mode.set(3)
if operation_mode == 'Dry':
await self.operation_mode.set(2)
# if operation_mode == 'Auto':
# await self.operation_mode.set(-1)
def get_supported_operation_modes(self):
"""Return all configured operation modes."""
if not self.supports_operation_mode:
return []
else:
return ['Cool', 'Heat', 'Fan', 'Dry']
def get_operation_mode(self):
"""Return current operation mode."""
if not self.supports_operation_mode:
return None
else:
val = self.operation_mode.value
if val == 1:
return 'Cool'
if val == 4:
return 'Heat'
if val == 3:
return 'Fan'
if val == 2:
return 'Dry'
# if val == -1:
# return 'Auto'
return 'Cool'
async def set_fan_mode(self, fan_mode):
"""Set the fan mode of a thermostat. Send new fan_mode to BUS and update internal state."""
if not self.supports_fan_mode:
raise DeviceIllegalValue("fan mode not supported", fan_mode)
if fan_mode == 'Low':
await self.fan_mode.set(1)
if fan_mode == 'Medium':
await self.fan_mode.set(2)
if fan_mode == 'High':
await self.fan_mode.set(3)
if fan_mode == 'Auto':
await self.fan_mode.set(4)
def get_supported_fan_modes(self):
"""Return all configured fan modes."""
if not self.supports_fan_mode:
return []
else:
return ['Low', 'Medium', 'High', 'Auto']
    def get_fan_mode(self):
        """Return current fan mode."""
if not self.supports_fan_mode:
return None
else:
val = self.fan_mode.value
if val == 1:
return 'Low'
if val == 2:
return 'Medium'
if val == 3:
return 'High'
if val == 4:
return 'Auto'
return 'Auto'
async def process_group_write(self, telegram):
"""Process incoming GROUP WRITE telegram."""
await self.temperature.process(telegram)
await self.target_temperature.process(telegram)
await self.operation_mode.process(telegram)
await self.fan_mode.process(telegram)
await self.on.process(telegram)
def state_addresses(self):
"""Return group addresses which should be requested to sync state."""
state_addresses = []
state_addresses.extend(self.temperature.state_addresses())
state_addresses.extend(self.target_temperature.state_addresses())
state_addresses.extend(self.on.state_addresses())
if self.supports_operation_mode:
state_addresses.extend(self.operation_mode.state_addresses())
if self.supports_fan_mode:
state_addresses.extend(self.fan_mode.state_addresses())
        # Note: telegrams setting split-up operation modes are not yet implemented
return state_addresses
def __str__(self):
"""Return object as readable string."""
return '<KTS Climate name="{0}" ' \
'temperature="{1}" ' \
'target_temperature="{2}" ' \
'target_temperature_step="{3}" ' \
'target_temperature_max="{4}" '\
'target_temperature_min="{5}" '\
'group_address_operation_mode="{6}" ' \
'group_address_fan_mode="{7}" ' \
'group_address_on_off="{8}" ' \
'/>' \
.format(
self.name,
self.temperature.group_addr_str(),
self.target_temperature.group_addr_str(),
self.target_temperature_step,
self.target_temperature_max,
self.target_temperature_min,
self.operation_mode.group_addr_str(),
self.fan_mode.group_addr_str(),
self.on.group_addr_str())
def __eq__(self, other):
"""Equal operator."""
return self.__dict__ == other.__dict__
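# --- Example (illustrative, group addresses below are hypothetical) --------------
# Minimal sketch of the configuration structure consumed by KTSClimate.from_config
# above; in real use the dict comes from the xknx YAML configuration, and an xknx
# instance is required to actually construct the device.
if __name__ == "__main__":
    example_config = {
        'group_address_temperature': '1/2/0',
        'group_address_target_temperature': '1/2/1',
        'group_address_target_temperature_state': '1/2/2',
        'group_address_operation_mode': '1/2/3',
        'group_address_operation_mode_state': '1/2/4',
        'group_address_fan_mode': '1/2/5',
        'group_address_fan_mode_state': '1/2/6',
        'group_address_on_off': '1/2/7',
        'group_address_on_off_state': '1/2/8',
    }
    # KTSClimate.from_config(xknx, 'living_room_ac', example_config) would build
    # the device once an xknx instance is available.
    print(example_config)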
|
#!/usr/bin/env python
# coding:utf-8
"""
Author: zhiying
URL: www.zhouzying.cn
Data: 2019-01-24
Description: Bilibili user info crawler
Fields scraped: user id, nickname, gender, avatar, level, experience, follower count, birthday, location, registration time, signature, level and experience, etc. A Bilibili user data report is generated after scraping.
"""
import requests
import time
import pymysql.cursors
def get_info():
    headers = {"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) "
                             "Chrome/69.0.3497.100 Safari/537.36"}
    # Bilibili founder's profile info: url = 'http://api.bilibili.com/x/space/acc/info?mid=2&jsonp=jsonp'
    # The corresponding page is http://space.bilibili.com/2
    # Start from mid = 1; mid = 5406 and 5824 are known anomalies
mid = 5825
while True:
url = 'http://api.bilibili.com/x/space/acc/info?mid={}&jsonp=jsonp'.format(mid)
r = requests.get(url, headers=headers)
if r.json()['code'] == 0:
# print(r.json()['data']['name'])
data = r.json()['data']
yield data
if r.json()['code'] == -400:
with open('log.txt', 'a', encoding='utf-8') as f:
f.write("B站共有{}用户。\n".format(mid - 1))
f.close()
break
        # Sleep 10 s after every 1000 users crawled
if mid % 1000 == 0:
time.sleep(10)
mid += 1
def save_to_databases():
# Connect to the database
    connection = pymysql.connect(host='localhost',  # database server address
                                 user='root',  # database username
                                 password='',  # database password
                                 # database name (optional)
                                 db='Bilibili_infos',  # name of the database to operate on (optional)
                                 charset='utf8mb4',  # charset (optional)
                                 cursorclass=pymysql.cursors.DictCursor)  # optional parameter
print("MySQL登录成功")
try:
for item in get_info():
            # user id
            uid = item['mid']
            # nickname
            name = item['name']
            # gender
            sex = item['sex']
            # avatar
            face_url = item['face']
            # level
            rank = item['level']
            # experience
            coins = item['coins']
            # birthday
            birth = item['birthday']
            # location
            # place = item['']
            # signature
            sign = item['sign']
            sign = sign.strip()
            # registration time
            jointime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(item['jointime']))
            # following count
            # follower count
            # view count
            print('uid: {} nickname: {} gender: {} level: {} experience: {} birthday: {} signature: {} join time: {}'.format(uid, name, sex, rank, coins, birth, sign, jointime))
try:
with connection.cursor() as cursor:
# Create a new record
# sql = "INSERT INTO `infos` (`uid`, `name`, `sex`, `birth`, `sign`, `jointime`, `face_url`, `rank`, `coins`) VALUES (uid, name, sex, birth, sign, jointime, face_url, rank, coins);"
# sql = "INSERT INTO info3 (uid, name, sign) VALUES ({},{}, {})".format(uid, name, sign)
cursor.execute('INSERT INTO `info4` (`uid`, `name`, `sex`, `rank`, `coins`, `birth`,'
' `sign`, `jointime`) VALUES ("%s", "%s", "%s", "%s", "%s", "%s", "%s", "%s");'
% (uid, name, sex, rank, coins, birth, sign, jointime))
# connection is not autocommit by default. So you must commit to save
# your changes.
connection.commit()
except:
with open('error.txt', 'a') as f:
                    f.write('{} hit an error!\n'.format(uid))
f.close()
continue
finally:
connection.close()
save_to_databases()
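# A hypothetical schema for the `info4` table that the INSERT statement above
# expects; the exact column types are assumptions, not taken from the project.
# CREATE TABLE `info4` (
#     `uid` INT, `name` VARCHAR(255), `sex` VARCHAR(16), `rank` INT,
#     `coins` INT, `birth` VARCHAR(32), `sign` TEXT, `jointime` DATETIME
# );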
|
"""
Defines package metadata.
Be careful not to pull in anything Django-related here (this may complicate access to the
metadata defined below).
"""
from pathlib import PurePath
version_file_path = PurePath(__file__).parent / 'VERSION'
with open(version_file_path) as file:
__version__ = file.read().strip()
|
import os
import logging
import math
from pyvivado import builder, interface, signal
from rfgnocchi import config, noc, ettus
logger = logging.getLogger(__name__)
class NocBlockDummyBuilder(builder.Builder):
def __init__(self, params):
super().__init__(params)
module_name = 'noc_block_dummy'
self.builders = [
ettus.get_builder('noc_shell'),
ettus.get_builder('setting_reg'),
ettus.get_builder('axi_wrapper'),
ettus.get_builder('axi_round_and_clip_complex'),
]
self.simple_filenames = [
os.path.join(config.basedir, 'ettus', 'rfnoc', 'noc_block_dummy.vhd'),
]
def get_noc_block_dummy_interface(params):
builder = NocBlockDummyBuilder(params)
iface = interface.Interface(
wires_in=noc.noc_input_wires,
wires_out=noc.noc_output_wires,
module_name='noc_block_dummy',
parameters=params,
builder=builder,
clock_names=noc.noc_clocks,
constants=builder.constants,
)
return iface
assert('noc_block_dummy' not in interface.module_register)
interface.module_register['noc_block_dummy'] = get_noc_block_dummy_interface
|
# -*- coding: utf-8 -*-
import torch
from torch import nn
from torch.autograd import Variable
from ..utils.nn import get_rnn_hidden_state
from . import FF, Attention
class ReverseVideoDecoder(nn.Module):
"""
A reverse video feature reconstruction decoder
"""
def __init__(self, input_size, hidden_size, ctx_size_dict, ctx_name,
rnn_type, video_dim, dec_init='zero',
dec_init_size=None, att_type='mlp',
att_activ='tanh', att_bottleneck='ctx', att_temp=1.0,
use_smoothL1=False, transform_ctx=True, mlp_bias=False, dropout_out=0):
super().__init__()
# Normalize case
self.rnn_type = rnn_type.upper()
# Safety checks
assert self.rnn_type in ('GRU', 'LSTM'), \
"rnn_type '{}' not known".format(rnn_type)
assert dec_init in ('zero', 'mean_ctx', 'feats'), \
"dec_init '{}' not known".format(dec_init)
RNN = getattr(nn, '{}Cell'.format(self.rnn_type))
# LSTMs have also the cell state
self.n_states = 1 if self.rnn_type == 'GRU' else 2
# Set custom handlers for GRU/LSTM
if self.rnn_type == 'GRU':
self._rnn_unpack_states = lambda x: x
self._rnn_pack_states = lambda x: x
elif self.rnn_type == 'LSTM':
self._rnn_unpack_states = self._lstm_unpack_states
self._rnn_pack_states = self._lstm_pack_states
# Set decoder initializer
self._init_func = getattr(self, '_rnn_init_{}'.format(dec_init))
# Other arguments
self.input_size = input_size
self.hidden_size = hidden_size
self.ctx_size_dict = ctx_size_dict
self.ctx_name = ctx_name
self.dec_init = dec_init
self.dec_init_size = dec_init_size
self.att_type = att_type
self.att_bottleneck = att_bottleneck
self.att_activ = att_activ
self.att_temp = att_temp
self.transform_ctx = transform_ctx
self.mlp_bias = mlp_bias
self.dropout_out = dropout_out
self.video_dim = video_dim
self.use_smoothL1 = use_smoothL1
        # Video frame embedding layer that maps video_dim (e.g. 2048) to hidden_size
self.emb = FF(self.video_dim, self.hidden_size, bias=True, activ='tanh')
# Create an attention layer
self.att = Attention(self.ctx_size_dict[self.ctx_name], self.hidden_size,
transform_ctx=self.transform_ctx,
mlp_bias=self.mlp_bias,
att_type=self.att_type,
att_activ=self.att_activ,
att_bottleneck=self.att_bottleneck,
temp=self.att_temp)
# Decoder initializer FF (for 'mean_ctx' or auxiliary 'feats')
if self.dec_init in ('mean_ctx', 'feats'):
if self.dec_init == 'mean_ctx':
self.dec_init_size = self.ctx_size_dict[self.ctx_name]
self.ff_dec_init = FF(
self.dec_init_size,
self.hidden_size * self.n_states, activ='tanh')
# Create first decoder layer necessary for attention
self.dec0 = RNN(self.hidden_size, self.hidden_size)
self.dec1 = RNN(self.hidden_size, self.hidden_size)
# Output dropout
if self.dropout_out > 0:
self.do_out = nn.Dropout(p=self.dropout_out)
# Output bottleneck: maps hidden states to target emb dim
self.hid2out = FF(self.hidden_size, self.video_dim)
# MSE loss
self.MSE_loss = nn.MSELoss(size_average=False)
# SmoothL1 loss
self.SmoothL1_loss = nn.SmoothL1Loss(size_average=False)
def _lstm_pack_states(self, h):
return torch.cat(h, dim=-1)
def _lstm_unpack_states(self, h):
# Split h_t and c_t into two tensors and return a tuple
return torch.split(h, self.hidden_size, dim=-1)
def _rnn_init_zero(self, ctx_dict):
ctx, _ = ctx_dict[self.ctx_name]
h_0 = torch.zeros(ctx.shape[1], self.hidden_size * self.n_states)
return Variable(h_0).cuda()
def _rnn_init_mean_ctx(self, ctx_dict):
ctx, ctx_mask = ctx_dict[self.ctx_name]
if ctx_mask is None:
return self.ff_dec_init(ctx.mean(0))
else:
return self.ff_dec_init(ctx.sum(0) / ctx_mask.sum(0).unsqueeze(1))
def _rnn_init_feats(self, ctx_dict):
ctx, _ = ctx_dict['feats']
return self.ff_dec_init(ctx)
def f_init(self, ctx_dict):
"""Returns the initial h_0 for the decoder."""
self.alphas = []
return self._init_func(ctx_dict)
def f_next(self, ctx_dict, y, hidden):
        '''
        Run one decoding step: encode the current frame embedding with the first
        RNN cell (dec0), attend over the encoder states, then run the second RNN
        cell (dec1) and project the result back to the video feature space.
        '''
# Get hidden states from the first decoder (purely cond. on LM)
h1_c1 = self.dec0(y, self._rnn_unpack_states(hidden))
h1 = get_rnn_hidden_state(h1_c1)
# Apply attention
self.txt_alpha_t, txt_z_t = self.att(
h1.unsqueeze(0), *ctx_dict[self.ctx_name])
# Run second decoder (h1 is compatible now as it was returned by GRU)
h2_c2 = self.dec1(txt_z_t, h1_c1)
h2 = get_rnn_hidden_state(h2_c2)
# This is a bottleneck to avoid going from H to V directly
predicted_v = self.hid2out(h2)
return predicted_v, self._rnn_pack_states(h2_c2)
def forward(self, ctx_dict, y, use_smoothL1=False):
"""
Computes the Mean Squared Error or the SmoothL1 between the predicted
feature vector and the ground-truth video features indices `y`.
Only called during training.
"""
loss = 0.0
# reverse the order of the target sequence
y = y.transpose(0, 1)
idx = [i for i in range(y.size(1) - 1, -1, -1)]
idx = Variable(torch.LongTensor(idx)).cuda()
y = y.index_select(1, idx).transpose(0, 1)
# Get initial hidden state
hidden = self.f_init(ctx_dict)
# Initializing the first step of the decoder with zeros
predicted_v = Variable(torch.zeros(y.shape[1], self.video_dim)).cuda()
for t in range(y.shape[0]):
predicted_v = self.emb(predicted_v)
predicted_v, hidden = self.f_next(ctx_dict, predicted_v, hidden)
if use_smoothL1:
loss += self.SmoothL1_loss(predicted_v, y[t])
else:
loss += self.MSE_loss(predicted_v, y[t])
return {'loss': loss, 'logps': None, 'n_items': y.shape[0] * y.shape[1]}
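# --- Example (illustrative, shapes are arbitrary) ---------------------------------
# Self-contained sketch of the time-axis reversal used in forward() above: the
# transpose / index_select / transpose sequence amounts to flipping a (T, B, D)
# feature tensor along its time dimension so the decoder reconstructs the video
# features in reverse order.
if __name__ == '__main__':
    T, B, D = 5, 2, 4
    y = torch.arange(T * B * D, dtype=torch.float).view(T, B, D)
    idx = torch.LongTensor(list(range(T - 1, -1, -1)))
    y_rev = y.index_select(0, idx)
    assert torch.equal(y_rev[0], y[-1]) and torch.equal(y_rev[-1], y[0])
    print(y_rev.shape)  # torch.Size([5, 2, 4])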
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import shutil
import json
import os
import tempfile
import time
import threading
import shlex
import traceback
import signal
from argparse import ArgumentParser
from mephisto.operations.supervisor import Supervisor, Job
from typing import Dict, Optional, List, Any, Tuple, NamedTuple, Type, TYPE_CHECKING
from mephisto.data_model.task_config import TaskConfig
from mephisto.data_model.task_run import TaskRun
from mephisto.data_model.requester import Requester
from mephisto.abstractions.blueprint import OnboardingRequired, SharedTaskState
from mephisto.abstractions.database import MephistoDB, EntryDoesNotExistException
from mephisto.data_model.qualification import make_qualification_dict, QUAL_NOT_EXIST
from mephisto.operations.task_launcher import TaskLauncher
from mephisto.operations.registry import (
get_blueprint_from_type,
get_crowd_provider_from_type,
get_architect_from_type,
)
from mephisto.operations.utils import get_mock_requester
from mephisto.operations.logger_core import get_logger, set_mephisto_log_level
from omegaconf import DictConfig, OmegaConf
logger = get_logger(name=__name__)
if TYPE_CHECKING:
from mephisto.data_model.agent import Agent
from mephisto.abstractions.blueprint import Blueprint, TaskRunner
from mephisto.abstractions.crowd_provider import CrowdProvider
from mephisto.abstractions.architect import Architect
from argparse import Namespace
RUN_STATUS_POLL_TIME = 10
class TrackedRun(NamedTuple):
task_run: TaskRun
architect: "Architect"
task_runner: "TaskRunner"
task_launcher: TaskLauncher
job: Job
class Operator:
"""
Acting as the controller behind the curtain, the Operator class
is responsible for managing the knobs, switches, and dials
of the rest of the Mephisto architecture.
Most convenience scripts for using Mephisto will use an Operator
to get the job done, though this class itself is also a
good model to use to understand how the underlying
architecture works in order to build custom jobs or workflows.
"""
def __init__(self, db: "MephistoDB"):
self.db = db
self.supervisor = Supervisor(db)
self._task_runs_tracked: Dict[str, TrackedRun] = {}
self.is_shutdown = False
self._run_tracker_thread = threading.Thread(
target=self._track_and_kill_runs, name="Operator-tracking-thread"
)
self._run_tracker_thread.start()
@staticmethod
def _get_baseline_argparser() -> ArgumentParser:
"""Return a parser for the baseline requirements to launch a job"""
parser = ArgumentParser()
parser.add_argument(
"--blueprint-type",
dest="blueprint_type",
help="Name of the blueprint to launch",
required=True,
)
parser.add_argument(
"--architect-type",
dest="architect_type",
help="Name of the architect to launch with",
required=True,
)
parser.add_argument(
"--requester-name",
dest="requester_name",
help="Identifier for the requester to launch as",
required=True,
)
return parser
def get_running_task_runs(self):
"""Return the currently running task runs and their handlers"""
return self._task_runs_tracked.copy()
def parse_and_launch_run(
self,
arg_list: Optional[List[str]] = None,
extra_args: Optional[Dict[str, Any]] = None,
) -> Optional[str]:
"""
Wrapper around parse and launch run that prints errors on failure, rather
than throwing. Generally for use in scripts.
"""
raise Exception(
"Operator.parse_and_launch_run has been deprecated in favor "
"of using Hydra for argument configuration. See the docs at "
"https://github.com/facebookresearch/Mephisto/blob/main/docs/hydra_migration.md "
"in order to upgrade."
)
def validate_and_run_config_or_die(
self, run_config: DictConfig, shared_state: Optional[SharedTaskState] = None
) -> str:
"""
Parse the given arguments and launch a job.
"""
set_mephisto_log_level(level=run_config.get("log_level", "info"))
# First try to find the requester:
requester_name = run_config.provider.requester_name
requesters = self.db.find_requesters(requester_name=requester_name)
if len(requesters) == 0:
if run_config.provider.requester_name == "MOCK_REQUESTER":
requesters = [get_mock_requester(self.db)]
else:
raise EntryDoesNotExistException(
f"No requester found with name {requester_name}"
)
requester = requesters[0]
requester_id = requester.db_id
provider_type = requester.provider_type
assert provider_type == run_config.provider._provider_type, (
f"Found requester for name {requester_name} is not "
f"of the specified type {run_config.provider._provider_type}, "
f"but is instead {provider_type}."
)
# Next get the abstraction classes, and run validation
# before anything is actually created in the database
blueprint_type = run_config.blueprint._blueprint_type
architect_type = run_config.architect._architect_type
BlueprintClass = get_blueprint_from_type(blueprint_type)
ArchitectClass = get_architect_from_type(architect_type)
CrowdProviderClass = get_crowd_provider_from_type(provider_type)
if shared_state is None:
shared_state = BlueprintClass.SharedStateClass()
BlueprintClass.assert_task_args(run_config, shared_state)
ArchitectClass.assert_task_args(run_config, shared_state)
CrowdProviderClass.assert_task_args(run_config, shared_state)
# Find an existing task or create a new one
task_name = run_config.task.get("task_name", None)
if task_name is None:
task_name = blueprint_type
logger.warning(
f"Task is using the default blueprint name {task_name} as a name, "
"as no task_name is provided"
)
tasks = self.db.find_tasks(task_name=task_name)
task_id = None
if len(tasks) == 0:
task_id = self.db.new_task(task_name, blueprint_type)
else:
task_id = tasks[0].db_id
logger.info(f"Creating a task run under task name: {task_name}")
# Create a new task run
new_run_id = self.db.new_task_run(
task_id,
requester_id,
json.dumps(OmegaConf.to_yaml(run_config, resolve=True)),
provider_type,
blueprint_type,
requester.is_sandbox(),
)
task_run = TaskRun.get(self.db, new_run_id)
try:
# Register the blueprint with args to the task run,
# ensure cached
blueprint = task_run.get_blueprint(
args=run_config, shared_state=shared_state
)
# If anything fails after here, we have to cleanup the architect
build_dir = os.path.join(task_run.get_run_dir(), "build")
os.makedirs(build_dir, exist_ok=True)
architect = ArchitectClass(
self.db, run_config, shared_state, task_run, build_dir
)
# Setup and deploy the server
built_dir = architect.prepare()
task_url = architect.deploy()
# TODO(#102) maybe the cleanup (destruction of the server configuration?) should only
# happen after everything has already been reviewed, this way it's possible to
# retrieve the exact build directory to review a task for real
architect.cleanup()
# Create the backend runner
task_runner = BlueprintClass.TaskRunnerClass(
task_run, run_config, shared_state
)
# Small hack for auto appending block qualification
existing_qualifications = shared_state.qualifications
if run_config.blueprint.get("block_qualification", None) is not None:
existing_qualifications.append(
make_qualification_dict(
run_config.blueprint.block_qualification, QUAL_NOT_EXIST, None
)
)
if run_config.blueprint.get("onboarding_qualification", None) is not None:
existing_qualifications.append(
make_qualification_dict(
OnboardingRequired.get_failed_qual(
run_config.blueprint.onboarding_qualification
),
QUAL_NOT_EXIST,
None,
)
)
shared_state.qualifications = existing_qualifications
# Register the task with the provider
provider = CrowdProviderClass(self.db)
provider.setup_resources_for_task_run(
task_run, run_config, shared_state, task_url
)
initialization_data_iterable = blueprint.get_initialization_data()
# Link the job together
job = self.supervisor.register_job(
architect, task_runner, provider, existing_qualifications
)
if self.supervisor.sending_thread is None:
self.supervisor.launch_sending_thread()
except (KeyboardInterrupt, Exception) as e:
logger.error(
"Encountered error while launching run, shutting down", exc_info=True
)
try:
architect.shutdown()
except (KeyboardInterrupt, Exception) as architect_exception:
logger.exception(
f"Could not shut down architect: {architect_exception}",
exc_info=True,
)
raise e
launcher = TaskLauncher(
self.db,
task_run,
initialization_data_iterable,
max_num_concurrent_units=run_config.task.max_num_concurrent_units,
)
launcher.create_assignments()
launcher.launch_units(task_url)
self._task_runs_tracked[task_run.db_id] = TrackedRun(
task_run=task_run,
task_launcher=launcher,
task_runner=task_runner,
architect=architect,
job=job,
)
task_run.update_completion_progress(status=False)
return task_run.db_id
def _track_and_kill_runs(self):
"""
Background thread that shuts down servers when a task
is fully done.
"""
while not self.is_shutdown:
runs_to_check = list(self._task_runs_tracked.values())
for tracked_run in runs_to_check:
task_run = tracked_run.task_run
if tracked_run.task_launcher.finished_generators is False:
# If the run can still generate assignments, it's
# definitely not done
continue
task_run.update_completion_progress(
task_launcher=tracked_run.task_launcher
)
if not task_run.get_is_completed():
continue
else:
self.supervisor.shutdown_job(tracked_run.job)
tracked_run.architect.shutdown()
tracked_run.task_launcher.shutdown()
del self._task_runs_tracked[task_run.db_id]
time.sleep(RUN_STATUS_POLL_TIME)
def force_shutdown(self, timeout=5):
"""
Force a best-effort shutdown of everything, letting no individual
shutdown step suspend for more than the timeout before moving on.
Skips waiting for in-flight assignments to rush the shutdown.
** Should only be used in sandbox or test environments. **
"""
self.is_shutdown = True
def end_launchers_and_expire_units():
for tracked_run in self._task_runs_tracked.values():
tracked_run.task_launcher.shutdown()
tracked_run.task_launcher.expire_units()
def end_architects():
for tracked_run in self._task_runs_tracked.values():
tracked_run.architect.shutdown()
def shutdown_supervisor():
if self.supervisor is not None:
self.supervisor.shutdown()
tasks = {
"expire-units": end_launchers_and_expire_units,
"kill-architects": end_architects,
"fire-supervisor": shutdown_supervisor,
}
for tname, t in tasks.items():
shutdown_thread = threading.Thread(target=t, name=f"force-shutdown-{tname}")
shutdown_thread.start()
start_time = time.time()
while time.time() - start_time < timeout and shutdown_thread.is_alive():
time.sleep(0.5)
if not shutdown_thread.is_alive():
# Only join if the shutdown fully completed
shutdown_thread.join()
def shutdown(self, skip_input=True):
logger.info("operator shutting down")
self.is_shutdown = True
runs_to_check = list(self._task_runs_tracked.items())
for run_id, tracked_run in runs_to_check:
logger.info(f"Expiring units for task run {run_id}.")
try:
tracked_run.task_launcher.shutdown()
except (KeyboardInterrupt, SystemExit) as e:
logger.info(
f"Skipping waiting for launcher threads to join on task run {run_id}."
)
def cant_cancel_expirations(self, sig, frame):
logging.warn(
"Ignoring ^C during unit expirations. ^| if you NEED to exit and you will "
"clean up units that hadn't been expired afterwards."
)
old_handler = signal.signal(signal.SIGINT, cant_cancel_expirations)
tracked_run.task_launcher.expire_units()
signal.signal(signal.SIGINT, old_handler)
try:
remaining_runs = self._task_runs_tracked.values()
while len(remaining_runs) > 0:
next_runs = []
for tracked_run in remaining_runs:
if tracked_run.task_run.get_is_completed():
tracked_run.architect.shutdown()
else:
next_runs.append(tracked_run)
if len(next_runs) > 0:
logger.info(
f"Waiting on {len(remaining_runs)} task runs with assignments in-flight "
f"Ctrl-C ONCE to kill running tasks and FORCE QUIT."
)
time.sleep(30)
remaining_runs = next_runs
except Exception as e:
logger.exception(
f"Encountered problem during shutting down {e}", exc_info=True
)
import traceback
traceback.print_exc()
except (KeyboardInterrupt, SystemExit) as e:
logger.info(
"Skipping waiting for outstanding task completions, shutting down servers now!"
)
for tracked_run in remaining_runs:
logger.info(
f"Shutting down Architect for task run {tracked_run.task_run.db_id}"
)
tracked_run.architect.shutdown()
finally:
self.supervisor.shutdown()
self._run_tracker_thread.join()
def validate_and_run_config(
self, run_config: DictConfig, shared_state: Optional[SharedTaskState] = None
) -> Optional[str]:
"""
Wrapper around validate_and_run_config_or_die that prints errors on
failure, rather than throwing. Generally for use in scripts.
"""
try:
return self.validate_and_run_config_or_die(
run_config=run_config, shared_state=shared_state
)
except (KeyboardInterrupt, Exception) as e:
logger.error("Ran into error while launching run: ", exc_info=True)
return None
def parse_and_launch_run_wrapper(
self,
arg_list: Optional[List[str]] = None,
extra_args: Optional[Dict[str, Any]] = None,
) -> Optional[str]:
"""
Wrapper around parse and launch run that prints errors on failure, rather
than throwing. Generally for use in scripts.
"""
raise Exception(
"Operator.parse_and_launch_run_wrapper has been deprecated in favor "
"of using Hydra for argument configuration. See the docs at "
"https://github.com/facebookresearch/Mephisto/blob/main/docs/hydra_migration.md "
"in order to upgrade."
)
def print_run_details(self):
"""Print details about running tasks"""
# TODO(#93) parse these tasks and get the full details
for task in self.get_running_task_runs():
logger.info(f"Operator running task ID = {task}")
def wait_for_runs_then_shutdown(
self, skip_input=False, log_rate: Optional[int] = None
) -> None:
"""
Wait for task_runs to complete, and then shutdown.
Set log_rate to get print statements of currently running tasks
at the specified interval
"""
try:
try:
last_log = 0.0
while len(self.get_running_task_runs()) > 0:
if log_rate is not None:
if time.time() - last_log > log_rate:
last_log = time.time()
self.print_run_details()
time.sleep(RUN_STATUS_POLL_TIME)
except Exception as e:
if skip_input:
raise e
traceback.print_exc()
should_quit = input(
"The above exception happened while running a task, do "
"you want to shut down? (y)/n: "
)
if should_quit not in ["n", "N", "no", "No"]:
raise e
except Exception as e:
import traceback
traceback.print_exc()
except (KeyboardInterrupt, SystemExit) as e:
logger.exception(
"Cleaning up after keyboard interrupt, please wait!", exc_info=True
)
finally:
self.shutdown()
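# --- Usage sketch (illustrative, not part of this module) -------------------------
# Typical driver code builds an Operator around a MephistoDB, launches a run from
# a Hydra config, then blocks until the run finishes; `db` and `cfg` below are
# assumed to come from the surrounding script.
#
#     operator = Operator(db)
#     operator.validate_and_run_config(run_config=cfg.mephisto, shared_state=None)
#     operator.wait_for_runs_then_shutdown(skip_input=True, log_rate=30)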
|
# Generated by Django 3.1.7 on 2021-05-16 06:52
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='AsteriskPublication',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('devicestate_publish', models.CharField(blank=True, max_length=40, null=True)),
('mailboxstate_publish', models.CharField(blank=True, max_length=40, null=True)),
('device_state', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=128, null=True)),
('device_state_filter', models.CharField(blank=True, max_length=256, null=True)),
('mailbox_state', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('mailbox_state_filter', models.CharField(blank=True, max_length=256, null=True)),
],
options={
'verbose_name': 'Ps_asterisk_publications',
'verbose_name_plural': 'Ps_asterisk_publications',
'db_table': 'ps_asterisk_publications',
},
),
migrations.CreateModel(
name='Contacts',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uri', models.CharField(blank=True, max_length=511, null=True)),
('expiration_time', models.IntegerField(blank=True, null=True, verbose_name='expiration_time')),
('quality_frequency', models.IntegerField(blank=True, null=True, verbose_name='quality_frequency')),
('outbound_proxy', models.CharField(blank=True, max_length=40, null=True)),
('path', models.TextField(blank=True, null=True, verbose_name='path')),
('user_agent', models.CharField(blank=True, max_length=255, null=True)),
('quality_timeout', models.FloatField(blank=True, null=True, verbose_name='quality_timeout')),
('reg_server', models.CharField(blank=True, max_length=255, null=True)),
('authenticate_quality', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('via_addr', models.CharField(blank=True, max_length=40, null=True)),
('via_port', models.IntegerField(blank=True, null=True, verbose_name='expiration_time')),
('call_id', models.CharField(blank=True, max_length=255, null=True)),
('endpoint', models.CharField(blank=True, max_length=40, null=True)),
('prune_on_boot', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
],
options={
'verbose_name': 'Ps_contacts',
'verbose_name_plural': 'Ps_contacts',
'db_table': 'ps_contacts',
},
),
migrations.CreateModel(
name='Contexts',
fields=[
('name', models.CharField(max_length=25, primary_key=True, serialize=False, verbose_name='le contexte')),
('full_name', models.CharField(max_length=25, verbose_name='la description')),
('incoming', models.BooleanField(choices=[(True, 'Entrant'), (False, 'Sortant')], db_index=True, default=False, verbose_name='Entrant')),
],
options={
'verbose_name': 'Context',
'verbose_name_plural': 'Contexts',
'db_table': 'contexts',
},
),
migrations.CreateModel(
name='Endpoints_id_ips',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('endpoint', models.CharField(blank=True, max_length=40, null=True)),
('match', models.CharField(blank=True, max_length=80, null=True)),
('srv_lookups', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('match_header', models.CharField(blank=True, max_length=255, null=True)),
],
options={
'verbose_name': 'Ps_endpoint_id_ips',
'verbose_name_plural': 'Ps_endpoint_id_ips',
'db_table': 'ps_endpoint_id_ips',
},
),
migrations.CreateModel(
name='IvrDetails',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=50, null=True)),
('description', models.CharField(blank=True, max_length=150, null=True)),
('announcement', models.IntegerField(blank=True, null=True)),
('directdial', models.CharField(blank=True, max_length=50, null=True)),
('invalid_loops', models.CharField(blank=True, max_length=10, null=True)),
('invalid_retry_recording', models.CharField(blank=True, max_length=25, null=True)),
('invalid_destination', models.CharField(blank=True, max_length=50, null=True)),
('timeout_enabled', models.CharField(blank=True, max_length=50, null=True)),
('invalid_recording', models.CharField(blank=True, max_length=25, null=True)),
('retvm', models.CharField(blank=True, max_length=8, null=True)),
('timeout_time', models.IntegerField(blank=True, null=True)),
('timeout_recording', models.CharField(blank=True, max_length=25, null=True)),
('timeout_retry_recording', models.CharField(blank=True, max_length=25, null=True)),
('timeout_destination', models.CharField(blank=True, max_length=50, null=True)),
('timeout_loops', models.CharField(blank=True, max_length=10, null=True)),
('timeout_append_announce', models.IntegerField(blank=True, null=True)),
('invalid_append_announce', models.IntegerField(blank=True, null=True)),
('timeout_ivr_ret', models.IntegerField(blank=True, null=True)),
('invalid_ivr_ret', models.IntegerField(blank=True, null=True)),
('alertinfo', models.CharField(blank=True, max_length=150, null=True)),
('rvolume', models.CharField(blank=True, max_length=2, null=True)),
],
options={
'verbose_name': 'Ivr_details',
'verbose_name_plural': 'Ivr_details',
'db_table': 'ivr_details',
},
),
migrations.CreateModel(
name='Ps_aors',
fields=[
('id', models.BigIntegerField(primary_key=True, serialize=False, unique=True, verbose_name='Unique ID')),
('max_contacts', models.PositiveIntegerField(choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10)], verbose_name='max_contacts')),
('qualify_frequency', models.PositiveIntegerField(choices=[(30, 30), (40, 40), (50, 50), (60, 60)], verbose_name='Qualites de Frequence')),
('contact', models.CharField(blank=True, db_column='contact', max_length=255, null=True)),
('default_expiration', models.PositiveIntegerField(blank=True, null=True)),
('minimum_expiration', models.PositiveIntegerField(blank=True, null=True)),
('remove_existing', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('authenticate_qualify', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('maximum_expiration', models.PositiveIntegerField(blank=True, null=True)),
('outbound_proxy', models.CharField(blank=True, max_length=40, null=True)),
('support_path', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('qualify_timeout', models.FloatField(blank=True, null=True)),
('voicemail_extension', models.CharField(blank=True, max_length=40, null=True)),
],
options={
'verbose_name': 'Ps_aor',
'verbose_name_plural': 'Ps_aors',
'db_table': 'ps_aors',
},
),
migrations.CreateModel(
name='Ps_auths',
fields=[
('id', models.BigIntegerField(primary_key=True, serialize=False, unique=True, verbose_name='Unique ID')),
('auth_type', models.CharField(blank=True, default='userpass', max_length=200, null=True)),
('nonce_lifetime', models.PositiveIntegerField(blank=True, null=True)),
('md5_cred', models.CharField(blank=True, max_length=40, null=True)),
('password', models.CharField(max_length=80)),
('realm', models.CharField(blank=True, max_length=40, null=True)),
('username', models.CharField(help_text='101', max_length=40)),
('refresh_token', models.CharField(blank=True, max_length=255, null=True)),
('oauth_clientid', models.CharField(blank=True, max_length=255, null=True)),
('oauth_secret', models.CharField(blank=True, max_length=255, null=True)),
],
options={
'verbose_name': 'Ps_auth',
'verbose_name_plural': 'Ps_auths',
'db_table': 'ps_auths',
},
),
migrations.CreateModel(
name='QueueLog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('time', models.CharField(db_index=True, editable=False, max_length=20, null=True)),
('callid', models.CharField(blank=True, db_index=True, editable=False, max_length=32)),
('queue', models.CharField(blank=True, db_column='queuename', db_index=True, editable=False, max_length=32)),
('agent', models.CharField(blank=True, db_index=True, editable=False, max_length=32)),
('event', models.CharField(blank=True, db_index=True, editable=False, max_length=32)),
('data', models.TextField(blank=True, editable=False)),
],
options={
'db_table': 'queues_log',
},
),
migrations.CreateModel(
name='QueueMember',
fields=[
('id', models.IntegerField(primary_key=True, serialize=False, unique=True, verbose_name='Unique ID')),
('queue_name', models.CharField(blank=True, max_length=80, null=True)),
('interface', models.CharField(blank=True, max_length=80, null=True)),
('membername', models.CharField(blank=True, max_length=80, null=True)),
('state_interface', models.CharField(blank=True, max_length=80, null=True)),
('penalty', models.IntegerField(blank=True, null=True)),
('paused', models.IntegerField(blank=True, null=True)),
('uniqueid', models.IntegerField(blank=True, null=True)),
('wrapuptime', models.IntegerField(blank=True, null=True)),
],
options={
'verbose_name': 'Queue_members',
'verbose_name_plural': 'Queue_members',
'db_table': 'queue_members',
},
),
migrations.CreateModel(
name='QueueRules',
fields=[
('id', models.IntegerField(primary_key=True, serialize=False, unique=True, verbose_name='Unique ID')),
('rule_name', models.CharField(blank=True, max_length=80, null=True)),
('time', models.CharField(blank=True, max_length=32, null=True)),
('min_penalty', models.CharField(blank=True, max_length=32, null=True)),
('max_penalty', models.CharField(blank=True, max_length=32, null=True)),
],
options={
'verbose_name': 'Queues_rules',
'verbose_name_plural': 'Queues_rules',
'db_table': 'queues_rules',
},
),
migrations.CreateModel(
name='QueuesConfig',
fields=[
('id', models.IntegerField(primary_key=True, serialize=False, unique=True, verbose_name='Unique ID')),
('extension', models.CharField(blank=True, max_length=20, null=True)),
('descr', models.CharField(blank=True, max_length=35, null=True)),
('grppre', models.CharField(blank=True, max_length=100, null=True)),
('alertinfo', models.CharField(blank=True, max_length=254, null=True)),
('ringing', models.IntegerField(blank=True, null=True)),
('maxwait', models.CharField(blank=True, max_length=8, null=True)),
('password', models.CharField(blank=True, max_length=20, null=True)),
('ivr_id', models.CharField(blank=True, max_length=8, null=True)),
('dest', models.CharField(blank=True, max_length=50, null=True)),
('cwignore', models.IntegerField(blank=True, null=True)),
('queuewait', models.IntegerField(blank=True, null=True)),
('use_queue_context', models.IntegerField(blank=True, null=True)),
('togglehint', models.IntegerField(blank=True, null=True)),
('qnoanswer', models.IntegerField(blank=True, null=True)),
('callconfirm', models.IntegerField(blank=True, null=True)),
('callconfirm_id', models.IntegerField(blank=True, null=True)),
('qregex', models.CharField(blank=True, max_length=255, null=True)),
('agentannounce_id', models.IntegerField(blank=True, null=True)),
('joinannounce_id', models.IntegerField(blank=True, null=True)),
('monitor_type', models.CharField(blank=True, max_length=5, null=True)),
('monitor_heard', models.IntegerField(blank=True, null=True)),
('monitor_spoken', models.IntegerField(blank=True, null=True)),
('callback_id', models.CharField(blank=True, max_length=8, null=True)),
],
options={
'db_table': 'queues_config',
},
),
migrations.CreateModel(
name='VoiceMail',
fields=[
('id', models.BigIntegerField(primary_key=True, serialize=False, unique=True, verbose_name='Unique ID')),
('uniqueid', models.IntegerField(blank=True, null=True)),
('mailbox', models.CharField(blank=True, max_length=80, null=True)),
('password', models.CharField(blank=True, max_length=80, null=True)),
('fullname', models.CharField(blank=True, max_length=80, null=True)),
('alias', models.CharField(blank=True, max_length=80, null=True)),
('email', models.CharField(blank=True, max_length=80, null=True)),
('pager', models.CharField(blank=True, max_length=80, null=True)),
('attach', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=128, null=True)),
('attachfmt', models.CharField(blank=True, max_length=10, null=True)),
('servermail', models.CharField(blank=True, max_length=80, null=True)),
('language', models.CharField(blank=True, max_length=20, null=True)),
('tz', models.CharField(blank=True, max_length=30, null=True)),
('deletevoicemail', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=128, null=True)),
('sayid', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=128, null=True)),
('saycid', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=128, null=True)),
('sendvoicemail', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=128, null=True)),
('review', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=128, null=True)),
('tempgreetwarn', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=128, null=True)),
('operator', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=128, null=True)),
('envelope', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=128, null=True)),
('sayduration', models.IntegerField(blank=True, null=True)),
('forcename', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=200, null=True)),
('forcegreetings', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=200, null=True)),
('callback', models.CharField(blank=True, max_length=80, null=True)),
('dialout', models.CharField(blank=True, max_length=80, null=True)),
('exitcontext', models.CharField(blank=True, max_length=80, null=True)),
('maxmsg', models.IntegerField(blank=True, null=True)),
('volgain', models.IntegerField(blank=True, null=True)),
('imapuser', models.CharField(blank=True, max_length=80, null=True)),
('imappassword', models.CharField(blank=True, max_length=80, null=True)),
('imapserver', models.CharField(blank=True, max_length=80, null=True)),
('imapport', models.CharField(blank=True, max_length=8, null=True)),
('imapflags', models.CharField(blank=True, max_length=80, null=True)),
('stamp', models.DateTimeField(auto_now_add=True)),
('context', models.ForeignKey(blank=True, db_column='context', help_text='le contexte', null=True, on_delete=django.db.models.deletion.CASCADE, to='pbx.contexts')),
],
options={
'db_table': 'voicemail',
},
),
migrations.CreateModel(
name='Sippeers',
fields=[
('id', models.BigIntegerField(primary_key=True, serialize=False, unique=True, verbose_name='Unique ID')),
('name', models.CharField(help_text='Numéro', max_length=15)),
('ipaddr', models.GenericIPAddressField(blank=True, help_text='Addresse IP', null=True)),
('port', models.PositiveIntegerField(blank=True, help_text='Port de clients non dynamiques', null=True)),
('regseconds', models.BigIntegerField(blank=True, help_text='regseconds', null=True)),
('defaultuser', models.CharField(blank=True, help_text='Le serveur Asterisk enverra des INVITE à username@defaultip', max_length=15, null=True)),
('fullcontact', models.CharField(blank=True, help_text='fullcontact', max_length=100, null=True)),
('regserver', models.CharField(blank=True, help_text='regserver', max_length=20, null=True)),
('useragent', models.CharField(blank=True, help_text='useragent', max_length=20, null=True)),
('lastms', models.CharField(blank=True, help_text='lastms', max_length=100, null=True)),
('host', models.CharField(blank=True, default='dynamic', help_text="Liaison à un hôte ou une adresse IP spécifique, ou 'dynamique '", max_length=25, null=True)),
('type', models.CharField(blank=True, choices=[('friend', 'friend'), ('peer', 'peer'), ('user', 'user')], default='friend', help_text="Type d'utilisateur", max_length=8, null=True)),
('permit', models.CharField(blank=True, default='0.0.0.0/0', help_text='sous-réseaux autorisés', max_length=25, null=True)),
('deny', models.CharField(blank=True, default='0.0.0.0/0', help_text='sous-réseaux interdits', max_length=25, null=True)),
('secret', models.CharField(blank=True, help_text='Laisser vide pour générer', max_length=15, null=True)),
('md5secret', models.CharField(blank=True, max_length=40, null=True)),
('remotesecret', models.CharField(blank=True, max_length=40, null=True)),
                ('dtmfmode', models.CharField(blank=True, choices=[('rfc2833', 'rfc2833'), ('info', 'info'), ('shortinfo', 'shortinfo'), ('inband', 'inband'), ('auto', 'auto')], help_text="En mode automatique, Asterisk utilisera le mode rfc2833 pour la transmission DTMF, par défaut, mais passera en mode intrabande pour les signaux DTMF, si le client distant n'indique pas dans le message SDP qu'il prend en charge le mode de transmission DTMF - rfc2833", max_length=10, null=True)),
('transport', models.CharField(blank=True, choices=[('udp', 'udp'), ('tcp', 'tcp'), ('udp,tcp', 'udp,tcp'), ('tcp,udp', 'tcp,udp')], help_text='Transport des données', max_length=40, null=True)),
('directmedia', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no'), ('nonat', 'nonat'), ('update', 'update')], default='no', help_text='Autoriser ou non le trafic direct', max_length=6, null=True)),
('nat', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no'), ('never', 'never'), ('route', 'route')], default='no', help_text='Autoriser ou non le travail via NAT', max_length=5, null=True)),
('callgroup', models.CharField(blank=True, help_text='callgroup', max_length=40, null=True)),
('pickupgroup', models.CharField(blank=True, max_length=40, null=True)),
('language', models.CharField(blank=True, max_length=40, null=True)),
('disallow', models.CharField(default='all', help_text='codecs interdits', max_length=40, null=True)),
('allow', models.CharField(default='all', help_text='codecs autorisés', max_length=40, null=True)),
('insecure', models.CharField(blank=True, choices=[('port', "Ignorez le numéro de port d'où provient l'authentification"), ('invite', "Ne nécessite pas d'INVITATION initiale pour l'authentification"), ('port,invite', "N'exigez pas l'INVITE initiale pour l'authentification et ignorez le port d'où provient la demande")], default='', help_text='ignorer', max_length=20, null=True)),
('trustrpid', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], default='no', help_text="Puis-je faire confiance à l'ID de partie distante reçu du client SIP", max_length=3, null=True)),
('progressinband', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no'), ('never', 'never')], default='', help_text='ignorer', max_length=20, null=True)),
('promiscredir', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('useclientcode', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('accountcode', models.CharField(blank=True, max_length=40, null=True)),
('setvar', models.CharField(blank=True, max_length=40, null=True)),
('callerid', models.CharField(blank=True, max_length=40, null=True)),
('amaflags', models.CharField(blank=True, max_length=40, null=True)),
('callcounter', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('busylevel', models.PositiveIntegerField(blank=True, null=True)),
('allowoverlap', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('allowsubscribe', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('videosupport', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('maxcallbitrate', models.PositiveIntegerField(blank=True, null=True)),
('rfc2833compensate', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('mailbox', models.CharField(help_text='101@default', max_length=40)),
('session_timers', models.CharField(blank=True, choices=[('accept', 'accept'), ('refuse', 'refuse'), ('originate', 'originate')], db_column='session-timers', max_length=10, null=True)),
('session_expires', models.PositiveIntegerField(blank=True, db_column='session-expires', null=True)),
('session_minse', models.PositiveIntegerField(blank=True, db_column='session-minse', null=True)),
('session_refresher', models.CharField(blank=True, choices=[('uac', 'uac'), ('uas', 'uas')], db_column='session-refresher', max_length=3, null=True)),
('t38pt_usertpsource', models.CharField(blank=True, max_length=40, null=True)),
('regexten', models.CharField(blank=True, max_length=40, null=True)),
('fromdomain', models.CharField(blank=True, max_length=40, null=True)),
('fromuser', models.CharField(blank=True, max_length=40, null=True)),
('qualify', models.CharField(blank=True, max_length=40, null=True)),
('defaultip', models.CharField(blank=True, max_length=40, null=True)),
('rtptimeout', models.PositiveIntegerField(blank=True, null=True)),
('rtpholdtimeout', models.PositiveIntegerField(blank=True, null=True)),
('sendrpid', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('outboundproxy', models.CharField(blank=True, max_length=40, null=True)),
('callbackextension', models.CharField(blank=True, max_length=40, null=True)),
('timert1', models.PositiveIntegerField(blank=True, null=True)),
('timerb', models.PositiveIntegerField(blank=True, null=True)),
('qualifyfreq', models.PositiveIntegerField(blank=True, null=True)),
('constantssrc', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('contactpermit', models.CharField(blank=True, max_length=40, null=True)),
('contactdeny', models.CharField(blank=True, max_length=40, null=True)),
('usereqphone', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('textsupport', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('faxdetect', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('buggymwi', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('auth', models.CharField(blank=True, max_length=40, null=True)),
('fullname', models.CharField(blank=True, max_length=40, null=True)),
('trunkname', models.CharField(blank=True, max_length=40, null=True)),
('cid_number', models.CharField(blank=True, max_length=40, null=True)),
('callingpres', models.CharField(blank=True, choices=[('allowed_not_screened', 'allowed_not_screened'), ('allowed_passed_screen', 'allowed_passed_screen'), ('allowed_failed_screen', 'allowed_failed_screen'), ('allowed', 'allowed'), ('prohib_not_screened', 'prohib_not_screened'), ('prohib_passed_screen', 'prohib_passed_screen'), ('prohib_failed_screen', 'prohib_failed_screen'), ('prohib', 'prohib')], help_text='callingpres', max_length=25, null=True)),
('mohinterpret', models.CharField(blank=True, max_length=40, null=True)),
('mohsuggest', models.CharField(blank=True, max_length=40, null=True)),
('parkinglot', models.CharField(blank=True, max_length=40, null=True)),
('hasvoicemail', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('subscribemwi', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('vmexten', models.CharField(blank=True, max_length=40, null=True)),
('autoframing', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('rtpkeepalive', models.PositiveIntegerField(blank=True, null=True)),
('call_limit', models.PositiveIntegerField(blank=True, db_column='call-limit', null=True)),
('g726nonstandard', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('ignoresdpversion', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('allowtransfer', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('dynamic', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('context', models.ForeignKey(blank=True, db_column='context', help_text='le contexte', null=True, on_delete=django.db.models.deletion.CASCADE, to='pbx.contexts')),
],
options={
'verbose_name': 'Sippeers',
'verbose_name_plural': 'Sippeers',
'db_table': 'sippeers',
},
),
migrations.CreateModel(
name='Sip_conf',
fields=[
('id', models.BigIntegerField(primary_key=True, serialize=False, unique=True, verbose_name='Unique ID')),
('name', models.CharField(help_text='Numéro', max_length=15)),
('host', models.CharField(default='dynamic', help_text="Liaison à un hôte ou une adresse IP spécifique, ou 'dynamique '", max_length=25)),
('nat', models.CharField(choices=[('yes', 'yes'), ('no', 'no'), ('never', 'never'), ('route', 'route')], default='no', help_text='Autoriser ou non le travail via NAT', max_length=5)),
('type', models.CharField(choices=[('friend', 'friend'), ('peer', 'peer'), ('user', 'user')], default='friend', help_text="Type d'utilisateur", max_length=8)),
('accountcode', models.CharField(blank=True, max_length=20, null=True)),
('amaflags', models.CharField(blank=True, choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10)], default='billing', help_text='indicateurs spéciaux pour contrôler le calcul par défaut', max_length=20, null=True)),
('callgroup', models.CharField(blank=True, help_text='callgroup', max_length=40, null=True)),
('callerid', models.CharField(blank=True, help_text="Laisser vide pour l'auto-substitution", max_length=250, null=True)),
('cancallforward', models.CharField(choices=[('yes', 'yes'), ('no', 'no')], default='yes', help_text="autoriser ou non le transfert d'appel", max_length=3)),
('directmedia', models.CharField(choices=[('yes', 'yes'), ('no', 'no'), ('nonat', 'nonat'), ('update', 'update')], default='no', help_text='Autoriser ou non le trafic direct', max_length=6)),
('defaultip', models.CharField(blank=True, help_text="Si vous connaissez l'adresse IP du téléphone, vous pouvez la saisir ici. Ces paramètres seront utilisés lors des appels vers ce téléphone s'il n'est pas déjà enregistré auprès du serveur. Après l'enregistrement, le téléphone lui-même indiquera à Asterisk sous quel nom d'utilisateur et quelle adresse IP il est disponible.", max_length=25, null=True)),
                ('dtmfmode', models.CharField(choices=[('rfc2833', 'rfc2833'), ('info', 'info'), ('shortinfo', 'shortinfo'), ('inband', 'inband'), ('auto', 'auto')], help_text="En mode automatique, Asterisk utilisera le mode rfc2833 pour la transmission DTMF, par défaut, mais passera en mode intrabande pour les signaux DTMF, si le client distant n'indique pas dans le message SDP qu'il prend en charge le mode de transmission DTMF - rfc2833", max_length=10)),
('fromuser', models.CharField(blank=True, max_length=80, null=True)),
('fromdomain', models.CharField(blank=True, max_length=80, null=True)),
('insecure', models.CharField(blank=True, choices=[('port', "Ignorez le numéro de port d'où provient l'authentification"), ('invite', "Ne nécessite pas d'INVITATION initiale pour l'authentification"), ('port,invite', "N'exigez pas l'INVITE initiale pour l'authentification et ignorez le port d'où provient la demande")], default='', help_text='ignorer', max_length=20, null=True)),
('language', models.CharField(default='fr', help_text='langue', max_length=2)),
('mailbox', models.CharField(help_text="Laisser vide pour l'auto-substitution", max_length=15, null=True)),
('md5secret', models.CharField(blank=True, help_text='Mot de passe MD5', max_length=80, null=True)),
('deny', models.CharField(blank=True, help_text='sous-réseaux interdits', max_length=25, null=True)),
('permit', models.CharField(blank=True, help_text='sous-réseaux autorisés', max_length=25, null=True)),
('mask', models.CharField(blank=True, help_text='obsolète', max_length=25, null=True)),
('musiconhold', models.CharField(blank=True, choices=[('ring 1', 'ring 1'), ('ring 2', 'ring 2'), ('ring 3', 'ring 3'), ('ring 4', 'ring 4'), ('ring 5', 'ring 5'), ('ring 6', 'ring 6'), ('ring 7', 'ring 7'), ('ring 8', 'ring 8'), ('ring 9', 'ring 9'), ('ring 10', 'ring 10')], db_column='musiconhold', db_index=True, help_text='musique en attente', max_length=100, null=True)),
('pickupgroup', models.CharField(blank=True, max_length=80, null=True)),
('qualify', models.CharField(choices=[('yes', 'yes'), ('no', 'no')], default='no', help_text="si oui, Asterisk enverra périodiquement (une fois toutes les 2 secondes) un message OPTIONS SIP pour vérifier que cet appareil fonctionne et qu'il est disponible pour passer des appels. Si un appareil donné ne répond pas dans un délai spécifié, Asterisk considère que cet appareil est éteint et indisponible pour passer des appels.", max_length=5)),
('regexten', models.CharField(blank=True, max_length=80, null=True)),
('restrictcid', models.CharField(blank=True, help_text='obsolète', max_length=25, null=True)),
('rtptimeout', models.CharField(blank=True, max_length=3, null=True)),
('rtpholdtimeout', models.CharField(blank=True, max_length=3, null=True)),
('secret', models.CharField(blank=True, help_text='Laisser vide pour générer', max_length=15)),
('setvar', models.CharField(blank=True, help_text='obsolète', max_length=25, null=True)),
('disallow', models.CharField(default='all', help_text='codecs interdits', max_length=100)),
('allow', models.CharField(default='alaw', help_text='codecs autorisés', max_length=100)),
('comment', models.TextField(blank=True, help_text='commenter', null=True)),
('trustrpid', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], default='no', help_text="Puis-je faire confiance à l'ID de partie distante reçu du client SIP", max_length=3, null=True)),
('sendrpid', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], default='yes', help_text='Il est nécessaire de transférer le SIP vers le client Remote-Party-ID', max_length=3, null=True)),
('videosupport', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], default='no', help_text='Support vidéo', max_length=3, null=True)),
('fullcontact', models.CharField(blank=True, help_text='fullcontact', max_length=80, null=True)),
('ipaddr', models.GenericIPAddressField(blank=True, help_text='Pour la compatibilité', null=True)),
('port', models.PositiveIntegerField(blank=True, help_text='Port de clients non dynamiques', null=True)),
('regseconds', models.BigIntegerField(blank=True, help_text='Pour la compatibilité', null=True)),
('username', models.CharField(blank=True, help_text='username', max_length=100, null=True)),
('regserver', models.CharField(blank=True, help_text='regserver', max_length=100, null=True)),
('useragent', models.CharField(blank=True, help_text='useragent', max_length=100, null=True)),
('lastms', models.CharField(blank=True, help_text='lastms', max_length=100, null=True)),
('defaultuser', models.CharField(blank=True, help_text='Le serveur Asterisk enverra des INVITE à username@defaultip', max_length=15, null=True)),
('context', models.ForeignKey(blank=True, db_column='context', help_text='le contexte', null=True, on_delete=django.db.models.deletion.CASCADE, to='pbx.contexts')),
],
options={
'db_table': 'sip_conf',
},
),
migrations.CreateModel(
name='Queue',
fields=[
('id', models.IntegerField(primary_key=True, serialize=False, unique=True, verbose_name='Unique ID')),
('name', models.CharField(blank=True, max_length=128, null=True)),
('musiconhold', models.CharField(blank=True, choices=[('ring 1', 'ring 1'), ('ring 2', 'ring 2'), ('ring 3', 'ring 3'), ('ring 4', 'ring 4'), ('ring 5', 'ring 5'), ('ring 6', 'ring 6'), ('ring 7', 'ring 7'), ('ring 8', 'ring 8'), ('ring 9', 'ring 9'), ('ring 10', 'ring 10')], max_length=128, null=True)),
('announce', models.CharField(blank=True, max_length=128, null=True)),
('timeout', models.IntegerField(blank=True, null=True)),
('ringinuse', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=128, null=True)),
('setinterfacevar', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=128, null=True)),
('setqueueentryvar', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=128, null=True)),
('monitor_format', models.CharField(blank=True, max_length=8, null=True)),
('membermacro', models.CharField(blank=True, max_length=512, null=True)),
('membergosub', models.CharField(blank=True, max_length=512, null=True)),
('queue_youarenext', models.CharField(blank=True, max_length=128, null=True)),
('queue_thereare', models.CharField(blank=True, max_length=128, null=True)),
('queue_callswaiting', models.CharField(blank=True, max_length=128, null=True)),
('queue_quantity1', models.CharField(blank=True, max_length=128, null=True)),
('queue_quantity2', models.CharField(blank=True, max_length=128, null=True)),
('queue_holdtime', models.CharField(blank=True, max_length=128, null=True)),
('queue_minutes', models.CharField(blank=True, max_length=128, null=True)),
('queue_minute', models.CharField(blank=True, max_length=128, null=True)),
('queue_seconds', models.CharField(blank=True, max_length=128, null=True)),
('queue_thankyou', models.CharField(blank=True, max_length=128, null=True)),
('queue_callerannonce', models.CharField(blank=True, max_length=128, null=True)),
('queue_reporthold', models.CharField(blank=True, max_length=128, null=True)),
('annonce_frequency', models.IntegerField(blank=True, null=True)),
('announce_to_first_user', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=128, null=True)),
('min_announce_frequency', models.IntegerField(blank=True, null=True)),
('annonce_round_seconds', models.IntegerField(blank=True, null=True)),
('announce_holdtime', models.CharField(blank=True, max_length=128, null=True)),
('announce_position', models.CharField(blank=True, max_length=128, null=True)),
('announce_position_limit', models.IntegerField(blank=True, null=True)),
('periodic_announce', models.CharField(blank=True, max_length=50, null=True)),
('periodic_announce_frequency', models.IntegerField(blank=True, null=True)),
('relative_periodic_announce', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=128, null=True)),
('random_periodic_announce', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=128, null=True)),
('retry', models.IntegerField(blank=True, null=True)),
('wrapuptime', models.IntegerField(blank=True, null=True)),
('penaltymemberslimit', models.IntegerField(blank=True, null=True)),
('autofill', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=128, null=True)),
('monitor_type', models.CharField(blank=True, max_length=128, null=True)),
('autopause', models.CharField(blank=True, choices=[('queue', 'queue'), ('autopause', 'autopause')], max_length=128, null=True)),
('autopausedelay', models.IntegerField(blank=True, null=True)),
('autopausebusy', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=128, null=True)),
('autopauseunavail', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=128, null=True)),
('maxlen', models.IntegerField(blank=True, null=True)),
('servicelevel', models.IntegerField(blank=True, null=True)),
('strategy', models.CharField(blank=True, choices=[('queue', 'queue'), ('strategy', 'strategy')], max_length=128, null=True)),
('joinemplty', models.CharField(blank=True, max_length=128, null=True)),
('leavewhenempty', models.CharField(blank=True, max_length=128, null=True)),
('reportholdtime', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=128, null=True)),
('memberdelay', models.IntegerField(blank=True, null=True)),
('weight', models.IntegerField(blank=True, null=True)),
('timeoutrestart', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=128, null=True)),
('defaultrule', models.CharField(blank=True, max_length=128, null=True)),
('timeoutpriority', models.CharField(blank=True, max_length=128, null=True)),
('context', models.ForeignKey(db_column='context', max_length=200, on_delete=django.db.models.deletion.CASCADE, to='pbx.contexts')),
],
options={
'verbose_name': 'Queues',
'verbose_name_plural': 'Queues',
'db_table': 'queues',
},
),
migrations.CreateModel(
name='Extensions',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('exten', models.CharField(blank=True, db_index=True, help_text='exten / template', max_length=40, null=True)),
('priority', models.IntegerField(blank=True, help_text='priorité', null=True)),
                ('app', models.CharField(blank=True, choices=[('Dial', '(Dial) Compose le numéro'), ('HangUp', '(HangUp) Posez le téléphone'), ('Wait', '(Wait) Attendez x secondes'), ('Answer', '(Answer) Décrocher le téléphone'), ('AddQueueMember', "AddQueueMember (file d'attente, canal) Supprimer du groupe"), ('Queue', "Queue (queue_name) Aller à la file d'attente"), ('PlayBack', '(PlayBack) Lire le fichier son'), ('Set', '(Set) Définir la variable'), ('Read', '(Read) Lisez le clav en changement.'), ('BackGround', '(BackGround) Jouez le son et attendez prolongé'), ('Bridge', '(Bridge) Faire un pont de 2 canaux'), ('Busy', '(Busy) Retour occupé'), ('ChangeMonitor', '(ChangeMonitor) Tour. surveiller le fichier de canal'), ('Congestion', '(Congestion) Direction surchargée'), ('DBdel', '(DBdel) Supprimer la clé de int. DB'), ('DBdeltree', "(DBdeltree) Supprimer l'arbre de l'ext. DB"), ('Echo', '(Echo) Jouer audible'), ('ConfBridge', '(ConfBridge) Créer un pont de conférence'), ('Exec', '(Exec) Exécutez adj. plan de numérotation'), ('ExecIf', '(ExecIf) Exécutez si'), ('ExecIfTime', '(ExecIfTime) Exécuter if + time'), ('GoSub', "GoSub([[context|]extension|]priority) Aller à l'extension puis revenir"), ('GoTo', "GoTo([[context|]extension|]priority) Aller à l'extension"), ('GoToIf', 'GoToIf(condition?context1,extension1,priority1:context2,extension2,priority2)'), ('GotoIfTime', '(GotoIfTime) Allez ext. si + temps'), ('ImportVar', '(ImportVar) Importer une variable dans une nouvelle'), ('Incomplete', '(Incomplete) Revenir insatisfait'), ('Macro', '(Macro) Exécuter la macro'), ('MacroExclusive', '(MacroExclusive) Exécuter. une seule macro'), ('MacroIf', '(MacroIf) Macro si'), ('Monitor', '(Monitor) Surveillance des canaux'), ('StopMonitor', '(StopMonitor) Arrêter la surveillance des canaux'), ('MSet', '(MSet) Const. variables de canal'), ('MusicOnHold', '(MusicOnHold)Écouter de la musique en attente'), ('NoCDR', "(NoCDR) N'écrivez pas de CDR"), ('NoOp', '(NoOp) Rien à faire'), ('Park', '(Park) se garer'), ('MeetMeChannelAdmin', '(MeetMeChannelAdmin) Administration des canaux'), ('ParkedCall', '(ParkedCall) Répondre à garé'), ('PauseMonitor', '(PauseMonitor) Mettre le moniteur en pause'), ('Proceeding', "(Proceeding) L'appel est passé"), ('Progress', '(Progress) Appel en cours'), ('RaiseException', '(RaiseException) Lancer une exception'), ('ReadExten', '(ReadExten) Lire le numéro depuis AC'), ('ReadFile', '(ReadFile) Autre fichier en perm. canaliser'), ('MeetMeAdmin', '(MeetMeAdmin) Administration de la salle'), ('Record', '(Record) Ecrire un fichier'), ('ResetCDR', '(ResetCDR) Réinitialiser le CDR'), ('RetryDial', "(RetryDial) Recomposer en cas d'échec"), ('RemoveQueueMember', 'RemoveQueueMember(queue,channel) Ajouter au groupe'), ('Ringing', '(Ringing) Le téléphone sonne'), ('SayAlpha', '(SayAlpha) Dites à Alpha'), ('SayDigits', '(SayDigits) Énoncez les nombres'), ('SayNumber', '(SayNumber) Numéro de conversation'), ('SayPhonetic', '(SayPhonetic) Parlez phonétiquement'), ('SendFAX', '(SendFAX) Envoyer un fax'), ('ReceiveFAX', '(ReceiveFAX) Recevoir un fax'), ('SetAMAFlags', '(SetAMAFlags) Définir le drapeau AMA'), ('SetCallerPres', "(SetCallerPres) Définir l'affichage de l'ID de l'appelant"), ('SetMusicOnHold', '(SetMusicOnHold) Installez des muses. attentes'), ('SIPAddHeader', '(SIPAddHeader) Âge. ish. zag. sip package'), ('SIPDtmfMode', '(SIPDtmfMode) Changer le mode DTMF'), ('SIPRemoveHeader', '(SIPRemoveHeader) Ud. ish. zag. sip package'), ('StartMusicOnHold', '(StartMusicOnHold) Commencez à jouer à MOH'), ('MeetMeCount', '(MeetMeCount) Contrer'), ('Transfer', '(Transfer) Transférer un appel vers un numéro'), ('TryExec', "(TryExec) Essayez d'exécuter"), ('TrySystem', "(TrySystem) Essayez d'exécuter. UNIX"), ('System', '(System) Exécuter la commande UNIX'), ('UnpauseMonitor', '(UnpauseMonitor) Réactiver le moniteur'), ('WaitExten', '(WaitExten) Attendez un autre'), ('WaitMusicOnHold', "(WaitMusicOnHold) Attendez l'ajout. jouer à MOH"), ('MeetMe', '(MeetMe) Application de conférence'), ('SLAStation', '(SLAStation) Exécuter la ligne partagée'), ('SLATrunk', '(SLATrunk) Apparence de ligne partagée')], db_index=True, help_text='application de plan de numérotation', max_length=40, null=True)),
('appdata', models.CharField(blank=True, db_index=True, help_text="paramètres d'application", max_length=256, null=True)),
('context', models.ForeignKey(blank=True, db_column='context', max_length=40, null=True, on_delete=django.db.models.deletion.CASCADE, to='pbx.contexts')),
],
options={
'verbose_name': 'Extension',
'verbose_name_plural': 'Extensions',
'db_table': 'extensions',
},
),
migrations.CreateModel(
name='Endpoints',
fields=[
('id', models.BigIntegerField(primary_key=True, serialize=False, unique=True, verbose_name='Unique ID')),
('transport', models.CharField(choices=[('transport-udp', 'transport-udp'), ('transport-tcp', 'transport-tcp')], max_length=40, null=True, verbose_name='transport')),
('aors', models.PositiveIntegerField(db_column='aors', help_text='101', unique=True)),
('auth', models.PositiveIntegerField(db_column='auth', help_text='101', unique=True)),
('disallow', models.CharField(blank=True, choices=[('all', 'all'), ('alaw', 'alaw'), ('disallow', 'disallow')], default='all', max_length=200, null=True)),
('allow', models.CharField(blank=True, choices=[('all', 'all'), ('alaw', 'alaw'), ('disallow', 'disallow')], default='all', max_length=200, null=True)),
('direct_media', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('deny', models.CharField(blank=True, default='0.0.0.0/0', max_length=95, null=True, verbose_name='sous-réseaux interdits')),
('permit', models.CharField(blank=True, default='0.0.0.0/0', max_length=95, null=True, verbose_name='sous-réseaux autorisés')),
('mailboxes', models.CharField(help_text='100@default', max_length=40, null=True)),
('connected_line_method', models.CharField(blank=True, help_text='pjsip_connected_line_method_values', max_length=100, null=True)),
('direct_media_method', models.CharField(blank=True, help_text='pjsip_connected_line_method_values', max_length=100, null=True)),
('direct_media_glare_mitigation', models.CharField(blank=True, help_text='pjsip_direct_media_glare_mitigation_values', max_length=100, null=True)),
('disable_direct_media_on_nat', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('dtmf_mode', models.CharField(blank=True, help_text='pjsip_dtmf_mode_values_v3', max_length=100, null=True)),
('external_media_address', models.CharField(blank=True, max_length=40, null=True)),
('force_rport', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('ice_support', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('identify_by', models.CharField(blank=True, max_length=40, null=True)),
('moh_suggest', models.CharField(blank=True, max_length=40, null=True)),
('outbound_auth', models.CharField(blank=True, max_length=40, null=True)),
('outbound_proxy', models.CharField(blank=True, max_length=40, null=True)),
('rewrite_contact', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('rtp_ipv6', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('rtp_symmetric', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('send_diversion', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('send_pai', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('send_rpid', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('timers_min_se', models.PositiveIntegerField(blank=True, null=True)),
('timers', models.CharField(blank=True, help_text='pjsip_timer_values', max_length=100, null=True)),
('timers_sess_expires', models.PositiveIntegerField(blank=True, null=True)),
('callerid', models.CharField(blank=True, max_length=40, null=True)),
('callerid_privacy', models.CharField(blank=True, help_text='pjsip_cid_privacy_values', max_length=100, null=True)),
('callerid_tag', models.CharField(blank=True, max_length=40, null=True)),
('r100rel', models.CharField(blank=True, db_column='100rel', help_text='pjsip_100rel_values', max_length=100, null=True)),
('aggregate_mwi', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('trust_id_inbound', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('trust_id_outbound', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('use_ptime', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('use_avpf', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('media_encryption', models.CharField(blank=True, help_text='pjsip_media_encryption_values', max_length=100, null=True)),
('inband_progress', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('call_group', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('pickup_group', models.CharField(blank=True, max_length=40, null=True)),
('named_call_group', models.CharField(blank=True, max_length=40, null=True)),
('named_pickup_group', models.CharField(blank=True, max_length=40, null=True)),
('device_state_busy_at', models.PositiveIntegerField(blank=True, null=True)),
('fax_detect', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('t38_udptl', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('t38_udptl_ec', models.CharField(blank=True, help_text='pjsip_t38udptl_ec_values', max_length=100, null=True)),
('t38_udptl_maxdatagram', models.PositiveIntegerField(blank=True, null=True)),
('t38_udptl_nat', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('t38_udptl_ipv6', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('tone_zone', models.CharField(blank=True, max_length=40, null=True)),
('language', models.CharField(blank=True, max_length=40, null=True)),
('one_touch_recording', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('record_on_feature', models.CharField(blank=True, max_length=40, null=True)),
('record_off_feature', models.CharField(blank=True, max_length=40, null=True)),
('rtp_engine', models.CharField(blank=True, max_length=40, null=True)),
('allow_transfer', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('allow_subscribe', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('sdp_owner', models.CharField(blank=True, max_length=40, null=True)),
('sdp_session', models.CharField(blank=True, max_length=40, null=True)),
('tos_audio', models.CharField(blank=True, max_length=10, null=True)),
('tos_video', models.CharField(blank=True, max_length=10, null=True)),
('sub_min_expiry', models.PositiveIntegerField(blank=True, null=True)),
('from_domain', models.CharField(blank=True, max_length=40, null=True)),
('from_user', models.CharField(blank=True, max_length=40, null=True)),
('mwi_from_user', models.CharField(blank=True, max_length=40, null=True)),
('dtls_verify', models.CharField(blank=True, max_length=40, null=True)),
('dtls_rekey', models.CharField(blank=True, max_length=40, null=True)),
('dtls_cert_file', models.CharField(blank=True, max_length=200, null=True)),
('dtls_private_key', models.CharField(blank=True, max_length=200, null=True)),
('dtls_cipher', models.CharField(blank=True, max_length=200, null=True)),
('dtls_ca_file', models.CharField(blank=True, max_length=200, null=True)),
('dtls_ca_path', models.CharField(blank=True, max_length=200, null=True)),
('dtls_setup', models.CharField(blank=True, help_text='pjsip_dtls_setup_values', max_length=200, null=True)),
('srtp_tag_32', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('media_address', models.CharField(blank=True, max_length=40, null=True)),
('redirect_method', models.CharField(blank=True, help_text='pjsip_redirect_method_values', max_length=200, null=True)),
('set_var', models.TextField(blank=True, null=True)),
('cos_audio', models.PositiveIntegerField(blank=True, null=True)),
('cos_video', models.PositiveIntegerField(blank=True, null=True)),
('message_context', models.CharField(blank=True, max_length=40, null=True)),
('force_avp', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('media_use_received_transport', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('accountcode', models.CharField(blank=True, max_length=80, null=True)),
('user_eq_phone', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('moh_passthrough', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('media_encryption_optimistic', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('rpid_immediate', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('g726_non_standard', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('rtp_keepalive', models.PositiveIntegerField(blank=True, null=True)),
('rtp_timeout', models.PositiveIntegerField(blank=True, null=True)),
('rtp_timeout_hold', models.PositiveIntegerField(blank=True, null=True)),
('bind_rtp_to_media_address', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('voicemail_extension', models.CharField(blank=True, max_length=40, null=True)),
('mwi_subscribe_replaces_unsolicited', models.CharField(blank=True, help_text='ast_bool_values', max_length=200, null=True)),
('acl', models.CharField(blank=True, max_length=40, null=True)),
('contact_deny', models.CharField(blank=True, max_length=95, null=True)),
('contact_permit', models.CharField(blank=True, max_length=95, null=True)),
('contact_acl', models.CharField(blank=True, max_length=40, null=True)),
('subscribe_context', models.CharField(blank=True, max_length=40, null=True)),
('uniqueid', models.PositiveIntegerField(blank=True, null=True, unique=True)),
('fax_detect_timeout', models.PositiveIntegerField(blank=True, null=True)),
('contact_user', models.CharField(blank=True, max_length=80, null=True)),
('preferred_codec_only', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('asymmetric_rtp_codec', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('rtcp_mux', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('allow_overlap', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('refer_blind_progress', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('notify_early_inuse_ringing', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('max_audio_streams', models.PositiveIntegerField(blank=True, null=True)),
('max_video_streams', models.PositiveIntegerField(blank=True, null=True)),
('webrtc', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('dtls_fingerprint', models.CharField(blank=True, help_text='sha_hash_values', max_length=200, null=True)),
('incoming_mwi_mailbox', models.CharField(blank=True, max_length=40, null=True)),
('bundle', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('dtls_auto_generate_cert', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('follow_early_media_fork', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('accept_multiple_sdp_answers', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('suppress_q850_reason_headers', models.CharField(blank=True, choices=[('yes', 'yes'), ('no', 'no')], max_length=3, null=True)),
('trust_connected_line', models.CharField(blank=True, help_text='ast_bool_values', max_length=200, null=True)),
('send_connected_line', models.CharField(blank=True, help_text='ast_bool_values', max_length=200, null=True)),
('ignore_183_without_sdp', models.CharField(blank=True, help_text='ast_bool_values', max_length=200, null=True)),
('send_history_info', models.CharField(blank=True, help_text='ast_bool_values', max_length=200, null=True)),
('context', models.ForeignKey(db_column='context', max_length=200, on_delete=django.db.models.deletion.CASCADE, to='pbx.contexts')),
],
options={
'verbose_name': 'Ps_endpoints',
'verbose_name_plural': 'Ps_endpoints',
'db_table': 'ps_endpoints',
},
),
]
|
from typing import Optional
from rest_framework import serializers
from cards.models import CardEffect
def validate_effect_modifiers(card_effect: CardEffect, power: Optional[int], range_: Optional[float]):
"""
Checks if given card effect should have modifiers and checks if they were provided.
If they were not provided raises ValidationError.
:param card_effect: Given card effect.
:param power: Given power data.
    :param range_: Given range data.
:return: None.
:raise: serializers.ValidationError
"""
if card_effect.has_modifier:
if power is None or range_ is None:
raise serializers.ValidationError("All effect's modifiers must be provided.")
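# Hedged usage sketch (not part of the original module): one way this helper could be
# wired into a DRF serializer's validate() hook. The serializer and its field names
# below are hypothetical illustrations, not taken from the source.
class CardEffectModifierSerializer(serializers.Serializer):
    card_effect = serializers.PrimaryKeyRelatedField(queryset=CardEffect.objects.all())
    power = serializers.IntegerField(required=False, allow_null=True)
    range_ = serializers.FloatField(required=False, allow_null=True)
    def validate(self, attrs):
        # Delegate the cross-field check to the module-level helper above.
        validate_effect_modifiers(attrs['card_effect'], attrs.get('power'), attrs.get('range_'))
        return attrs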
|
# 3.2/pinmux.py
# Part of PyBBIO
# github.com/alexanderhiam/PyBBIO
# MIT License
#
# Beaglebone pinmux driver
# For Beaglebones with 3.2 kernel
import os  # used by export()/unexport() for os.path.exists
from config import *
from sysfs import kernelFileIO
def pinMux(gpio_pin, mode, preserve_mode_on_exit=False):
""" Uses kernel omap_mux files to set pin modes. """
# There's no simple way to write the control module registers from a
# user-level process because it lacks the proper privileges, but it's
# easy enough to just use the built-in file-based system and let the
# kernel do the work.
fn = GPIO[gpio_pin][0]
try:
with open(PINMUX_PATH+fn, 'wb') as f:
f.write(hex(mode)[2:]) # Write hex string (stripping off '0x')
except IOError:
print "*omap_mux file not found: '%s'" % (PINMUX_PATH+fn)
def export(gpio_pin):
""" Reserves a pin for userspace use with sysfs /sys/class/gpio interface.
Returns True if pin was exported, False if it was already under
userspace control. """
if ("USR" in gpio_pin):
# The user LEDs are already under userspace control
return False
gpio_num = GPIO[gpio_pin][2]
gpio_file = '%s/gpio%i' % (GPIO_FILE_BASE, gpio_num)
if (os.path.exists(gpio_file)):
# Pin already under userspace control
return False
with open(EXPORT_FILE, 'wb') as f:
f.write(str(gpio_num))
return True
def unexport(gpio_pin):
""" Returns a pin to the kernel with sysfs /sys/class/gpio interface.
Returns True if pin was unexported, False if it was already under
kernel control. """
if ("USR" in gpio_pin):
# The user LEDs are always under userspace control
return False
gpio_num = GPIO[gpio_pin][2]
gpio_file = '%s/gpio%i' % (GPIO_FILE_BASE, gpio_num)
if (not os.path.exists(gpio_file)):
# Pin not under userspace control
return False
with open(UNEXPORT_FILE, 'wb') as f:
f.write(str(gpio_num))
return True
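# Hedged usage sketch (illustrative only, not part of the original driver): reserve a
# pin for userspace, select its mux mode, then hand it back to the kernel. The pin key
# "GPIO1_6" and mode value 7 are assumed example values; valid keys come from the GPIO
# dict provided by config, and mux mode 7 is the GPIO function on AM335x pins.
if __name__ == '__main__':
    pin = 'GPIO1_6'
    if export(pin):
        pinMux(pin, 7)
        # ... drive the pin through the sysfs GPIO interface here ...
        unexport(pin)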
|
import backoff
import requests
import yaml
from artifactory import ArtifactoryPath, RepositoryLocal
import zipfile
import os
import subprocess
from multiprocessing.dummy import Pool
from functools import partial
from delivery_tool.exceptions import ApplicationException
import tempfile
tf = tempfile.TemporaryDirectory()
@backoff.on_exception(backoff.expo, requests.exceptions.RequestException, max_tries=10)
def thread_process(artifactory_url, image):
image = image[image.rfind('/') + len('/'):]
image_name = image[image.rfind('/') + len('/'):image.rfind(':')]
subprocess.run(['skopeo', '--insecure-policy', 'copy', '--dest-tls-verify=false',
'--format', 'v2s2', '--src-shared-blob-dir', f"{tf.name}/layers",
'oci:' + tf.name + '/images/' + image_name,
'docker://' + artifactory_url + '/' + image])
@backoff.on_exception(backoff.expo, requests.exceptions.RequestException, max_tries=10)
def connect(url, name, creds):
art = ArtifactoryPath(url + '/' + name, auth=(creds['login'], creds['password']))
return art
def upload(config, archive, log, creds):
exceptions = []
url = config['url']
name = config['repositories']['files']
name_docker = config['repositories']['docker']
try:
art = connect(url, name, creds)
except requests.exceptions.RequestException as e:
exceptions.append(e)
res_docker = art.find_repository_local(name_docker)
rep = art.find_repository_local(name)
if rep is None:
repos = RepositoryLocal(art, name)
repos.create()
else:
repos = rep
summary_size = 0
summary_size_docker = 0
for p in res_docker:
summary_size_docker += p.stat().size
res = art.find_repository_local(name)
for p in res:
summary_size += p.stat().size
log.info(f"Summary size of files in {name} is {round(summary_size / 1048576, 2)} MB")
log.info(f"Summary size of files in {name_docker} is {round(summary_size_docker / 1048576, 2)} MB")
zipfile.ZipFile(archive).extractall(tf.name)
log.info("===== Uploading docker images =====")
subprocess.run(['skopeo', '--insecure-policy', 'login', '--tls-verify=false', '-u', creds['login'], '-p',
creds['password'], config['docker_registry']])
for i in os.listdir(tf.name):
if (i != 'images') and (i != 'layers') and (i != 'images_info.yaml'):
repos.deploy_file(tf.name + '/' + i)
with open(tf.name + '/images_info.yaml', 'r') as im:
images_list = yaml.load(im, Loader=yaml.Loader)
pool = Pool(4)
try:
func = partial(thread_process, config['docker_registry'])
except requests.exceptions.RequestException as e:
exceptions.append(e)
threads = pool.map(func, images_list['images'])
pool.close()
pool.join()
summary_size_last = 0
summary_size_docker_last = 0
for p in res_docker:
summary_size_docker_last += p.stat().size
res = art.find_repository_local(name)
for p in res:
summary_size_last += p.stat().size
log.info(f"Summary size of files in {name} after the uploading is {round(summary_size_last / 1048576, 2)} MB")
log.info(f"Summary size of files in {name_docker} after the uploading is {round(summary_size_docker_last / 1048576, 2)} MB")
log.info(f"The difference in {name} is {round((summary_size_last - summary_size) / 1048576, 2)} MB")
log.info(f"The difference in {name_docker} is {round((summary_size_docker_last - summary_size_docker) / 1048576, 2)} MB")
    if exceptions:
        # exceptions holds exception objects, so stringify them before joining
        raise ApplicationException("Some files were not uploaded:\n" + '\n'.join(str(e) for e in exceptions))
if not exceptions:
log.info("All the files have been uploaded successfully")
|
import cmws.slurm_util
import cmws.examples.scene_understanding.run
def get_run_argss():
mode = "cube"
experiment_name = f"noColor_{mode}"
for seed in range(5):
for num_grid_rows, num_grid_cols in [[2, 2]]:
for shrink_factor in [0.01,0.03,0.1,0.3]:
                if num_grid_rows == 3:
                    num_primitives = 15  # increase primitives
                else:
                    num_primitives = 10
# CMWS
args = cmws.examples.scene_understanding.run.get_args_parser().parse_args([])
args.experiment_name = experiment_name
args.num_grid_rows = num_grid_rows
args.num_grid_cols = num_grid_cols
args.seed = seed
args.num_primitives = num_primitives
args.num_particles = 5
args.memory_size = 5
args.num_proposals_mws = 5
args.insomnia = 0.50
args.algorithm = "cmws_5"
args.model_type = "scene_understanding"
args.continue_training = True
args.remove_color=True
args.mode=mode
args.shrink_factor = shrink_factor
yield args
# RWS
args = cmws.examples.scene_understanding.run.get_args_parser().parse_args([])
args.experiment_name = experiment_name
args.num_grid_rows = num_grid_rows
args.num_grid_cols = num_grid_cols
args.seed = seed
args.num_primitives = num_primitives
args.num_particles = 50
args.insomnia = 0.50
args.algorithm = "rws"
args.model_type = "scene_understanding"
args.continue_training = True
args.remove_color = True
args.mode = mode
args.shrink_factor = shrink_factor
yield args
args = cmws.examples.scene_understanding.run.get_args_parser().parse_args([])
args.experiment_name = experiment_name
args.num_grid_rows = num_grid_rows
args.num_grid_cols = num_grid_cols
args.seed = seed
args.num_primitives = num_primitives
args.num_particles = 30
args.insomnia = 0.50
args.algorithm = "vimco_2"
args.model_type = "scene_understanding"
args.continue_training = True
args.remove_color = True
args.mode = mode
args.shrink_factor = shrink_factor
args.lr = 1e-4
yield args
args = cmws.examples.scene_understanding.run.get_args_parser().parse_args([])
args.experiment_name = experiment_name
args.num_grid_rows = num_grid_rows
args.num_grid_cols = num_grid_cols
args.seed = seed
args.num_primitives = num_primitives
args.num_particles = 30
args.insomnia = 0.50
args.algorithm = "reinforce"
args.model_type = "scene_understanding"
args.continue_training = True
args.remove_color = True
args.mode = mode
args.lr = 1e-4
args.shrink_factor = shrink_factor
yield args
def get_job_name(run_args):
return cmws.examples.scene_understanding.run.get_config_name(run_args)
def main(args):
cmws.slurm_util.submit_slurm_jobs(
get_run_argss, cmws.examples.scene_understanding.run.get_config_name, get_job_name, args.no_repeat, args.cancel, args.rm
)
if __name__ == "__main__":
parser = cmws.slurm_util.get_parser()
args = parser.parse_args()
main(args)
|
import pickle
import secrets
from collections import namedtuple
OfferDiff = namedtuple('OfferDiff', ('new', 'deleted'))
class PublishedMessage:
def __init__(self, text=None, messageId=None, authKeys=None):
self.text = text
self.messageId = messageId
self.authKeys = authKeys or list()
def addAuthKey(self, authKey=None):
if authKey is None:
authKey = secrets.token_hex(8)
if len(self.authKeys) > 2:
self.authKeys.pop(0)
self.authKeys.append(authKey)
def getNewestAuthKey(self):
return self.authKeys[-1]
def popAuthKey(self):
if len(self.authKeys) > 0:
self.authKeys.pop()
class Database:
@staticmethod
def loadOrCreate(path):
try:
with open(path, 'rb') as f:
db = pickle.load(f)
return db
except FileNotFoundError:
return Database()
def __init__(self):
self.publishedOffers = dict()
def putPublishedOffer(self, offerId, data):
self.publishedOffers[offerId] = data
def getOfferData(self, offerId):
return self.publishedOffers[offerId]
def getOrCreateOffer(self, offerId):
data = self.publishedOffers.get(offerId)
if data is None:
data = PublishedMessage()
self.publishedOffers[offerId] = data
return data
def deletePublishedOffer(self, offerId):
del self.publishedOffers[offerId]
def save(self, path):
with open(path, 'wb') as f:
pickle.dump(self, f)
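# Hedged usage sketch (illustrative, not part of the original module): persist a
# published offer and rotate its auth keys. The path and offer id are placeholders.
if __name__ == '__main__':
    db = Database.loadOrCreate('offers.db')
    offer = db.getOrCreateOffer('offer-123')
    offer.addAuthKey()  # appends a random 16-hex-char key, keeping at most the 3 most recent
    print(offer.getNewestAuthKey())
    db.save('offers.db')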
|
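# The uncommented snippet below (Python 2) reads lines of six integers "a b c d e f" and
# binary-searches x over [-10**70, 10**70] for a solution of a*x**4 + b*x**3 + c*x**2 + d*x + e = f,
# assuming F is non-decreasing over that range; it prints -1 when no non-negative root is
# found, otherwise the root truncated to an int.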
def F(x):
return a*(x**4)+b*(x**3)+c*(x**2)+d*(x)+e-f
while True:
try:
a, b, c, d, e, f = [int(i) for i in raw_input().split()]
start = -10.0**70
end = 10.0**70
last = -1
spec = False
while True:
            if end - start == 1 and F(end) != 0 and F(start) != 0:
spec = True
break
mid = (start+end)/2.0
if F(mid) == 0: break
if last == mid: break
if F(mid) > 0:
end = mid
else:
start = mid
last = mid
if spec or mid < 0: print "-1"
else: print int(mid)
    except EOFError: break
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-05 02:27
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('operacion', '0013_orden_comision'),
]
operations = [
migrations.AddField(
model_name='orden',
name='activo',
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name='orden',
name='entrega',
field=models.DateField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='orden',
name='pago',
field=models.BooleanField(default=False),
),
]
|
#!/usr/bin/env python
# This is a simple script meant to retrieve all files from a web server with an HTML-fronted S3 bucket and scan
# the files for secrets using duroc_hog. It will then post the results to Insights.
import os
import gzip
import pprint
import re
import requests
import tempfile
import sys
import subprocess
import json
import logging
import xml.etree.ElementTree as ET
import htmllistparse
import time
import urllib.parse
import copy
from datetime import datetime
loglevel = "WARNING"
for arg in sys.argv:
if arg.startswith("--log="):
loglevel = arg[6:]
numeric_level = getattr(logging, loglevel.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError("Invalid log level: %s" % loglevel)
logging.basicConfig(level=numeric_level)
# initialize auth tokens, fail if not present
DOWNLOAD_CONFIG_PATH = os.environ["DOWNLOAD_CONFIG_PATH"]
INSIGHTS_INSERT_KEY = os.environ["INSIGHTS_INSERT_KEY"]
INSIGHTS_ACCT_ID = os.environ["INSIGHTS_ACCT_ID"]
DUROC_HOG_PATH = os.environ["DUROC_HOG_PATH"]
# config file format (keys read by scan_endpoint below):
# [ { "endpoint": <S3 bucket name>, "regex": string, "name": string, "recursive": bool,
#     "prefixes": [string, ...], "after_date": <ISO-8601 date string> } ... ]
# each bucket is listed via https://<endpoint>.s3.amazonaws.com/?delimiter=/&prefix=<prefix>
with open(DOWNLOAD_CONFIG_PATH, "r") as f_j:
    config = json.load(f_j)
output_array = []
def scan_binary(file_url, content_item, config_item):
    logging.debug(f"scan_binary({file_url}, {content_item}, {config_item})")
output_array = []
r = requests.get(file_url)
tempdir = tempfile.gettempdir()
filename = os.path.basename(urllib.parse.urlparse(file_url).path)
tempfile_path = os.path.join(tempdir, filename)
f = open(tempfile_path, "wb")
f.write(r.content)
f.close()
duroc_hog_output = subprocess.run(
[DUROC_HOG_PATH, "-z", tempfile_path], capture_output=True, check=True
)
json_output = json.loads(duroc_hog_output.stdout)
os.remove(tempfile_path)
for finding in json_output:
output_array.append(
{
"eventType": "s3weblisting_secret_monitor",
"reason": finding["reason"],
"path": finding["path"],
"url": file_url,
"filename": filename,
"name": config_item['name'],
}
)
return output_array
def scan_endpoint(config_item):
endpoint = config_item['endpoint']
regex = re.compile(config_item['regex'])
name = config_item['name']
recursive = config_item['recursive']
prefixes = config_item['prefixes']
after_date = datetime.fromisoformat(config_item['after_date'])
    logging.debug(f"scan_endpoint({config_item})")
output_array = []
ns = {'aws': 'http://s3.amazonaws.com/doc/2006-03-01/'}
for prefix in prefixes:
url = f"https://{endpoint}.s3.amazonaws.com/?delimiter=/&prefix={prefix}"
        et_root = None
        try:
            et_root = ET.fromstring(requests.get(url).text)
        except Exception:
            logging.error(f"ET.fromstring(requests.get({url}).text) returned an exception")
            continue
        for content_item in et_root.findall('aws:Contents', ns):
# logging.debug(f"content_item: {content_item}")
# logging.debug(f"content_item.find('aws:Key', ns): {content_item.find('aws:Key', ns)}")
key = content_item.find('aws:Key', ns).text
size = int(content_item.find('aws:Size', ns).text)
modified = datetime.fromisoformat(content_item.find('aws:LastModified', ns).text.replace('Z', '+00:00'))
if regex.search(key) and size > 0 and modified > after_date:
file_url = f"https://{endpoint}.s3.amazonaws.com/{key}"
output_array.extend(scan_binary(file_url, content_item, config_item))
if recursive:
new_config_item = copy.deepcopy(config_item)
new_prefixes = [content_item[0].text for content_item in et_root.findall('aws:CommonPrefixes', ns)]
if len(new_prefixes) > 0:
new_config_item['prefixes'] = new_prefixes
output_array.extend(scan_endpoint(new_config_item))
return output_array
output_array = [result for config_item in config for result in scan_endpoint(config_item)]
# for config_item in config:
# output_array.extend(scan_url(config_item))
url = f"https://insights-collector.newrelic.com/v1/accounts/{INSIGHTS_ACCT_ID}/events"
headers = {
"Content-Type": "application/json",
"X-Insert-Key": INSIGHTS_INSERT_KEY,
"Content-Encoding": "gzip",
}
post = gzip.compress(json.dumps(output_array).encode("utf-8"))
logging.info(f"len(output_array) = {len(output_array)}")
logging.debug(output_array)
logging.info("Submitting data to New Relic Insights...")
r = requests.post(url, data=post, headers=headers)
logging.info(f"insights status code: {r.status_code}")
#
|
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
''' IMPORTS '''
from datetime import datetime
import dateparser
import requests
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
''' HELPER FUNCTIONS '''
class Client(BaseClient):
def __init__(self, base_url, username, password, client_id, client_secret, object_name, key_field, query_filter,
fields, history, verify, proxy, feedReputation, ok_codes=[], headers=None, auth=None):
super().__init__(base_url, verify=verify, proxy=proxy, ok_codes=ok_codes, headers=headers, auth=auth)
self.username = username
self.password = password
self.client_id = client_id
self.client_secret = client_secret
self.session_data = self.get_new_token()
if not self.session_data or not self.session_data['access_token']:
return_error("Failed to get access token for Salesforce integration")
self._headers = {
"Authorization": f"Bearer {self.session_data['access_token']}",
"Content-Type": "application/json"
}
self._base_url = urljoin(self._base_url, '/services/data/v39.0/')
self.object_name = object_name
self.key_field = key_field
self.query_filter = query_filter
self.fields = fields
self.history = history
self.feedReputation = feedReputation
self.score = 1 if self.feedReputation == 'Good'\
else 2 if self.feedReputation == 'Suspicious'\
else 3 if self.feedReputation == 'Bad'\
else 0
def get_new_token(self):
body = {
"grant_type": "password",
"client_id": self.client_id,
"client_secret": self.client_secret,
"username": self.username,
"password": self.password
}
res = self._http_request(
'POST',
'/services/oauth2/token',
headers={
'Content-Type': 'application/x-www-form-urlencoded'
},
data=body
)
return res
def query_object(self, fields, table, condition=None):
if condition:
query = f"SELECT {','.join(fields)} FROM {table} WHERE {condition}"
else:
query = f"SELECT {','.join(fields)} FROM {table}"
return self.raw_query(query)
def raw_query(self, query, raw_query=False):
params = {
"q": query
}
if raw_query:
res = self._http_request(
'GET',
f'{query}',
timeout=600
)
else:
res = self._http_request(
'GET',
'query',
params=params,
timeout=600
)
return res
def get_object_description(self):
res = self._http_request('GET', f'sobjects/{self.object_name}/describe/')
return res
def fetch_indicators_command(client, manual_run=False):
indicators_unparsed: List[Dict] = list()
indicators = list()
now = datetime.utcnow()
history_date = dateparser.parse(f"{client.history} days ago", settings={'RELATIVE_BASE': now})
assert history_date is not None, f'could not parse {client.history} days ago'
date_filter = history_date.strftime("%Y-%m-%dT%H:%M:%S.000+0000")
latest_mod_date = None
last_run = demisto.getLastRun().get('lastRun')
object_fields = None
if client.fields:
object_fields = client.fields.split(",")
else:
object_fields = sorted([x['name'] for x in client.get_object_description()['fields']])
if "id" not in object_fields and "Id" not in object_fields:
object_fields.append("id")
    if "CreatedDate" not in object_fields:
        object_fields.append("CreatedDate")
    if "LastModifiedDate" not in object_fields:
        object_fields.append("LastModifiedDate")
# Define whether to use a user provided query
if client.query_filter:
search_criteria = f"{client.query_filter}"
# If there is a last run date, use it if there is not already one specified
if last_run and "LastModifiedDate" not in client.query_filter:
search_criteria = f"LastModifiedDate >= {last_run} AND {client.query_filter}"
else:
# Define which date range to use if there is no user criteria
if last_run:
search_criteria = f"LastModifiedDate >= {last_run}"
else:
search_criteria = f"CreatedDate >= {date_filter}"
indicators_raw = client.query_object(object_fields, client.object_name, search_criteria)
if indicators_raw.get('totalSize', 0) > 0:
for indicator in indicators_raw.get('records', []):
indicators_unparsed.append({k: v for k, v in indicator.items() if k != 'attributes'})
more_records = True if indicators_raw.get('nextRecordsUrl', None) else False
while more_records:
next_records = "/".join(indicators_raw.get('nextRecordsUrl').split("/")[-2:])
indicators_raw = client.raw_query(next_records, raw_query=True)
for indicator in indicators_raw.get('records', []):
indicators_unparsed.append({k: v for k, v in indicator.items() if k != 'attributes'})
more_records = True if indicators_raw.get('nextRecordsUrl', None) else False
if indicators_unparsed and len(indicators_unparsed) > 0:
mod_date = sorted([x.get('LastModifiedDate')for x in indicators_unparsed], reverse=True)[0] # type: ignore
latest_mod_date = dateparser.parse(mod_date) # type: ignore
for item in indicators_unparsed:
try:
value = item[client.key_field] if client.key_field in item else None
if value:
item['object_name'] = client.object_name
indicator = {
"value": value,
"type": client.object_name,
"rawJSON": item,
"score": client.score
}
indicators.append(indicator)
except Exception:
pass
if not manual_run:
# Update the last run time if there was a LastModifiedDate found
if latest_mod_date:
last_run = latest_mod_date.strftime("%Y-%m-%dT%H:%M:00Z")
demisto.setLastRun({"lastRun": last_run})
# We submit indicators in batches
for b in batch(indicators, batch_size=2000):
demisto.createIndicators(b)
else:
demisto.results({
"SFDC.Indicators": indicators,
"Count": len(indicators)
})
def test_module(client):
demisto.results('ok')
def main():
params = demisto.params()
proxies = handle_proxy()
verify_certificate = not params.get('insecure', False)
url = params.get('InstanceURL')
credentials = params.get('credentials')
username = credentials.get('identifier')
password = credentials.get('password')
client_id = params.get('clientID')
client_secret = params.get('clientSecret')
object_name = params.get('object')
key_field = params.get('key_field')
query_filter = params.get('filter', None)
fields = params.get('fields', None)
history = params.get('indicator_history', 365)
reputation = params.get('feedReputation', 'None')
command = demisto.command()
client = Client(url, username, password, client_id, client_secret, object_name, key_field,
query_filter, fields, history, verify_certificate, proxies, reputation)
if command == 'test-module':
test_module(client)
elif command == 'fetch-indicators':
fetch_indicators_command(client)
elif command == 'salesforce-get-indicators':
fetch_indicators_command(client, manual_run=True)
if __name__ in ['__main__', 'builtin', 'builtins']:
main()
|
from django.apps import AppConfig
class GifcolAppConfig(AppConfig):
name = 'gifcol_app'
|
import pyexlatex as pl
import pyexlatex.table as lt
import pyexlatex.presentation as lp
import pyexlatex.graphics as lg
import pyexlatex.layouts as ll
import plbuild
from lectures.intro.main import get_intro_lecture
from plbuild.paths import images_path
from schedule.main import LECTURE_1_NAME
AUTHORS = ['Nick DeRobertis']
DOCUMENT_CLASS = pl.Document
OUTPUT_LOCATION = plbuild.paths.DOCUMENTS_BUILD_PATH
HANDOUTS_OUTPUT_LOCATION = None
TITLE = LECTURE_1_NAME
ORDER = 'LN1'
def get_content():
return [
get_intro_lecture().to_models()
]
DOCUMENT_CLASS_KWARGS = {}
OUTPUT_NAME = TITLE
|
# coding=utf-8
# *** WARNING: this file was generated by Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
import pulumi_kubernetes
__all__ = ['IngressControllerArgs', 'IngressController']
@pulumi.input_type
class IngressControllerArgs:
def __init__(__self__, *,
controller: Optional[pulumi.Input['ControllerArgs']] = None,
default_backend: Optional[pulumi.Input['ControllerDefaultBackendArgs']] = None,
dh_param: Optional[pulumi.Input[str]] = None,
fullname_override: Optional[pulumi.Input[str]] = None,
helm_options: Optional[pulumi.Input['ReleaseArgs']] = None,
image_pull_secrets: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.LocalObjectReferenceArgs']]]] = None,
name_override: Optional[pulumi.Input[str]] = None,
pod_security_policy: Optional[pulumi.Input['ControllerPodSecurityPolicyArgs']] = None,
rbac: Optional[pulumi.Input['ControllerRBACArgs']] = None,
revision_history_limit: Optional[pulumi.Input[int]] = None,
service_account: Optional[pulumi.Input['ControllerServiceAccountArgs']] = None,
tcp: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
udp: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None):
"""
        The set of arguments for constructing an IngressController resource.
:param pulumi.Input['ControllerDefaultBackendArgs'] default_backend: Default 404 backend.
:param pulumi.Input[str] dh_param: A base64ed Diffie-Hellman parameter. This can be generated with: openssl dhparam 4096 2> /dev/null | base64 Ref: https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/ssl-dh-param.
:param pulumi.Input[str] fullname_override: Overrides for generated resource names.
:param pulumi.Input['ReleaseArgs'] helm_options: HelmOptions is an escape hatch that lets the end user control any aspect of the Helm deployment. This exposes the entirety of the underlying Helm Release component args.
:param pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.LocalObjectReferenceArgs']]] image_pull_secrets: Optional array of imagePullSecrets containing private registry credentials Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/.
:param pulumi.Input[str] name_override: Overrides for generated resource names.
:param pulumi.Input['ControllerPodSecurityPolicyArgs'] pod_security_policy: If true, create & use Pod Security Policy resources https://kubernetes.io/docs/concepts/policy/pod-security-policy/
:param pulumi.Input['ControllerRBACArgs'] rbac: Enable RBAC as per https://github.com/kubernetes/ingress-nginx/blob/main/docs/deploy/rbac.md and https://github.com/kubernetes/ingress-nginx/issues/266
:param pulumi.Input[int] revision_history_limit: Rollback limit.
:param pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]] tcp: TCP service key:value pairs Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md.
:param pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]] udp: UDP service key:value pairs Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md.
"""
if controller is not None:
pulumi.set(__self__, "controller", controller)
if default_backend is not None:
pulumi.set(__self__, "default_backend", default_backend)
if dh_param is not None:
pulumi.set(__self__, "dh_param", dh_param)
if fullname_override is not None:
pulumi.set(__self__, "fullname_override", fullname_override)
if helm_options is not None:
pulumi.set(__self__, "helm_options", helm_options)
if image_pull_secrets is not None:
pulumi.set(__self__, "image_pull_secrets", image_pull_secrets)
if name_override is not None:
pulumi.set(__self__, "name_override", name_override)
if pod_security_policy is not None:
pulumi.set(__self__, "pod_security_policy", pod_security_policy)
if rbac is not None:
pulumi.set(__self__, "rbac", rbac)
if revision_history_limit is not None:
pulumi.set(__self__, "revision_history_limit", revision_history_limit)
if service_account is not None:
pulumi.set(__self__, "service_account", service_account)
if tcp is not None:
pulumi.set(__self__, "tcp", tcp)
if udp is not None:
pulumi.set(__self__, "udp", udp)
@property
@pulumi.getter
def controller(self) -> Optional[pulumi.Input['ControllerArgs']]:
return pulumi.get(self, "controller")
@controller.setter
def controller(self, value: Optional[pulumi.Input['ControllerArgs']]):
pulumi.set(self, "controller", value)
@property
@pulumi.getter(name="defaultBackend")
def default_backend(self) -> Optional[pulumi.Input['ControllerDefaultBackendArgs']]:
"""
Default 404 backend.
"""
return pulumi.get(self, "default_backend")
@default_backend.setter
def default_backend(self, value: Optional[pulumi.Input['ControllerDefaultBackendArgs']]):
pulumi.set(self, "default_backend", value)
@property
@pulumi.getter(name="dhParam")
def dh_param(self) -> Optional[pulumi.Input[str]]:
"""
A base64ed Diffie-Hellman parameter. This can be generated with: openssl dhparam 4096 2> /dev/null | base64 Ref: https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/ssl-dh-param.
"""
return pulumi.get(self, "dh_param")
@dh_param.setter
def dh_param(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "dh_param", value)
@property
@pulumi.getter(name="fullnameOverride")
def fullname_override(self) -> Optional[pulumi.Input[str]]:
"""
Overrides for generated resource names.
"""
return pulumi.get(self, "fullname_override")
@fullname_override.setter
def fullname_override(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "fullname_override", value)
@property
@pulumi.getter(name="helmOptions")
def helm_options(self) -> Optional[pulumi.Input['ReleaseArgs']]:
"""
HelmOptions is an escape hatch that lets the end user control any aspect of the Helm deployment. This exposes the entirety of the underlying Helm Release component args.
"""
return pulumi.get(self, "helm_options")
@helm_options.setter
def helm_options(self, value: Optional[pulumi.Input['ReleaseArgs']]):
pulumi.set(self, "helm_options", value)
@property
@pulumi.getter(name="imagePullSecrets")
def image_pull_secrets(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.LocalObjectReferenceArgs']]]]:
"""
Optional array of imagePullSecrets containing private registry credentials Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/.
"""
return pulumi.get(self, "image_pull_secrets")
@image_pull_secrets.setter
def image_pull_secrets(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.LocalObjectReferenceArgs']]]]):
pulumi.set(self, "image_pull_secrets", value)
@property
@pulumi.getter(name="nameOverride")
def name_override(self) -> Optional[pulumi.Input[str]]:
"""
Overrides for generated resource names.
"""
return pulumi.get(self, "name_override")
@name_override.setter
def name_override(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name_override", value)
@property
@pulumi.getter(name="podSecurityPolicy")
def pod_security_policy(self) -> Optional[pulumi.Input['ControllerPodSecurityPolicyArgs']]:
"""
If true, create & use Pod Security Policy resources https://kubernetes.io/docs/concepts/policy/pod-security-policy/
"""
return pulumi.get(self, "pod_security_policy")
@pod_security_policy.setter
def pod_security_policy(self, value: Optional[pulumi.Input['ControllerPodSecurityPolicyArgs']]):
pulumi.set(self, "pod_security_policy", value)
@property
@pulumi.getter
def rbac(self) -> Optional[pulumi.Input['ControllerRBACArgs']]:
"""
Enable RBAC as per https://github.com/kubernetes/ingress-nginx/blob/main/docs/deploy/rbac.md and https://github.com/kubernetes/ingress-nginx/issues/266
"""
return pulumi.get(self, "rbac")
@rbac.setter
def rbac(self, value: Optional[pulumi.Input['ControllerRBACArgs']]):
pulumi.set(self, "rbac", value)
@property
@pulumi.getter(name="revisionHistoryLimit")
def revision_history_limit(self) -> Optional[pulumi.Input[int]]:
"""
Rollback limit.
"""
return pulumi.get(self, "revision_history_limit")
@revision_history_limit.setter
def revision_history_limit(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "revision_history_limit", value)
@property
@pulumi.getter(name="serviceAccount")
def service_account(self) -> Optional[pulumi.Input['ControllerServiceAccountArgs']]:
return pulumi.get(self, "service_account")
@service_account.setter
def service_account(self, value: Optional[pulumi.Input['ControllerServiceAccountArgs']]):
pulumi.set(self, "service_account", value)
@property
@pulumi.getter
def tcp(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
"""
TCP service key:value pairs Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md.
"""
return pulumi.get(self, "tcp")
@tcp.setter
def tcp(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
pulumi.set(self, "tcp", value)
@property
@pulumi.getter
def udp(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:
"""
UDP service key:value pairs Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md.
"""
return pulumi.get(self, "udp")
@udp.setter
def udp(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]):
pulumi.set(self, "udp", value)
class IngressController(pulumi.ComponentResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
controller: Optional[pulumi.Input[pulumi.InputType['ControllerArgs']]] = None,
default_backend: Optional[pulumi.Input[pulumi.InputType['ControllerDefaultBackendArgs']]] = None,
dh_param: Optional[pulumi.Input[str]] = None,
fullname_override: Optional[pulumi.Input[str]] = None,
helm_options: Optional[pulumi.Input[pulumi.InputType['ReleaseArgs']]] = None,
image_pull_secrets: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['pulumi_kubernetes.core.v1.LocalObjectReferenceArgs']]]]] = None,
name_override: Optional[pulumi.Input[str]] = None,
pod_security_policy: Optional[pulumi.Input[pulumi.InputType['ControllerPodSecurityPolicyArgs']]] = None,
rbac: Optional[pulumi.Input[pulumi.InputType['ControllerRBACArgs']]] = None,
revision_history_limit: Optional[pulumi.Input[int]] = None,
service_account: Optional[pulumi.Input[pulumi.InputType['ControllerServiceAccountArgs']]] = None,
tcp: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
udp: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
__props__=None):
"""
Ingress controller for Kubernetes using NGINX as a reverse proxy and load balancer
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['ControllerDefaultBackendArgs']] default_backend: Default 404 backend.
:param pulumi.Input[str] dh_param: A base64ed Diffie-Hellman parameter. This can be generated with: openssl dhparam 4096 2> /dev/null | base64 Ref: https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/ssl-dh-param.
:param pulumi.Input[str] fullname_override: Overrides for generated resource names.
:param pulumi.Input[pulumi.InputType['ReleaseArgs']] helm_options: HelmOptions is an escape hatch that lets the end user control any aspect of the Helm deployment. This exposes the entirety of the underlying Helm Release component args.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['pulumi_kubernetes.core.v1.LocalObjectReferenceArgs']]]] image_pull_secrets: Optional array of imagePullSecrets containing private registry credentials Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/.
:param pulumi.Input[str] name_override: Overrides for generated resource names.
:param pulumi.Input[pulumi.InputType['ControllerPodSecurityPolicyArgs']] pod_security_policy: If true, create & use Pod Security Policy resources https://kubernetes.io/docs/concepts/policy/pod-security-policy/
:param pulumi.Input[pulumi.InputType['ControllerRBACArgs']] rbac: Enable RBAC as per https://github.com/kubernetes/ingress-nginx/blob/main/docs/deploy/rbac.md and https://github.com/kubernetes/ingress-nginx/issues/266
:param pulumi.Input[int] revision_history_limit: Rollback limit.
:param pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]] tcp: TCP service key:value pairs Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md.
:param pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]] udp: UDP service key:value pairs Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[IngressControllerArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Ingress controller for Kubernetes using NGINX as a reverse proxy and load balancer
:param str resource_name: The name of the resource.
:param IngressControllerArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(IngressControllerArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
controller: Optional[pulumi.Input[pulumi.InputType['ControllerArgs']]] = None,
default_backend: Optional[pulumi.Input[pulumi.InputType['ControllerDefaultBackendArgs']]] = None,
dh_param: Optional[pulumi.Input[str]] = None,
fullname_override: Optional[pulumi.Input[str]] = None,
helm_options: Optional[pulumi.Input[pulumi.InputType['ReleaseArgs']]] = None,
image_pull_secrets: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['pulumi_kubernetes.core.v1.LocalObjectReferenceArgs']]]]] = None,
name_override: Optional[pulumi.Input[str]] = None,
pod_security_policy: Optional[pulumi.Input[pulumi.InputType['ControllerPodSecurityPolicyArgs']]] = None,
rbac: Optional[pulumi.Input[pulumi.InputType['ControllerRBACArgs']]] = None,
revision_history_limit: Optional[pulumi.Input[int]] = None,
service_account: Optional[pulumi.Input[pulumi.InputType['ControllerServiceAccountArgs']]] = None,
tcp: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
udp: Optional[pulumi.Input[Mapping[str, pulumi.Input[Mapping[str, pulumi.Input[str]]]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is not None:
raise ValueError('ComponentResource classes do not support opts.id')
else:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = IngressControllerArgs.__new__(IngressControllerArgs)
__props__.__dict__["controller"] = controller
__props__.__dict__["default_backend"] = default_backend
__props__.__dict__["dh_param"] = dh_param
__props__.__dict__["fullname_override"] = fullname_override
__props__.__dict__["helm_options"] = helm_options
__props__.__dict__["image_pull_secrets"] = image_pull_secrets
__props__.__dict__["name_override"] = name_override
__props__.__dict__["pod_security_policy"] = pod_security_policy
__props__.__dict__["rbac"] = rbac
__props__.__dict__["revision_history_limit"] = revision_history_limit
__props__.__dict__["service_account"] = service_account
__props__.__dict__["tcp"] = tcp
__props__.__dict__["udp"] = udp
__props__.__dict__["status"] = None
super(IngressController, __self__).__init__(
'kubernetes-ingress-nginx:index:IngressController',
resource_name,
__props__,
opts,
remote=True)
@property
@pulumi.getter
def status(self) -> pulumi.Output['outputs.ReleaseStatus']:
"""
Detailed information about the status of the underlying Helm deployment.
"""
return pulumi.get(self, "status")
|
# Generated by Django 3.1.4 on 2021-10-29 17:50
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dashboard', '0032_auto_20211029_2051'),
]
operations = [
migrations.AddField(
model_name='payment_link',
name='gender',
field=models.CharField(default='UniSex', max_length=20),
),
migrations.AlterField(
model_name='bookingrooms',
name='timestamp',
field=models.DateTimeField(default=datetime.datetime(2021, 10, 29, 23, 20, 57, 981763)),
),
]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 23 00:37:13 2018
@author: jack.lingheng.meng
"""
import logging
import tensorflow as tf
import numpy as np
import time
from datetime import datetime, date
import sched
from threading import Thread
import os
from Environment.LASEnv import LASEnv
from LASAgent.InternalEnvOfAgent import InternalEnvOfAgent
from LASAgent.InternalEnvOfCommunity import InternalEnvOfCommunity
# Logging
logging.basicConfig(filename = '../ROM_Experiment_results/ROM_experiment_'+datetime.now().strftime("%Y%m%d-%H%M%S")+'.log',
level = logging.INFO,
format='%(asctime)s:%(levelname)s: %(message)s')
#######################################################################
# Instatiate LAS virtual environment #
#######################################################################
# Instantiate LAS environment object
envLAS = LASEnv('127.0.0.1', 19997, reward_function_type = 'occupancy')
observation = envLAS.reset()
#######################################################################
# Instatiate LAS-Agent #
#######################################################################
# Note: 1. Set load_pretrained_agent_flag to "True" only when you have
# and want to load pretrained agent.
# 2. Keep observation unchanged if using pretrained agent.
agent_name = 'LAS_Single_Agent'
observation_space = envLAS.observation_space
action_space = envLAS.action_space
observation_space_name = []
action_space_name = []
x_order_MDP = 5
x_order_MDP_observation_type = 'concatenate_observation'
occupancy_reward_type = 'IR_distance'
interaction_mode = 'real_interaction'
load_pretrained_agent_flag = False
single_agent = InternalEnvOfAgent(agent_name,
observation_space,
action_space,
observation_space_name,
action_space_name,
x_order_MDP,
x_order_MDP_observation_type,
occupancy_reward_type,
interaction_mode,
load_pretrained_agent_flag)
logging.info('Instantiate LAS-Agent done!')
#######################################################################
# Instatiate LAS-Agent-Community #
#######################################################################
# Note: 1. Set load_pretrained_agent_flag to "True" only when you have and want
# to load pretrained agent.
# 2. Keep observation unchanged if using pretrained agent.
community_name = 'LAS_Agent_Community'
community_size = 3
x_order_MDP = 5
x_order_MDP_observation_type = 'concatenate_observation'
occupancy_reward_type = 'IR_distance'
interaction_mode = 'real_interaction'
load_pretrained_agent_flag = False
LAS_agent_community = InternalEnvOfCommunity(community_name,
community_size,
envLAS.observation_space,
envLAS.action_space,
envLAS.observation_space_name,
envLAS.action_space_name,
x_order_MDP,
x_order_MDP_observation_type,
occupancy_reward_type,
interaction_mode,
load_pretrained_agent_flag)
logging.info('Instantiate LAS-Agent-Community done!')
#######################################################################
# Schedual two experiments #
# Note:
# 1. Initialize Single_Agent and Agent_Community will take about 10 minutes.
# Thus, the master script should be run before 9:45am
# 2. Single_Agent.stop() will take about 3 minutes. Thus, if first experiment
# is stopped at 2:30pm, the second experiment should start at 2:35pm
# 3. Agent_Community.stop() will take about 10 minutes. Thus, if the second
# experiment is stopped at 4:00pm, the baseline bahavior should start at
# 4:15pm.
# Solution: to get rid of time-gap when switching behavior modes, use multiple
# threads to do Single_Agent.stop() and Agent_Community.stop().
#######################################################################
def interact_with_learning_agent(agent, env, end_time = '143000'):
try:
logging.info('Run {}, Start_time: {}, End_time: {}'.format(agent.name, datetime.now().strftime("%H%M%S"), end_time))
# Interact untill end_time or interrupted by 'Ctrl+c'
while not datetime.now().strftime("%H%M%S") > end_time:
observation = env._self_observe()
take_action_flag, action = agent.feed_observation(observation)
if take_action_flag == True:
observation, _, _, _ = env.step(action)
        # Save learned model in a background thread
        saving_thread = Thread(target=agent.stop)
        saving_thread.start()
logging.info('{}, Actual_End_time: {}'.format(agent.name, datetime.now().strftime("%H%M%S")))
except KeyboardInterrupt:
        # Save learned model in a background thread
        saving_thread = Thread(target=agent.stop)
        saving_thread.start()
logging.info('{}, Actual_End_time: {}'.format(agent.name, datetime.now().strftime("%H%M%S")))
def interact_with_prescribed_behavior():
# TODO: Please put prescribe behavior in this function.
pass
scheduler = sched.scheduler(time.time, time.sleep)
open_time = datetime.now()
# Schedule first experiment
first_experiment_start_time = '185000' # format: %H%M%S e.g. 1:00pm is 130000
first_experiment_end_time = '185500' # format: %H%M%S e.g. 2:30pm is 143000
first_experiment_start_delay = (datetime.strptime(date.today().strftime("%Y%m%d")+'-'+first_experiment_start_time, '%Y%m%d-%H%M%S') - open_time).total_seconds()
if first_experiment_start_delay < 0:
logging.error('First Experiment starts earlier than the open-time of ROM!')
scheduler.enter(first_experiment_start_delay,
1,
interact_with_learning_agent,
kwargs={'agent': single_agent,
'env': envLAS,
'end_time': first_experiment_end_time})
# Schedule second experiment
second_experiment_start_time = '185530' # format: %H%M%S e.g. 2:30pm is 143000
second_experiment_end_time = '191000' # format: %H%M%S e.g. 4:00pm is 160000
second_experiment_start_delay = (datetime.strptime(date.today().strftime("%Y%m%d")+'-'+second_experiment_start_time, '%Y%m%d-%H%M%S') - open_time).total_seconds()
if second_experiment_start_delay < 0:
logging.error('Second Experiment starts earlier than the end of First Experiment!')
scheduler.enter(second_experiment_start_delay, 1, interact_with_learning_agent,
kwargs={'agent': LAS_agent_community,
'env': envLAS,
'end_time': second_experiment_end_time})
if __name__ == '__main__':
# Run two experiments
logging.info('Run scheduler...')
scheduler.run()
logging.info('Scheduler done!')
envLAS.destroy()
|
# --------------------------------------------------------
# This code is modified from Jumpin2's repository.
# https://github.com/Jumpin2/HGA
# --------------------------------------------------------
import torch
import torch.nn as nn
from torch.nn import functional as F
import numpy as np
from model.modules import linear_weightdrop as dropnn
from torch.autograd import Variable
from model.modules.rnn_encoder import SentenceEncoderRNN
from model.modules.gcn import VideoAdjLearner, GCN
from model.modules.position_embedding import PositionEncoding
from model.modules.ban.ban import BAN
from model.modules.fusion.fusion import MotionApprFusion, AttFlat
# torch.set_printoptions(threshold=np.inf)
class MASN(nn.Module):
def __init__(
self,
vocab_size,
s_layers,
s_embedding,
resnet_input_size,
i3d_input_size,
hidden_size,
dropout_p=0.0,
gcn_layers=2,
answer_vocab_size=None,
q_max_len=35,
v_max_len=80,
ablation='none'):
super().__init__()
self.ablation = ablation
self.q_max_len = q_max_len
self.v_max_len = v_max_len
self.hidden_size = hidden_size
self.compress_appr_local = dropnn.WeightDropLinear(
resnet_input_size,
hidden_size,
weight_dropout=dropout_p,
bias=False)
self.compress_motion_local = dropnn.WeightDropLinear(
i3d_input_size,
hidden_size,
weight_dropout=dropout_p,
bias=False)
self.compress_appr_global = dropnn.WeightDropLinear(
resnet_input_size,
hidden_size,
weight_dropout=dropout_p,
bias=False)
self.compress_motion_global = dropnn.WeightDropLinear(
i3d_input_size,
hidden_size,
weight_dropout=dropout_p,
bias=False)
embedding_dim = s_embedding.shape[1] if s_embedding is not None else hidden_size
self.glove = nn.Embedding(vocab_size, embedding_dim, padding_idx=0)
if s_embedding is not None:
print("glove embedding weight is loaded!")
self.glove.weight = nn.Parameter(torch.from_numpy(s_embedding).float())
self.glove.weight.requires_grad = False
self.embedding_proj = nn.Sequential(
nn.Dropout(p=dropout_p),
nn.Linear(embedding_dim, hidden_size, bias=False)
)
self.sentence_encoder = SentenceEncoderRNN(
vocab_size,
hidden_size,
input_dropout_p=dropout_p,
dropout_p=dropout_p,
n_layers=s_layers,
bidirectional=True,
rnn_cell='lstm'
)
self.bbox_location_encoding = nn.Linear(6, 64)
self.pos_location_encoding = PositionEncoding(n_filters=64, max_len=self.v_max_len)
self.appr_local_proj = nn.Linear(hidden_size+128, hidden_size)
self.motion_local_proj = nn.Linear(hidden_size+128, hidden_size)
self.pos_enc = PositionEncoding(n_filters=512, max_len=self.v_max_len)
self.appr_v = nn.Linear(hidden_size*2, hidden_size)
self.motion_v = nn.Linear(hidden_size*2, hidden_size)
self.appr_adj = VideoAdjLearner(hidden_size, hidden_size)
self.appr_gcn = GCN(hidden_size, hidden_size, hidden_size, num_layers=gcn_layers)
self.motion_adj = VideoAdjLearner(hidden_size, hidden_size)
self.motion_gcn = GCN(hidden_size, hidden_size, hidden_size, num_layers=gcn_layers)
self.res_layernorm = nn.LayerNorm(hidden_size, elementwise_affine=False)
self.i3d_layernorm = nn.LayerNorm(hidden_size, elementwise_affine=False)
self.appr_vq_interact = BAN(hidden_size, glimpse=4)
self.motion_vq_interact = BAN(hidden_size, glimpse=4)
self.motion_appr_fusion = MotionApprFusion(hidden_size, hidden_size, n_layer=1)
self.attflat = AttFlat(hidden_size, hidden_size, 1, hidden_size)
if answer_vocab_size is not None:
self.fc = nn.Linear(hidden_size, answer_vocab_size)
else:
self.fc = nn.Linear(hidden_size, 1)
def forward(self, task, *args):
# expected sentence_inputs is of shape (batch_size, sentence_len, 1)
# expected video_inputs is of shape (batch_size, frame_num, video_feature)
self.task = task
if task == 'Count':
return self.forward_count(*args)
elif task == 'FrameQA':
return self.forward_frameqa(*args)
elif task == 'Action' or task == 'Trans':
return self.forward_trans_or_action(*args)
elif task == 'MS-QA':
return self.forward_msqa(*args)
def model_block(self, res_avg_inp, i3d_avg_inp, res_obj_inp, bbox_inp, i3d_obj_inp,
video_length, all_sen_inputs, all_ques_length):
q_mask = self.make_mask(all_sen_inputs, all_ques_length)
v_mask = self.make_mask(res_avg_inp[:,:,0], video_length)
q_emb = F.relu(self.embedding_proj(self.glove(all_sen_inputs))) # b, q_len, d
q_output, q_hidden = self.sentence_encoder(q_emb, input_lengths=all_ques_length)
q_hidden = q_hidden.squeeze()
bsz, v_len, obj_num = res_obj_inp.size(0), res_obj_inp.size(1), res_obj_inp.size(2)
q_len = q_output.size(1)
q_mask = q_mask[:,:q_len]
# make local and global feature
res_obj_inp = self.compress_appr_local(res_obj_inp) # b, v_len, N, d
i3d_obj_inp = self.compress_motion_local(i3d_obj_inp) # b, v_len, N, d
res_avg_inp = self.compress_appr_global(res_avg_inp) # b, v_len, d
i3d_avg_inp = self.compress_motion_global(i3d_avg_inp) # b, v_len, d
bbox_inp = self.bbox_location_encoding(bbox_inp) # b, v_len, N, d/8
pos_inp = self.pos_location_encoding(res_obj_inp.contiguous().view(bsz*obj_num, v_len, -1)) # 1, v_len, 64
pos_inp = pos_inp.unsqueeze(2).expand(bsz, v_len, obj_num, 64) * v_mask.unsqueeze(2).unsqueeze(3) # b, v_len, N, d/8
appr_local = self.appr_local_proj(torch.cat([res_obj_inp, bbox_inp, pos_inp], dim=3)) # b, v_len, N, d
motion_local = self.motion_local_proj(torch.cat([i3d_obj_inp, bbox_inp, pos_inp], dim=3)) # b, v_len, N, d
v_len = appr_local.size(1)
appr_local = appr_local.contiguous().view(bsz*v_len, obj_num, self.hidden_size)
motion_local = motion_local.contiguous().view(bsz*v_len, obj_num, self.hidden_size)
res_avg_inp = self.pos_enc(res_avg_inp) + res_avg_inp
res_avg_inp = res_avg_inp.contiguous().view(bsz*v_len, self.hidden_size)
res_avg_inp = res_avg_inp.unsqueeze(1).expand_as(appr_local)
appr_v = self.appr_v(torch.cat([appr_local, res_avg_inp], dim=-1))
i3d_avg_inp = self.pos_enc(i3d_avg_inp) + i3d_avg_inp
i3d_avg_inp = i3d_avg_inp.contiguous().view(bsz*v_len, self.hidden_size)
i3d_avg_inp = i3d_avg_inp.unsqueeze(1).expand_as(motion_local)
motion_v = self.motion_v(torch.cat([motion_local, i3d_avg_inp], dim=-1))
appr_v = appr_v.contiguous().view(bsz, v_len*obj_num, self.hidden_size)
motion_v = motion_v.contiguous().view(bsz, v_len*obj_num, self.hidden_size)
v_mask_expand = v_mask[:,:v_len].unsqueeze(2).expand(bsz, v_len, obj_num).contiguous().view(bsz, v_len*obj_num)
# object graph convolution
appr_adj = self.appr_adj(appr_v, v_mask_expand)
appr_gcn = self.appr_gcn(appr_v, appr_adj) # b, v_len*obj_num, d
motion_adj = self.motion_adj(motion_v, v_mask_expand)
motion_gcn = self.motion_gcn(motion_v, motion_adj) # b, v_len*obj_num, d
# vq interaction
appr_vq, _ = self.appr_vq_interact(appr_gcn, q_output, v_mask_expand, q_mask)
motion_vq, _ = self.motion_vq_interact(motion_gcn, q_output, v_mask_expand, q_mask)
# motion-appr fusion
U = torch.cat([appr_vq, motion_vq], dim=1) # b, 2*q_len, d
q_mask_ = torch.cat([q_mask, q_mask], dim=1)
U_mask = torch.matmul(q_mask_.unsqueeze(2), q_mask_.unsqueeze(2).transpose(1, 2))
fusion_out = self.motion_appr_fusion(U, q_hidden, U_mask)
fusion_out = self.attflat(fusion_out, q_mask_)
out = self.fc(fusion_out).squeeze()
return out
def make_mask(self, seq, seq_length):
mask = seq
mask = mask.data.new(*mask.size()).fill_(1)
for i, l in enumerate(seq_length):
mask[i][min(mask.size(1)-1, l):] = 0
mask = Variable(mask) # b, seq_len
mask = mask.to(torch.float)
return mask
def forward_count(
self, res_avg_inp, i3d_avg_inp, res_obj_inp, bbox_inp, i3d_obj_inp, video_length,
all_sen_inputs, all_ques_length, answers):
# out of shape (batch_size, )
out = self.model_block(
res_avg_inp, i3d_avg_inp, res_obj_inp, bbox_inp, i3d_obj_inp, video_length,
all_sen_inputs, all_ques_length)
predictions = torch.clamp(torch.round(out), min=1, max=10).long()
# answers of shape (batch_size, )
return out, predictions, answers
def forward_frameqa(
self, res_avg_inp, i3d_avg_inp, res_obj_inp, bbox_inp, i3d_obj_inp, video_length,
all_sen_inputs, all_ques_length, answers, answer_type):
# out of shape (batch_size, num_class)
out = self.model_block(
res_avg_inp, i3d_avg_inp, res_obj_inp, bbox_inp, i3d_obj_inp, video_length,
all_sen_inputs, all_ques_length)
_, max_idx = torch.max(out, 1)
# (batch_size, ), dtype is long
predictions = max_idx
# answers of shape (batch_size, )
return out, predictions, answers
def forward_trans_or_action(
self, res_avg_inp, i3d_avg_inp, res_obj_inp, bbox_inp, i3d_obj_inp, video_length,
all_cand_inputs, all_cand_length, answers, row_index):
all_cand_inputs = all_cand_inputs.permute(1, 0, 2)
all_cand_length = all_cand_length.permute(1, 0)
all_out = []
for idx in range(5):
# out of shape (batch_size, )
out = self.model_block(
res_avg_inp, i3d_avg_inp, res_obj_inp, bbox_inp, i3d_obj_inp, video_length,
all_cand_inputs[idx], all_cand_length[idx])
all_out.append(out)
# all_out of shape (batch_size, 5)
all_out = torch.stack(all_out, 0).transpose(1, 0)
_, max_idx = torch.max(all_out, 1)
# (batch_size, )
predictions = max_idx
# answers of shape (batch_size, )
return all_out, predictions, answers
def forward_msqa(
self, res_avg_inp, i3d_avg_inp, res_obj_inp, bbox_inp, i3d_obj_inp, video_length,
all_sen_inputs, all_ques_length, answers):
# out of shape (batch_size, num_class)
out = self.model_block(
res_avg_inp, i3d_avg_inp, res_obj_inp, bbox_inp, i3d_obj_inp, video_length,
all_sen_inputs, all_ques_length)
_, max_idx = torch.max(out, 1)
# (batch_size, ), dtype is long
predictions = max_idx
# answers of shape (batch_size, )
return out, predictions, answers
|
#!/usr/bin/env python3
import sys, base64
import xml.etree.ElementTree as ET
import xml.dom.minidom as md
from struct import unpack, unpack_from, iter_unpack
from pprint import pprint
from collections import defaultdict, namedtuple
Point = namedtuple("Point", "x y z")
class DataElement(ET.Element):
def __init__(self, tag, attrib={}):
ET.Element.__init__(self, tag, attrib=attrib)
self.data = None
def __setattr__(self, name, value):
        if name == "data":
self.__dict__["data"] = value
else:
ET.Element.__setattr__(self, name, value)
def __getattr__(self, name):
        if name == "data":
return self.__dict__["data"]
else:
return ET.Element.__getattr__(self, name)
def element(name, attrib={}, text=None):
elem = DataElement(name, attrib)
    if text is not None:
elem.text = str(text)
return elem
def parse_string(name, node, data):
node.text = data[0:data.index(0)].decode()
def parse_dirn(name, node, data):
node.text = data[0:data.index(0)].decode()
data = data[data.index(0):]
#whoever wrote the DIRN exporter doesn't memset() first
#perhaps interesting leak in 260 byte blobs?
#with open("/tmp/leaklog", "ab") as leaklog:
# leaklog.write(data)
def parse_int32(name, node, data):
node.text = str(unpack("<I", data)[0])
node.data = unpack("<I", data)[0]
def parse_6int32(name, node, data):
node.data = [Point(*x) for x in iter_unpack("<3i", data)]
for point in node.data:
corner = element("corner")
for k, v in zip(point._fields, point):
corner.append(element(k, text=str(v)))
node.append(corner)
def parse_pos(name, node, data):
node.data = Point(*unpack("<3i", data))
for k, v in zip(node.data._fields, node.data):
node.append(element(k, text=str(v)))
def parse_lfil(name, node, data):
node.data = [name[0].decode().strip('\x00') for name in iter_unpack("260s", data)]
for idx, name in enumerate(node.data):
node.append(element("name", attrib={"id": str(idx)}, text=name))
def parse_raw(name, node, data):
node.text = ''.join(format(x, '02x') for x in data)
node.data = data
def parse_unknown(name, node, data):
sys.stderr.write("Unknown block type: %s\n" % str(name))
parse_raw(name, node, data)
def parse_group(name, node, data):
for child in readGroup(data):
node.append(child)
def constant_factory(value):
return lambda: value
parsers = defaultdict(constant_factory(parse_unknown))
parsers.update({
"FILN": parse_string,
"DIRN": parse_dirn,
"RADI": parse_int32,
"IDNB": parse_int32,
"IDTY": parse_int32,
"BRIT": parse_int32,
"SELE": parse_int32,
"SCAL": parse_int32,
"AMBI": parse_int32,
"IDFI": parse_int32,
"BITS": parse_int32,
"WATR": parse_int32,
"BBOX": parse_6int32,
"POSI": parse_pos,
"OFST": parse_pos,
"CENT": parse_pos,
"ANGS": parse_pos,
"LFIL": parse_lfil,
"RAWD": parse_raw,
"GNRL": parse_group,
"TEXI": parse_group,
"STRU": parse_group,
"SNAP": parse_group,
"VIEW": parse_group,
"CTRL": parse_group,
"LINK": parse_group,
"OBJS": parse_group,
"OBJD": parse_group,
"LITS": parse_group,
"LITD": parse_group,
"FLAS": parse_group,
"FLAD": parse_group,
})
def blocks(data):
while len(data) > 8:
name, length = unpack_from('<4sI', data, 0)
childdata = data[8:8+length]
yield (name.decode(), length, childdata)
data = data[8+length:]
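# Chunk layout handled by blocks(), inferred from the '<4sI' unpack format above (illustration only):
#   bytes 0-3 : 4-character ASCII tag, e.g. b"RADI"
#   bytes 4-7 : little-endian uint32 payload length
#   bytes 8.. : payload -- raw data for leaf tags, nested chunks for group tags such as GNRL
# For example, b"RADI" + (4).to_bytes(4, "little") + b"\x2a\x00\x00\x00" would decode as RADI = 42.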
def readGroup(data):
for name, length, data in blocks(data):
node = element(name, attrib={"_length": str(length)})
parsers[name](name, node, data)
yield node
def BS6File(data):
return [x for x in readGroup(data)][0]
def readBS6File(path):
    with open(path, "rb") as fd:
data = fd.read()
return BS6File(data)
if __name__ == "__main__":
node = readBS6File(sys.argv[1])
text = md.parseString(ET.tostring(node)).toprettyxml()
if len(sys.argv) < 3:
print(text)
exit()
with open(sys.argv[2], "w") as fd:
fd.write(text)
|
import sys
import os
import json
from pathlib import Path
SETTINGS = {
'editor.renderWhitespace': 'boundary',
}
def _conf_exit(code, rm=False):
    input('\nFinishing up. Check the message above and press Enter.')
if rm: os.remove(__file__)
sys.exit(code)
def get_vscode_settings():
user = os.getenv('USERPROFILE')
if not user:
        print('Environment variable USERPROFILE was not found.')
_conf_exit(1)
return Path(user) / 'AppData' / 'Roaming' / 'Code' / 'User' / 'settings.json'
def merge_settings(path):
if not path.exists(): return SETTINGS
current_settings = json.loads(path.read_text(encoding='utf-8'))
return {**current_settings, **SETTINGS}
def write_settings(path, settings):
data = json.dumps(settings, indent=4, ensure_ascii=False)
path.write_text(data, encoding='utf-8')
def print_settings(settings):
    print('The following settings were applied:')
for k, v in settings.items():
print(f'{k}: {v}')
def main():
    print('Setting up VSCode...\n')
settings_path = get_vscode_settings()
settings = merge_settings(settings_path)
write_settings(settings_path, settings)
print_settings(SETTINGS)
_conf_exit(0, True)
if __name__ == "__main__":
main()
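# Merge behaviour illustration (assumed example, based on merge_settings above): keys already in the
# user's settings.json are preserved and SETTINGS wins on conflicts, e.g.
#   before: {"editor.fontSize": 14, "editor.renderWhitespace": "none"}
#   after : {"editor.fontSize": 14, "editor.renderWhitespace": "boundary"}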
|
from libraries import *
from preprocess import preprocess_
#load the train file tokens
with open(r'../data/train_stem.txt','r',encoding='utf-8') as file_:
train_stemm = file_.read().splitlines()
train_stem=[]
for i in train_stemm:
train_stem.append(i.split())
#make a single list of all the tokens and pass it to counter to get count of each token
temp = []
for token in train_stem:
temp = temp + token
count_dict = Counter(temp)
#below script creates dataframes for every topic containing sentences falling in that topic
'''topic_df = pd.read_csv(r'../data/generated/topic_df.csv')
print(topic_df.head())
for i in range(7):
df = topic_df[topic_df['topic']==i]
df.dropna().to_csv(r'../data/generated/topic'+str(i)+'.csv',index=False)'''
#read test data and clean the message column
test = pd.read_csv(r'../data/eval_data.csv')
test_msgs = test.Message.tolist()
stemmed_test_tokens = preprocess_(test_msgs)
temp = [' '.join(i) for i in stemmed_test_tokens]
X_test = pd.Series(temp)
vocablist = [key for key in count_dict if count_dict[key]>5]
hb = HBOS()
topic_outlier=[]
vectorizer = CountVectorizer(vocabulary=vocablist)
# The loop below iterates over every topic: it transforms that topic's sentences with the
# CountVectorizer, fits an HBOS model on the transformed data, and computes the decision
# score of each sentence. The 75th-percentile score is used as the threshold; a test
# sentence whose decision score falls below the threshold for any topic is an inlier.
for i in range(7):
X = vectorizer.transform(pd.read_csv(r'../data/generated/topic'+str(i)+'.csv')['text'])
hb.fit(X.toarray())
df = hb.decision_function(X.toarray())
threshold = np.quantile(df,0.75)
#plt.hist(df*-1,bins=50)
temp_ = vectorizer.transform(X_test)
sdf = hb.decision_function(temp_.toarray())
topic_outlier.append([0 if i<threshold else 1 for i in sdf ])
#plt.show()
out_frame = pd.DataFrame(topic_outlier).T
y_test=test['Outlier?'].tolist()
y_pred = []
for row in range(out_frame.shape[0]):
for i in np.array(out_frame.iloc[row,:]):
if i==1:
temp_ = 1
if i==0:
temp_ = 0
break
#y_pred.append(scipy.stats.mode(np.array(out_frame.iloc[row,:]))[0][0])
y_pred.append(temp_)
y_pred_ = [True if i==1 else False for i in y_pred]
#model scores
acc_score = metrics.accuracy_score(y_test,y_pred_)
print('\nmodel accuracy score ',acc_score)
prec_score = metrics.precision_score(y_test,y_pred_)
print('\nmodel precision score ',prec_score)
|
'''
databases
=========
Access to all the database stores.
'''
from .block_followers import (
ACCOUNTS_PROCESSED,
FOLLOWERS_SEEN,
FOLLOWERS_BLOCKED
)
from .block_media_replies import (
TWEETS_PROCESSED,
REPLIES_PROCESSED,
REPLIERS_SEEN,
REPLIERS_BLOCKED
)
def get_databases():
'''Get a map of all the databases by-module.'''
return {
'block_followers': {
'accounts_processed': ACCOUNTS_PROCESSED,
'followers_seen': FOLLOWERS_SEEN,
'followers_blocked': FOLLOWERS_BLOCKED,
},
'block_media_replies': {
'tweets_processed': TWEETS_PROCESSED,
'replies_processed': REPLIES_PROCESSED,
'repliers_seen': REPLIERS_SEEN,
'repliers_blocked': REPLIERS_BLOCKED
}
}
|
from django import forms
class UserInfoSearchForm(forms.Form):
real_name = forms.CharField(max_length=50, required=False)
sex = forms.CharField(max_length=50,required=False)
    age = forms.IntegerField(help_text='Age', required=False)
    email = forms.EmailField(max_length=50, required=False)
    # phone = forms.CharField(max_length=50, help_text='Mobile phone')
    birthday = forms.DateField(help_text="Date of birth", required=False)
    id_number = forms.CharField(max_length=20, help_text='ID card number', required=False)
    work_unit = forms.CharField(max_length=100, help_text='Employer information; may be left blank', required=False)
    contact_number = forms.CharField(max_length=50, help_text='Contact phone number', required=False)
    start_working_date = forms.DateField(help_text="Date started working", required=False)
    working_year = forms.IntegerField(help_text='Years of work experience; less than 10 months counts as a fractional year, more than 10 months is rounded to roughly 1 year', required=False)
    address = forms.CharField(max_length=200, help_text='Current residential address', required=False)
    id_card_address = forms.CharField(max_length=200, help_text='Address on ID card', required=False)
    postal_code = forms.CharField(max_length=20, help_text='Postal code', required=False)
    political_status = forms.CharField(max_length=20, required=False)
    # Foreign key fields
    two_inch_photo = forms.IntegerField(required=False)
    # Added later
    person_in_charge = forms.CharField(max_length=20, help_text='Person in charge of registration at the employer', required=False)
    nation_info = forms.IntegerField(help_text="Ethnicity information", required=False)
    fixed_telephone = forms.CharField(max_length=20, help_text='Landline phone number, including area code', required=False)
    unit_address = forms.CharField(max_length=200, help_text='Employer address', required=False)
    unit_nature = forms.IntegerField(help_text='Type of employer', required=False)
    hukou_province = forms.IntegerField(required=False)
    hukou_city = forms.IntegerField(required=False)
    hukou_county = forms.IntegerField(required=False)
    education_degree = forms.IntegerField(help_text='Education level', required=False)
    middle_school = forms.CharField(max_length=50, help_text='Junior high school', required=False)
    main_occupation = forms.CharField(max_length=50, help_text='Occupation', required=False)
    teacher_info = forms.IntegerField(help_text='Information on the person in charge of registration at the employer', required=False)
|
# Generated by Django 3.1.5 on 2021-01-17 19:45
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import simple_history.models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='TranslationGroup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name_en', models.CharField(blank=True, max_length=255)),
('name_pl', models.CharField(blank=True, max_length=255)),
('name_de', models.CharField(blank=True, max_length=255)),
('name_ru', models.CharField(blank=True, max_length=255)),
('order_index', models.IntegerField(default=0)),
('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='translations.translationgroup')),
],
),
migrations.CreateModel(
name='Translation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('key', models.TextField(db_index=True, max_length=2000)),
('value_en', models.TextField(blank=True, max_length=2000)),
('value_pl', models.TextField(blank=True, max_length=2000)),
('value_de', models.TextField(blank=True, max_length=2000)),
('value_ru', models.TextField(blank=True, max_length=2000)),
('state_en', models.CharField(choices=[('NEW', 'New'), ('TODO', 'To do'), ('READY_TO_REVIEW', 'Ready to review'), ('NEEDS_WORK', 'Needs work'), ('ACCEPTED', 'Accepted')], default='NEW', max_length=255)),
('state_pl', models.CharField(choices=[('NEW', 'New'), ('TODO', 'To do'), ('READY_TO_REVIEW', 'Ready to review'), ('NEEDS_WORK', 'Needs work'), ('ACCEPTED', 'Accepted')], default='NEW', max_length=255)),
('state_de', models.CharField(choices=[('NEW', 'New'), ('TODO', 'To do'), ('READY_TO_REVIEW', 'Ready to review'), ('NEEDS_WORK', 'Needs work'), ('ACCEPTED', 'Accepted')], default='NEW', max_length=255)),
('state_ru', models.CharField(choices=[('NEW', 'New'), ('TODO', 'To do'), ('READY_TO_REVIEW', 'Ready to review'), ('NEEDS_WORK', 'Needs work'), ('ACCEPTED', 'Accepted')], default='NEW', max_length=255)),
('order_index', models.IntegerField(default=0)),
('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='translations.translationgroup')),
],
),
migrations.CreateModel(
name='HistoricalTranslationGroup',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('name_en', models.CharField(blank=True, max_length=255)),
('name_pl', models.CharField(blank=True, max_length=255)),
('name_de', models.CharField(blank=True, max_length=255)),
('name_ru', models.CharField(blank=True, max_length=255)),
('order_index', models.IntegerField(default=0)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('parent', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='translations.translationgroup')),
],
options={
'verbose_name': 'historical translation group',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalTranslation',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('key', models.TextField(db_index=True, max_length=2000)),
('value_en', models.TextField(blank=True, max_length=2000)),
('value_pl', models.TextField(blank=True, max_length=2000)),
('value_de', models.TextField(blank=True, max_length=2000)),
('value_ru', models.TextField(blank=True, max_length=2000)),
('state_en', models.CharField(choices=[('NEW', 'New'), ('TODO', 'To do'), ('READY_TO_REVIEW', 'Ready to review'), ('NEEDS_WORK', 'Needs work'), ('ACCEPTED', 'Accepted')], default='NEW', max_length=255)),
('state_pl', models.CharField(choices=[('NEW', 'New'), ('TODO', 'To do'), ('READY_TO_REVIEW', 'Ready to review'), ('NEEDS_WORK', 'Needs work'), ('ACCEPTED', 'Accepted')], default='NEW', max_length=255)),
('state_de', models.CharField(choices=[('NEW', 'New'), ('TODO', 'To do'), ('READY_TO_REVIEW', 'Ready to review'), ('NEEDS_WORK', 'Needs work'), ('ACCEPTED', 'Accepted')], default='NEW', max_length=255)),
('state_ru', models.CharField(choices=[('NEW', 'New'), ('TODO', 'To do'), ('READY_TO_REVIEW', 'Ready to review'), ('NEEDS_WORK', 'Needs work'), ('ACCEPTED', 'Accepted')], default='NEW', max_length=255)),
('order_index', models.IntegerField(default=0)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('parent', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='translations.translationgroup')),
],
options={
'verbose_name': 'historical translation',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
]
|
def run():
my_list = [1, 'Hello', True, 4.5]
my_dict = {'firstname': 'Hector', 'lastname': "Olvera"}
super_list = [
{"firstname": 'Facundo', 'lastname': 'García'},
{'firstname': 'Miguel', 'lastname': 'Torres'},
{'firstname': 'Pepe', 'lastname': 'Rodelo'},
]
super_dict = {
"natural_nums": [0, 1, 2, 3],
"interger_nums": [-2, -1, 0, 1, 2],
"floating_nums": [1.1, -2.3, 4.5],
}
for key, value in super_dict.items():
print(key, '-', value)
if __name__ == '__main__':
run()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pathlib import Path
from unittest.mock import Mock
import pytest
from click.testing import CliRunner
from yeahyeah_plugins.path_item_plugin.core import PathItemPlugin
from yeahyeah.core import YeahYeah
@pytest.fixture(autouse=True)
def disable_click_echo(monkeypatch):
"""Don't print click.echo to console. Click runner disables this, but not
all tests use click runner to invoke all commands. So this is needed
"""
monkeypatch.setattr("yeahyeah.core.click.echo", Mock())
def test_path_item_plugin(path_item_list, tmpdir):
yeahyeah = YeahYeah(configuration_path=tmpdir)
plugin = PathItemPlugin(item_list=path_item_list)
yeahyeah.add_plugin_instance(plugin)
assert len(plugin.get_commands()) == 2
def test_path_item_plugin_no_file(path_item_list, tmpdir):
plugin = PathItemPlugin(item_list=path_item_list)
plugin.assert_config_file(Path(tmpdir) / "path_item_config_test.txt")
def test_path_item_plugin_admin_add_remove_list_record(yeahyeah_instance):
runner = CliRunner()
path_item_plugin = yeahyeah_instance.plugins[1]
assert len(path_item_plugin.item_list) == 2
response = runner.invoke(
yeahyeah_instance.admin_cli, "path_items add a_path /test/something/folder"
)
assert response.exit_code == 0
assert len(path_item_plugin.item_list) == 3
response = runner.invoke(yeahyeah_instance.admin_cli, "path_items list")
assert response.exit_code == 0
response = runner.invoke(
yeahyeah_instance.admin_cli, "path_items remove a_path".split(" ")
)
assert response.exit_code == 0
assert len(path_item_plugin.item_list) == 2
|
from .upload import PharmgkbUploader
from .dump import PharmgkbDumper
|
# Copyright 2019 Contributors to Hyperledger Sawtooth
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
"""Sets up a standard logging format and setting"""
import logging
LIB_LEVELS = {"asyncio": logging.WARNING}
LOGGER_FORMAT = "%(levelname)s %(asctime)s %(name)s %(module)s %(pathname)s %(message)s"
logging.basicConfig(level=logging.INFO, format=LOGGER_FORMAT)
for lib, level in LIB_LEVELS.items():
logging.getLogger(lib).setLevel(level)
def get_logger(name):
"""Return the logger
Written to match the standard python logging.getLogger
function for ease of migration
"""
logger = logging.getLogger(name)
return logger
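# Minimal usage sketch (added for illustration, not part of the original
# module): loggers returned by get_logger inherit the format and INFO level
# configured by basicConfig above.
if __name__ == "__main__":
    LOGGER = get_logger(__name__)
    LOGGER.info("logging configured with format: %s", LOGGER_FORMAT)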
|
import csv
# Rewrite the raw Kinect depth CSV: keep the leading target label on each row
# and reverse the remaining values for every row from `reverseSpot` onward.
with open('Depth Data/KinectDataRaw.csv', 'r', newline='') as file, \
        open("Depth Data/KinectDataCorrect.csv", "w") as fileWriter:
    reader = csv.reader(file)
    index = 0
    reverseSpot = 487  # CHANGE: first (1-based) row whose values get reversed
    for row in reader:
        target = row.pop(0)  # Remove the target label
        row = list(map(int, row))
        index += 1
        if index >= reverseSpot:
            row.reverse()
        fileWriter.write(target + ", " + str(row).strip('[]') + "\n")
|
# -*- coding:utf8 -*-
""" Lexical analysis is the process of analyzing a stream of individual characters (normally arranged as lines),
into a sequence of lexical tokens (tokenization. for instance of "words" and punctuation symbols that make up source code)
to feed into the parser. Roughly the equivalent of splitting ordinary text written in a natural language (e.g. English)
into a sequence of words and punctuation symbols. Lexical analysis is often done with tools such as lex, flex and jflex.
Strictly speaking, tokenization may be handled by the parser. The reason why we tend to bother with tokenising in practice
is that it makes the parser simpler, and decouples it from the character encoding used for the source code.
"""
from . import token_type
from . import token
from . import lexer
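# Illustrative sketch only (guarded so it never runs on package import; the
# token kinds below are made up for the example, the real ones live in the
# token_type/token/lexer modules imported above): tokenization in its simplest
# form splits a character stream into (kind, text) pairs.
if __name__ == "__main__":
    import re
    TOKEN_SPEC = [
        ("NUMBER", r"\d+"),
        ("IDENT", r"[A-Za-z_]\w*"),
        ("OP", r"[+\-*/=]"),
        ("SKIP", r"\s+"),
    ]
    master = re.compile("|".join("(?P<%s>%s)" % pair for pair in TOKEN_SPEC))
    source = "x = 42 + y"
    tokens = [(m.lastgroup, m.group()) for m in master.finditer(source)
              if m.lastgroup != "SKIP"]
    print(tokens)  # [('IDENT', 'x'), ('OP', '='), ('NUMBER', '42'), ('OP', '+'), ('IDENT', 'y')]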
|
# -*- coding: utf-8 -*-
"""
Alpine Tundra Model
Created on Sun Mar 27 15:51:07 2016
Last revised Thu Apr 21 09:21:55 2016
@author: Katherine
"""
from Parameters_with_Overland_Flow import *
import numpy as np
def Alpine_Tundra_Model(t,y,SWCSOIL,TSOIL,thetaS_mod, LAI, fVl, fVr, fVw, ZS, BD, Vw, R_SOC, delta_Vl, delta_Vr, delta_Vw, delta_LC, delta_SC, xi_Vl, xi_Vr, xi_Vw, xi_LC, xi_SC, KSc, fDOC, KL, KVm, KVl, KVr, KPp, KPw):
    #Initial Conditions
Vl=y[0] #leaf carbon
Vw=y[1] #wood carbon
Vr=y[2] #root carbon
LC=y[3] #litter carbon
SC=y[4] #soil carbon
R_SOC=y[5] #recalcitrant carbon-->not used in model
DOC=y[6] #dissolved organic carbon
NSavail=y[7] #dissolved inorganic nitrogen
PSavail=y[8] #dissolved inorganic phosphorus
PSpm=y[9] #phosphorus in parent materials
#Day of the Year (0-365; 0 is equal to 366)
nday=int(t%DOY) #check to make sure this works
#Do not run model for timestep if environmental conditions are null
if SOLAR[nday]==-999:
print (nday+1)
return
if TAIR[nday]==-999:
print (nday+1)
return
#Set Control Environmental Conditions
Ta=TAIR[nday]
thetaS=SWCSOIL[nday]
Ts=TSOIL[nday]
#Scale moisture and temperature by modeled moisture in Overland Flow Model
if nday+1>=GS0 and nday+1<=GS1:
thetaS_model=thetaS_mod[int((GSL-1)-(GS1-(nday+1))+1)] #substitute thetaS from overland flow model
        SWCfrac_mod_obs=thetaS_model/thetaS #ratio between modeled and observed soil moisture (given air temp)
thetaS=thetaS_model #make soil moisture in the model the same as overland flow modeled soil moisture
Ts=Ts/SWCfrac_mod_obs #scale soil temperature by soil moisture
#Leaching
if nday+1>=GS0 and nday+1<=GS1:
Qlch=0.001252566*thetaS
else:
Qlch=0.0
#Porosity
Poro=1.0-BD/PD
#Maximum GPP
UPAR=SOLAR[nday]*0.5
fPAR=(1.0-np.exp(0.5*(-LAI)))
GPP_max=UPAR*fPAR*epsilon
#Soil Temperature Constraint on GPP
GPP_Temp=(Tmax-Ta)/(Tmax-Topt)*((Ta/Topt)**(Topt/(Tmax-Topt)))
if Ta<0.0:
GPP_Temp=0.0
#Soil Moisture Constraint on GPP
SA_eff=(thetaS-PAW)/(Poro-PAW) #effective soil saturation
if SA_eff<=0.0:
SA_eff=0.0
if SA_eff>=1.0:
SA_eff=0.99
GPP_SM=-4.0*SA_eff*SA_eff+4.0*SA_eff
if (nday+1)<GS0 or (nday+1)>GS1: #set moisture limitation to zero during winter
GPP_SM=1.0
#Soil Nutrient Constraint on GPP
GPP_T_SM=GPP_Temp*GPP_max*GPP_SM #constrained GPP with soil moisture and temperature but not nutrients
if GPP_T_SM>0.0:
GPP_N_lim=thetaS*NSavail*ZS/(GPP_T_SM*(fVl/delta_Vl+fVw/delta_Vw+fVr/delta_Vr))
GPP_P_lim=thetaS*PSavail*ZS/(GPP_T_SM*(fVl/xi_Vl+fVw/xi_Vw+fVr/xi_Vr))
if (GPP_N_lim<GPP_P_lim):
minimum=GPP_N_lim
else:
minimum=GPP_P_lim
if minimum<1.0:
GPP_nut=minimum
else:
GPP_nut=1.0
if GPP_nut<0.0:
GPP_nut=0.0
#Calculate GPP
GPP=GPP_T_SM*GPP_nut
else:
GPP=0.0
GPP_N_lim=1.0 #not limiting if GPP_T_SM<0.0 because temperature and moisture are primary limiting factors
GPP_P_lim=1.0 #not limiting if GPP_T_SM<0.0 because temperature and moisture are primary limiting factors
#Soil Saturation
SA=thetaS/Poro
#Function of Temperature and Moisture
F_TM=(0.642*0.642-(SA-0.642)*(SA-0.642))*1.514*np.exp(0.048*Ts)
if Ts<=0.0:
F_TM=0.0
#Temperature Function
F_T=np.exp(0.0693*Ta)
if Ta<0.0:
F_T=0.0
#Vegetation Carbon
VC=Vl+Vw+Vr
#Decomposition of Litter Carbon
LC_decomp=KL*LC*F_TM
#Autotrophic Respiration
RVm=KVm*VC*F_T #vegetation maintenance respiration
NPP_Vm=GPP-RVm #NPP taking into account only maintenance respiration, used to determine growth respiration
if NPP_Vm>0:
RVg=GR*NPP_Vm #vegetation growth respiration
else:
RVg=0.0
RA=RVm+RVg #total autotrophic respiration
#Calculate NPP
NPP=GPP-RA
if NPP<0.0:
NPP=0.0
RA=GPP
#Nitrogen & Phosphorus Precipitation/Deposition
Nppt=N_PPT/DOY #converts g N m-2 yr-1 to g N m-2 day-1
    Pppt=P_PPT/DOY #converts g P m-2 yr-1 to g P m-2 day-1
#Nitrogen & Phosphorus Fertilization
if FTLZ_N=="false":
N_fer=0
elif FTLZ_N=="true":
if t>DOY*(RUNY-1):
if nday+1>=GS0 and nday+1<=GS1:
N_fer=NFTLZ/GSL
else:
N_fer=0
else:
N_fer=0
if FTLZ_P=="false":
P_fer=0
elif FTLZ_P=="true":
if t>DOY*(RUNY-1):
if nday+1>=GS0 and nday+1<=GS1:
P_fer=PFTLZ/GSL
else:
P_fer=0
else:
P_fer=0
#Mass Balance of DOC
DOCp=fDOC*BD*ZS*SC*KSc*F_TM #DOC production
DOCRh=thetaS*ZS*DOC*KDOC*F_TM #DOC loss due to decomposition
DOClch=Qlch*thetaS*DOC #DOC leaching
#Mass Balance of Soil Inorganic Nitrogen
NSnet=(1.0/delta_LC-fLc/delta_SC)*LC_decomp+fDIN*BD*ZS*SC*KSc*F_TM/delta_SC #net N mobilization/mineralization
NSupt=(fVl/delta_Vl+fVw/delta_Vw+fVr/delta_Vr)*NPP #N uptake by vegetation
NSlch=Qlch*thetaS*NSavail #N leaching
NSdpt=Nppt #N deposition
#Mass Balance of Soil Inorganic Phosphorus
PSnet=(1.0/xi_LC-fLc/xi_SC)*LC_decomp+fDIP*BD*ZS*SC*KSc*F_TM/xi_SC #net P mobilization/mineralization
PSupt=(fVl/xi_Vl+fVw/xi_Vw+fVr/xi_Vr)*NPP #P uptake by vegetation
PSppt=thetaS*ZS*KPp*PSavail*F_TM #P mineral precipitation
PSwthr=KPw*PSpm*F_TM #P weathering
PSlch=Qlch*thetaS*PSavail #P leaching
PSdpt=Pppt #P deposition
#Distribution Coefficient of DOC (other distribution coefficients are in the parameters file, but need to establish BD before determining this one)
    dDOC=0.6*1.0/BD #(m3 g-1) linear distribution coefficient of DOC between sorbed and aqueous phase
#Model ODEs
f0=fVl*NPP-KVl*Vl #Vl
f1=0.0 #Vw-->no change
f2=fVr*NPP-KVr*Vr #Vr
f3=-LC_decomp+KVl*Vl+KVr*Vr #LC
f4=(fLc*LC_decomp-BD*ZS*SC*KSc*F_TM)/(BD*ZS) #SOC
    f5=(BD*ZS*SC*KSc*F_TM) #recalcitrant SOC-->DOESN'T DO ANYTHING IN MODEL
f6=(DOCp-DOCRh-DOClch)/(thetaS*ZS + BD*ZS*dDOC) #DOC
f7=(NSnet-NSupt-NSlch+NSdpt+N_fer)/(thetaS*ZS+BD*ZS*dN) #DIN
f8=(PSnet-PSupt-PSppt-PSlch+PSdpt+PSwthr+P_fer)/(thetaS*ZS+BD*ZS*dP) #DIP
f9=0.0 #PM-->no change
# if t==0.0 or t>365.0:
# print ("t=%.3f\t\tnday=%d\t\tN_change=%.6f\t\tNSavail=%.6f\t\tNSnet=%.6f\t\tNSupt=%.6f\t\tNSlch=%.6f" %(t,nday,(NSnet-NSupt-NSlch+NSdpt+N_fer)/(thetaS*ZS+BD*ZS*dN),NSavail,NSnet,NSupt,NSlch) )
#
#Final Output of ODEs for Each Timestep--Use as y values for Next Model Run Timestep
return [f0,f1,f2,f3,f4,f5,f6,f7,f8,f9]
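# Driver sketch (added for illustration; the argument values are placeholders
# supplied by Parameters_with_Overland_Flow and the site forcing data, and
# forward Euler is just one plausible way to step the system): the function
# above returns daily derivatives for the ten pools, so a one-day explicit step
# looks like:
#
#     y = list(y0)                              # ten initial pool values
#     for t in range(RUNY * DOY):               # RUNY years of daily steps
#         dydt = Alpine_Tundra_Model(t, y, SWCSOIL, TSOIL, thetaS_mod, LAI,
#                                    fVl, fVr, fVw, ZS, BD, Vw, R_SOC,
#                                    delta_Vl, delta_Vr, delta_Vw, delta_LC,
#                                    delta_SC, xi_Vl, xi_Vr, xi_Vw, xi_LC,
#                                    xi_SC, KSc, fDOC, KL, KVm, KVl, KVr,
#                                    KPp, KPw)
#         if dydt is None:                      # missing forcing for this day
#             continue
#         y = [yi + dyi for yi, dyi in zip(y, dydt)]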
|
#!/usr/bin/python
import os
import socket
import sys
import signal
if len(sys.argv) == 1:
sys.exit('usage: %s [--server directory] args...' % sys.argv[0])
if sys.argv[1] == '--server':
dir = sys.argv[2]
del sys.argv[1:3]
else:
dir = '.'
stdin_path = os.readlink('/proc/self/fd/0')
stdout_path = os.readlink('/proc/self/fd/1')
stderr_path = os.readlink('/proc/self/fd/2')
def connect():
sock = socket.socket(socket.AF_UNIX)
sock.connect('%s/.pool/serv' % dir)
return sock
try:
sock = connect()
except socket.error, err:
import errno
import subprocess
import time
if err.errno == errno.ECONNREFUSED:
server_path = os.path.join(os.path.dirname(__file__), 'server.py')
os.environ['CLASSPATH'] = open('%s/.pool/cp' % dir).read()
if sys.stderr.isatty():
print >>sys.stderr, 'starting Clojure Pool server on `screen` with CLASSPATH=%s' % os.environ['CLASSPATH']
subprocess.check_call(['screen', '-d', '-m', '-S', 'clojurepool', server_path])
for i in xrange(5):
try:
sock = connect()
except socket.error:
time.sleep(0.2)
else:
break
sock = connect()
else:
raise
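# Wire format (inferred from the request built below and the replies read
# afterwards): the client sends "<payload length>\n<payload>", where the
# payload lists the client's stdin/stdout/stderr paths, the argument count and
# the arguments themselves, one per line. The server answers with the worker
# pid on the first line and the exit status on the second.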
data = '%s\n%s\n%s\n%d\n%s\n' % (stdin_path, stdout_path, stderr_path, len(sys.argv[1:]), '\n'.join(sys.argv[1:]))
sock.sendall('%d\n%s' % (len(data), data))
pid = int(sock.makefile('r').readline())
try:
status = int(sock.makefile('r').readline())
except KeyboardInterrupt:
os.kill(pid, signal.SIGINT)
print
else:
sys.exit(status)
|
from fastapi import HTTPException
from app.api import database
from app.api.models import categories, CategoryIn, CategoryOut
async def get_category(category_id: int):
"""Get category with set id from database."""
return await database.fetch_one(query=categories.select().where(categories.c.category_id == category_id))
async def get_categories():
"""Get all categories stored in database."""
return await database.fetch_all(query=categories.select())
async def add_category(payload: CategoryIn):
"""Store new category in database."""
query = categories.insert().values(**payload.dict())
return await database.execute(query=query)
async def get_unlisted_category_id():
"""Return id of UNLISTED category.
If category does not exist then create it.
"""
name = 'UNLISTED'
unlisted = await database.fetch_one(query=categories.select().where(categories.c.category_name == name))
if unlisted:
return unlisted['category_id']
unlisted = CategoryIn(category_name=name, description='', picture='')
return await add_category(unlisted)
async def update(payload: dict):
"""Update category with set id in database."""
query = categories.update().where(categories.c.category_id == payload['category_id'])
payload.pop('category_id')
query = query.values(**payload).returning(categories)
return await database.fetch_one(query=query)
@database.transaction()
async def delete(category_id: int):
"""Remove category with set id from database.
Unlink all products connected to that category by replacing category_id with id of unlisted category.
"""
if not await get_category(category_id):
raise HTTPException(status_code=404, detail='Category not found.')
empty = await get_unlisted_category_id()
query = "UPDATE products SET category_id = :empty WHERE category_id = :id"
await database.execute(query=query, values={'empty': empty, 'id': category_id})
query = categories.delete().where(categories.c.category_id == category_id).returning(categories)
return await database.fetch_one(query=query)
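# Usage sketch (added for illustration, not part of the original module; the
# router name and route path are assumptions): the async helpers above are
# meant to be awaited from FastAPI path operations, for example a read endpoint.
from fastapi import APIRouter
example_router = APIRouter()
@example_router.get('/categories/{category_id}')
async def read_category(category_id: int):
    """Return the category with the given id, or a 404 if it does not exist."""
    category = await get_category(category_id)
    if category is None:
        raise HTTPException(status_code=404, detail='Category not found.')
    return category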
|