code (string, 22 to 1.05M chars) | apis (list, 1 to 3.31k entries) | extract_api (string, 75 to 3.25M chars)
---|---|---|
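Each row below pairs a Python snippet (code) with the fully qualified library calls found in it (apis) and one positional record per call (extract_api). Judging from the rows themselves, each record holds the character span of the call in the code string, the resolved call name, the name as written in the source, the parsed positional and keyword arguments, a normalized rendering of the argument list, the span of the argument list, a flag, and the import statement that binds the name; this layout is inferred from the dump, not from a published schema. A minimal sketch of consuming such a record under that assumption (show_call is a hypothetical helper, not part of the dataset):

def show_call(code, record):
    # Assumed record layout (inferred from the rows below):
    # ((call_start, call_end), qualified_name, name_as_written, (args, kwargs),
    #  arg_text, (arg_start, arg_end), flag, import_statement)
    (call_start, call_end), qualified_name = record[0], record[1]
    import_statement = record[-1]
    print(qualified_name, "bound by:", import_statement.strip())
    print(code[call_start:call_end])  # the call expression exactly as it appears in the snippet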
import re
import setuptools
README_FILENAME = "README.md"
VERSION_FILENAME = "observed.py"
VERSION_RE = r"^__version__ = ['\"]([^'\"]*)['\"]"
# Get version information
with open(VERSION_FILENAME, "r") as version_file:
mo = re.search(VERSION_RE, version_file.read(), re.M)
if mo:
version = mo.group(1)
else:
msg = "Unable to find version string in %s." % (version_file,)
raise RuntimeError(msg)
# Get description information
with open(README_FILENAME, "r") as description_file:
long_description = description_file.read()
setuptools.setup(
name="observed",
version=version,
author="<NAME>",
author_email="<EMAIL>",
description="Observer pattern for functions and bound methods",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/DanielSank/observed",
py_modules=["observed"],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
|
[
"setuptools.setup"
] |
[((547, 1019), 'setuptools.setup', 'setuptools.setup', ([], {'name': '"""observed"""', 'version': 'version', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'description': '"""Observer pattern for functions and bound methods"""', 'long_description': 'long_description', 'long_description_content_type': '"""text/markdown"""', 'url': '"""https://github.com/DanielSank/observed"""', 'py_modules': "['observed']", 'classifiers': "['Programming Language :: Python :: 3',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent']"}), "(name='observed', version=version, author='<NAME>',\n author_email='<EMAIL>', description=\n 'Observer pattern for functions and bound methods', long_description=\n long_description, long_description_content_type='text/markdown', url=\n 'https://github.com/DanielSank/observed', py_modules=['observed'],\n classifiers=['Programming Language :: Python :: 3',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent'])\n", (563, 1019), False, 'import setuptools\n')]
|
import os
import json
import numpy as np
import pickle
from typing import Any
from pycocotools.coco import COCO
from torch.utils.data import Dataset
class DetectionMSCOCODataset(Dataset):
def __init__(self, annotation_file: str, image_dir: str):
self._annotation_file = annotation_file
self._image_dir = image_dir
self._cache_file = self._annotation_file + ".cache"
self._coco = COCO(self._annotation_file)
self._img_ids = self._coco.getImgIds()
self._cat_ids = self._coco.getCatIds()
self._ann_ids = self._coco.getAnnIds()
self._data = "coco"
self._classes = {
ind: cat_id for ind, cat_id in enumerate(self._cat_ids)
}
self._coco_to_class_map = {
value: key for key, value in self._classes.items()
}
self._load_data()
self._db_inds = np.arange(len(self._image_names))
self._load_coco_data()
def _load_data(self):
print("loading from cache file: {}".format(self._cache_file))
if not os.path.exists(self._cache_file):
print("No cache file found...")
self._extract_data()
with open(self._cache_file, "wb") as f:
pickle.dump([self._detections, self._image_names], f)
print("Cache file created")
else:
with open(self._cache_file, "rb") as f:
self._detections, self._image_names = pickle.load(f)
def _load_coco_data(self):
with open(self._annotation_file, "r") as f:
data = json.load(f)
coco_ids = self._coco.getImgIds()
eval_ids = {
self._coco.loadImgs(coco_id)[0]["file_name"]: coco_id
for coco_id in coco_ids
}
self._coco_categories = data["categories"]
self._coco_eval_ids = eval_ids
def class_name(self, cid):
cat_id = self._classes[cid]
cat = self._coco.loadCats([cat_id])[0]
return cat["name"]
def _extract_data(self):
self._image_names = [
self._coco.loadImgs(img_id)[0]["file_name"]
for img_id in self._img_ids
]
self._detections = {}
for ind, (coco_image_id, image_name) in enumerate(zip(self._img_ids, self._image_names)):
image = self._coco.loadImgs(coco_image_id)[0]
bboxes = []
categories = []
for cat_id in self._cat_ids:
annotation_ids = self._coco.getAnnIds(imgIds=image["id"], catIds=cat_id)
annotations = self._coco.loadAnns(annotation_ids)
category = self._coco_to_class_map[cat_id]
for annotation in annotations:
bbox = np.array(annotation["bbox"])
bbox[[2, 3]] += bbox[[0, 1]]
bboxes.append(bbox)
categories.append(category)
self._detections[image_name] = [{
'bbox': bbox.astype(np.float32),
'category_id': category,
'category_name': self.class_name(category)
} for bbox, category in zip(bboxes, categories)]
def __getitem__(self, ind: int) -> Any:
image_name = self._image_names[ind]
return {
'image_name': os.path.join(self._image_dir, image_name),
'detections': self._detections[image_name]
}
def __len__(self) -> int:
return len(self._img_ids)
def get_num_classes(self) -> int:
return len(self._cat_ids)
|
[
"os.path.exists",
"pickle.dump",
"pycocotools.coco.COCO",
"os.path.join",
"pickle.load",
"numpy.array",
"json.load"
] |
[((420, 447), 'pycocotools.coco.COCO', 'COCO', (['self._annotation_file'], {}), '(self._annotation_file)\n', (424, 447), False, 'from pycocotools.coco import COCO\n'), ((1061, 1093), 'os.path.exists', 'os.path.exists', (['self._cache_file'], {}), '(self._cache_file)\n', (1075, 1093), False, 'import os\n'), ((1572, 1584), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1581, 1584), False, 'import json\n'), ((3290, 3331), 'os.path.join', 'os.path.join', (['self._image_dir', 'image_name'], {}), '(self._image_dir, image_name)\n', (3302, 3331), False, 'import os\n'), ((1240, 1293), 'pickle.dump', 'pickle.dump', (['[self._detections, self._image_names]', 'f'], {}), '([self._detections, self._image_names], f)\n', (1251, 1293), False, 'import pickle\n'), ((1454, 1468), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1465, 1468), False, 'import pickle\n'), ((2733, 2761), 'numpy.array', 'np.array', (["annotation['bbox']"], {}), "(annotation['bbox'])\n", (2741, 2761), True, 'import numpy as np\n')]
|
#!/usr/bin/python
# *****************************************************************************
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# ******************************************************************************
import logging
import json
import sys
from dlab.fab import *
from dlab.meta_lib import *
from dlab.actions_lib import *
import os
import uuid
if __name__ == "__main__":
local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
os.environ['request_id'])
local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=local_log_filepath)
try:
# generating variables dictionary
print('Generating infrastructure names and tags')
notebook_config = dict()
try:
notebook_config['exploratory_name'] = os.environ['exploratory_name'].replace('_', '-')
except:
notebook_config['exploratory_name'] = ''
try:
notebook_config['computational_name'] = os.environ['computational_name'].replace('_', '-')
except:
notebook_config['computational_name'] = ''
notebook_config['service_base_name'] = os.environ['conf_service_base_name']
notebook_config['resource_group_name'] = os.environ['azure_resource_group_name']
notebook_config['region'] = os.environ['azure_region']
notebook_config['user_name'] = os.environ['edge_user_name'].replace('_', '-')
notebook_config['project_name'] = os.environ['project_name'].replace('_', '-')
notebook_config['project_tag'] = os.environ['project_name'].replace('_', '-')
notebook_config['endpoint_tag'] = os.environ['endpoint_name'].replace('_', '-')
notebook_config['cluster_name'] = notebook_config['service_base_name'] + '-' + notebook_config['project_name'] + \
'-de-' + notebook_config['exploratory_name'] + '-' + \
notebook_config['computational_name']
notebook_config['master_node_name'] = notebook_config['cluster_name'] + '-m'
notebook_config['slave_node_name'] = notebook_config['cluster_name'] + '-s'
notebook_config['notebook_name'] = os.environ['notebook_instance_name']
notebook_config['key_path'] = os.environ['conf_key_dir'] + '/' + os.environ['conf_key_name'] + '.pem'
notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
notebook_config['instance_count'] = int(os.environ['dataengine_instance_count'])
try:
notebook_config['spark_master_ip'] = AzureMeta().get_private_ip_address(
notebook_config['resource_group_name'], notebook_config['master_node_name'])
notebook_config['notebook_ip'] = AzureMeta().get_private_ip_address(
notebook_config['resource_group_name'], notebook_config['notebook_name'])
except Exception as err:
print('Error: {0}'.format(err))
sys.exit(1)
notebook_config['spark_master_url'] = 'spark://{}:7077'.format(notebook_config['spark_master_ip'])
except Exception as err:
for i in range(notebook_config['instance_count'] - 1):
slave_name = notebook_config['slave_node_name'] + '{}'.format(i+1)
AzureActions().remove_instance(notebook_config['resource_group_name'], slave_name)
AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['master_node_name'])
append_result("Failed to generate infrastructure names", str(err))
sys.exit(1)
try:
logging.info('[INSTALLING KERNELS INTO SPECIFIED NOTEBOOK]')
print('[INSTALLING KERNELS INTO SPECIFIED NOTEBOOK]')
params = "--cluster_name {0} --spark_version {1} --hadoop_version {2} --os_user {3} --spark_master {4}" \
" --keyfile {5} --notebook_ip {6} --datalake_enabled {7} --spark_master_ip {8}".\
format(notebook_config['cluster_name'], os.environ['notebook_spark_version'],
os.environ['notebook_hadoop_version'], notebook_config['dlab_ssh_user'],
notebook_config['spark_master_url'], notebook_config['key_path'], notebook_config['notebook_ip'],
os.environ['azure_datalake_enable'], notebook_config['spark_master_ip'])
try:
local("~/scripts/{}_{}.py {}".format(os.environ['application'], 'install_dataengine_kernels', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
print('Error: {0}'.format(err))
for i in range(notebook_config['instance_count'] - 1):
slave_name = notebook_config['slave_node_name'] + '{}'.format(i+1)
AzureActions().remove_instance(notebook_config['resource_group_name'], slave_name)
AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['master_node_name'])
append_result("Failed installing Dataengine kernels.", str(err))
sys.exit(1)
try:
logging.info('[UPDATING SPARK CONFIGURATION FILES ON NOTEBOOK]')
print('[UPDATING SPARK CONFIGURATION FILES ON NOTEBOOK]')
params = "--hostname {0} " \
"--keyfile {1} " \
"--os_user {2} " \
"--cluster_name {3} " \
.format(notebook_config['notebook_ip'],
notebook_config['key_path'],
notebook_config['dlab_ssh_user'],
notebook_config['cluster_name'])
try:
local("~/scripts/{0}.py {1}".format('common_configure_spark', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
print('Error: {0}'.format(err))
for i in range(notebook_config['instance_count'] - 1):
slave_name = notebook_config['slave_node_name'] + '{}'.format(i+1)
AzureActions().remove_instance(notebook_config['resource_group_name'], slave_name)
AzureActions().remove_instance(notebook_config['resource_group_name'], notebook_config['master_node_name'])
append_result("Failed to configure Spark.", str(err))
sys.exit(1)
try:
with open("/root/result.json", 'w') as result:
res = {"notebook_name": notebook_config['notebook_name'],
"Action": "Configure notebook server"}
print(json.dumps(res))
result.write(json.dumps(res))
except:
print("Failed writing results.")
sys.exit(0)
|
[
"logging.basicConfig",
"json.dumps",
"logging.info",
"sys.exit"
] |
[((1413, 1539), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(levelname)-8s [%(asctime)s] %(message)s"""', 'level': 'logging.DEBUG', 'filename': 'local_log_filepath'}), "(format='%(levelname)-8s [%(asctime)s] %(message)s',\n level=logging.DEBUG, filename=local_log_filepath)\n", (1432, 1539), False, 'import logging\n'), ((4562, 4622), 'logging.info', 'logging.info', (['"""[INSTALLING KERNELS INTO SPECIFIED NOTEBOOK]"""'], {}), "('[INSTALLING KERNELS INTO SPECIFIED NOTEBOOK]')\n", (4574, 4622), False, 'import logging\n'), ((6028, 6092), 'logging.info', 'logging.info', (['"""[UPDATING SPARK CONFIGURATION FILES ON NOTEBOOK]"""'], {}), "('[UPDATING SPARK CONFIGURATION FILES ON NOTEBOOK]')\n", (6040, 6092), False, 'import logging\n'), ((4532, 4543), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4540, 4543), False, 'import sys\n'), ((5998, 6009), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6006, 6009), False, 'import sys\n'), ((7183, 7194), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (7191, 7194), False, 'import sys\n'), ((7526, 7537), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (7534, 7537), False, 'import sys\n'), ((3947, 3958), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3955, 3958), False, 'import sys\n'), ((7406, 7421), 'json.dumps', 'json.dumps', (['res'], {}), '(res)\n', (7416, 7421), False, 'import json\n'), ((7448, 7463), 'json.dumps', 'json.dumps', (['res'], {}), '(res)\n', (7458, 7463), False, 'import json\n')]
|
#!/usr/bin/env python
"""Functions to cluster using UPGMA
upgma takes a dictionary of pair tuples mapped to distances as input.
UPGMA_cluster takes an array and a list of PhyloNode objects corresponding
to the array as input. This type of input can also be generated from a
DictArray using the inputs_from_dict_array function.
Both return a PhyloNode object of the UPGMA cluster
"""
import numpy
from numpy import argmin, array, average, diag, ma, ravel, sum, take
from cogent3.core.tree import PhyloNode
from cogent3.util.dict_array import DictArray
__author__ = "<NAME>"
__copyright__ = "Copyright 2007-2020, The Cogent Project"
__credits__ = ["<NAME>", "<NAME>", "<NAME>"]
__license__ = "BSD-3"
__version__ = "2020.7.2a"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
numerictypes = numpy.core.numerictypes.sctype2char
Float = numerictypes(float)
BIG_NUM = 1e305
def upgma(pairwise_distances):
"""Uses the UPGMA algorithm to cluster sequences
pairwise_distances: a dictionary with pair tuples mapped to a distance
returns a PhyloNode object of the UPGMA cluster
"""
darr = DictArray(pairwise_distances)
matrix_a, node_order = inputs_from_dict_array(darr)
tree = UPGMA_cluster(matrix_a, node_order, BIG_NUM)
index = 0
for node in tree.traverse():
if not node.parent:
node.name = "root"
elif not node.name:
node.name = "edge." + str(index)
index += 1
return tree
def find_smallest_index(matrix):
"""returns the index of the smallest element in a numpy array
    for UPGMA clustering, elements on the diagonal should first be
    substituted with a very large number so that they are always
    larger than the rest of the values in the array."""
# get the shape of the array as a tuple (e.g. (3,3))
shape = matrix.shape
# turn into a 1 by x array and get the index of the lowest number
matrix1D = ravel(matrix)
lowest_index = argmin(matrix1D)
# convert the lowest_index derived from matrix1D to one for the original
# square matrix and return
row_len = shape[0]
return divmod(lowest_index, row_len)
def condense_matrix(matrix, smallest_index, large_value):
"""converges the rows and columns indicated by smallest_index
Smallest index is returned from find_smallest_index.
For both the rows and columns, the values for the two indices are
averaged. The resulting vector replaces the first index in the array
and the second index is replaced by an array with large numbers so that
it is never chosen again with find_smallest_index.
"""
first_index, second_index = smallest_index
# get the rows and make a new vector that has their average
rows = take(matrix, smallest_index, 0)
new_vector = average(rows, 0)
# replace info in the row and column for first index with new_vector
matrix[first_index] = new_vector
matrix[:, first_index] = new_vector
# replace the info in the row and column for the second index with
# high numbers so that it is ignored
matrix[second_index] = large_value
matrix[:, second_index] = large_value
return matrix
def condense_node_order(matrix, smallest_index, node_order):
"""condenses two nodes in node_order based on smallest_index info
This function is used to create a tree while condensing a matrix
with the condense_matrix function. The smallest_index is retrieved
with find_smallest_index. The first index is replaced with a node object
that combines the two nodes corresponding to the indices in node order.
The second index in smallest_index is replaced with None.
Also sets the branch length of the nodes to 1/2 of the distance between
the nodes in the matrix"""
index1, index2 = smallest_index
node1 = node_order[index1]
node2 = node_order[index2]
# get the distance between the nodes and assign 1/2 the distance to the
# lengthproperty of each node
distance = matrix[index1, index2]
nodes = [node1, node2]
d = distance / 2.0
for n in nodes:
if n.children:
n.length = d - n.children[0].TipLength
else:
n.length = d
n.TipLength = d
# combine the two nodes into a new PhyloNode object
new_node = PhyloNode()
new_node.children.append(node1)
new_node.children.append(node2)
node1.parent = new_node
node2.parent = new_node
# replace the object at index1 with the combined node
node_order[index1] = new_node
# replace the object at index2 with None
node_order[index2] = None
return node_order
def UPGMA_cluster(matrix, node_order, large_number):
"""cluster with UPGMA
matrix is a numpy array.
node_order is a list of PhyloNode objects corresponding to the matrix.
large_number will be assigned to the matrix during the process and
should be much larger than any value already in the matrix.
WARNING: Changes matrix in-place.
WARNING: Expects matrix to already have diagonals assigned to large_number
before this function is called.
"""
num_entries = len(node_order)
tree = None
for i in range(num_entries - 1):
smallest_index = find_smallest_index(matrix)
index1, index2 = smallest_index
# if smallest_index is on the diagonal set the diagonal to large_number
if index1 == index2:
matrix[diag([True] * len(matrix))] = large_number
smallest_index = find_smallest_index(matrix)
row_order = condense_node_order(matrix, smallest_index, node_order)
matrix = condense_matrix(matrix, smallest_index, large_number)
tree = node_order[smallest_index[0]]
return tree
def inputs_from_dict_array(darr):
"""makes inputs for UPGMA_cluster from a DictArray object
"""
darr.array += numpy.eye(darr.shape[0]) * BIG_NUM
nodes = list(map(PhyloNode, darr.keys()))
return darr.array, nodes
|
[
"numpy.eye",
"numpy.average",
"cogent3.core.tree.PhyloNode",
"cogent3.util.dict_array.DictArray",
"numpy.take",
"numpy.argmin",
"numpy.ravel"
] |
[((1128, 1157), 'cogent3.util.dict_array.DictArray', 'DictArray', (['pairwise_distances'], {}), '(pairwise_distances)\n', (1137, 1157), False, 'from cogent3.util.dict_array import DictArray\n'), ((1944, 1957), 'numpy.ravel', 'ravel', (['matrix'], {}), '(matrix)\n', (1949, 1957), False, 'from numpy import argmin, array, average, diag, ma, ravel, sum, take\n'), ((1977, 1993), 'numpy.argmin', 'argmin', (['matrix1D'], {}), '(matrix1D)\n', (1983, 1993), False, 'from numpy import argmin, array, average, diag, ma, ravel, sum, take\n'), ((2754, 2785), 'numpy.take', 'take', (['matrix', 'smallest_index', '(0)'], {}), '(matrix, smallest_index, 0)\n', (2758, 2785), False, 'from numpy import argmin, array, average, diag, ma, ravel, sum, take\n'), ((2803, 2819), 'numpy.average', 'average', (['rows', '(0)'], {}), '(rows, 0)\n', (2810, 2819), False, 'from numpy import argmin, array, average, diag, ma, ravel, sum, take\n'), ((4301, 4312), 'cogent3.core.tree.PhyloNode', 'PhyloNode', ([], {}), '()\n', (4310, 4312), False, 'from cogent3.core.tree import PhyloNode\n'), ((5862, 5886), 'numpy.eye', 'numpy.eye', (['darr.shape[0]'], {}), '(darr.shape[0])\n', (5871, 5886), False, 'import numpy\n')]
|
#this code will generate the structural verilog for a single entry in the register file
#takes in the output file manager, the entry number, the number of bits, the number of reads, and the width of the
#tristate buffers on the read outputs
#expects the same things as make_store_cell, ensure code is valid there
#<NAME>
#EE 526
#4/20/21
from make_store_cell import make_store_cell
def make_store_entry(out_file, entry_number, bits, reads, buff_width, regfile_num):
#just need to create the correct number of bits
#this and the make_store_array are going to be pretty simple
for bit in range(bits):
make_store_cell(out_file, entry_number, bit, reads, buff_width, regfile_num)
return
if __name__ == '__main__':
f = open('store_entry_test.txt', 'w')
rows = 4
cols = 2
reads = 2
for row in range(rows):
make_store_entry(f, row, cols, reads, 1, 0)
f.close()
|
[
"make_store_cell.make_store_cell"
] |
[((608, 684), 'make_store_cell.make_store_cell', 'make_store_cell', (['out_file', 'entry_number', 'bit', 'reads', 'buff_width', 'regfile_num'], {}), '(out_file, entry_number, bit, reads, buff_width, regfile_num)\n', (623, 684), False, 'from make_store_cell import make_store_cell\n')]
|
"""API for AVB"""
import json
import sys
import requests
def actualite_found():
osm = "https://opendata.bruxelles.be/api/datasets/1.0/search/?q="
data = {
"nhits":0,
"parameters":{
"dataset":"actualites-ville-de-bruxelles",
"timezone":"UTC",
"q":"actualite",
"language": "fr",
"rows":10,
"start":0,
"sort":[
"published"
]
,
"format":"json"
}
,
"records":[]
}
resp = requests.get(osm, data)
if resp.status_code == 200:
print(resp.json()["datasets"][0]["metas"])
else:
print("actualite not found")
return resp
def get_result(resp,n,attribut):
metas = resp.json()["datasets"][n]["metas"]
return metas[attribut]
def nb_result(resp):
return len(resp.json()["datasets"])
#Example of use
if __name__ == "__main__":
resp = actualite_found()
result = get_result(resp,2,"description")
print(result)
print(nb_result(resp))
|
[
"requests.get"
] |
[((501, 524), 'requests.get', 'requests.get', (['osm', 'data'], {}), '(osm, data)\n', (513, 524), False, 'import requests\n')]
|
from django.contrib.auth.models import User
from rest_framework import viewsets, status
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated, IsAdminUser
from api.serializers import TODOListSerializer
from api.models import TODOList
class TODOListViewSet(viewsets.ModelViewSet):
permission_classes = [IsAuthenticated]
serializer_class = TODOListSerializer
def get_queryset(self):
user = self.request.user
return TODOList.objects.filter(owner=self.request.user).order_by('created_at')
def create(self, request, *args, **kwargs):
request.data['owner'] = request.user.id
        return super().create(request, *args, **kwargs)
|
[
"api.models.TODOList.objects.filter"
] |
[((491, 539), 'api.models.TODOList.objects.filter', 'TODOList.objects.filter', ([], {'owner': 'self.request.user'}), '(owner=self.request.user)\n', (514, 539), False, 'from api.models import TODOList\n')]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
def copy_existing_referrals_into_new_field(apps, schema_editor):
Pledge = apps.get_model('donation', 'Pledge')
Referral = apps.get_model('donation', 'Referral')
reasons = Pledge.objects.values_list('how_did_you_hear_about_us', flat=True).distinct()
for reason in reasons:
if reason: # Filter out None and u''
Referral.objects.create(reason=reason)
for pledge in Pledge.objects.all():
reason = pledge.how_did_you_hear_about_us
if reason:
pledge.how_did_you_hear_about_us_db = Referral.objects.get(reason=reason)
pledge.save()
class Migration(migrations.Migration):
dependencies = [
('donation', '0042_amend_donation_view'),
]
operations = [
migrations.CreateModel(
name='Referral',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('reason', models.CharField(max_length=256)),
],
),
migrations.AddField(
model_name='pledge',
name='how_did_you_hear_about_us_db',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, verbose_name='How did you hear about us?', blank=True, to='donation.Referral', null=True),
),
migrations.RunPython(copy_existing_referrals_into_new_field)
]
|
[
"django.db.migrations.RunPython",
"django.db.models.AutoField",
"django.db.models.CharField",
"django.db.models.ForeignKey"
] |
[((1474, 1534), 'django.db.migrations.RunPython', 'migrations.RunPython', (['copy_existing_referrals_into_new_field'], {}), '(copy_existing_referrals_into_new_field)\n', (1494, 1534), False, 'from django.db import migrations, models\n'), ((1301, 1463), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.PROTECT', 'verbose_name': '"""How did you hear about us?"""', 'blank': '(True)', 'to': '"""donation.Referral"""', 'null': '(True)'}), "(on_delete=django.db.models.deletion.PROTECT, verbose_name\n ='How did you hear about us?', blank=True, to='donation.Referral', null\n =True)\n", (1318, 1463), False, 'from django.db import migrations, models\n'), ((992, 1085), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'serialize': '(False)', 'auto_created': '(True)', 'primary_key': '(True)'}), "(verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)\n", (1008, 1085), False, 'from django.db import migrations, models\n'), ((1111, 1143), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(256)'}), '(max_length=256)\n', (1127, 1143), False, 'from django.db import migrations, models\n')]
|
import heapq
import time
from os import path
from math import floor
class Heap:
def __init__(self):
self.size = 0
self.array = []
self.v2index_map = {}
def __get_parent_index(self, idx):
return int(floor((idx - 1) / 2))
def __get_left_child_index(self, idx):
return 2 * idx + 1
def __get_right_child_index(self, idx):
return 2 * idx + 2
def __swap_value(self, i, j):
t = self.array[i]
self.v2index_map[t[0]] = j
self.v2index_map[self.array[j][0]] = i
self.array[i] = self.array[j]
self.array[j] = t
def __bubble_up(self, idx):
parent_idx = self.__get_parent_index(idx)
while parent_idx >= 0:
if self.array[parent_idx][1] <= self.array[idx][1]:
break
self.__swap_value(parent_idx, idx)
idx = parent_idx
parent_idx = self.__get_parent_index(idx)
def __bubble_down(self, idx):
left_idx = self.__get_left_child_index(idx)
right_idx = self.__get_right_child_index(idx)
while left_idx < self.size or right_idx < self.size:
min_idx = left_idx
if left_idx >= self.size or (right_idx < self.size and self.array[right_idx][1] < self.array[left_idx][1]):
min_idx = right_idx
if self.array[idx][1] < self.array[min_idx][1]:
break
self.__swap_value(idx, min_idx)
idx = min_idx
left_idx = self.__get_left_child_index(idx)
right_idx = self.__get_right_child_index(idx)
def get_vertex_key(self, v_id):
return self.array[self.v2index_map[v_id]][1]
def pop(self):
if self.size < 1:
raise IndexError
min_node = self.array[0]
self.size = self.size - 1
self.__swap_value(0, self.size)
self.array.pop()
if self.size > 1:
self.__bubble_down(0)
del self.v2index_map[min_node[0]]
return min_node
def insert(self, node):
self.array.append(node)
self.v2index_map[node[0]] = self.size
self.size = self.size + 1
if self.size > 1:
self.__bubble_up(self.size - 1)
def modify_key(self, v_id, update_val):
idx = self.v2index_map[v_id]
self.array[idx] = (v_id, update_val)
parent_idx = self.__get_parent_index(idx)
if parent_idx >= 0 and self.array[idx][1] < self.array[parent_idx][1]:
self.__bubble_up(idx)
else:
self.__bubble_down(idx)
def read_graph(filename):
graph = dict()
with open(path.join('.', filename), 'r') as f:
for row in f.readlines():
edges = row.strip('\t\n').split('\t')
s = int(edges[0])
graph[s] = []
for i in range(1, len(edges)):
edge = edges[i].split(',')
graph[s].append((int(edge[0]), int(edge[1])))
return graph
def get_shortest_paths_heapq(graph):
heap = []
heapq.heappush(heap, (0, 1)) # (dj_score, vertex_id)
distances = {i: 1000000 for i in graph}
distances[1] = 0
X = []
while heap:
cur_distance, cur_v = heapq.heappop(heap)
if cur_distance > distances[cur_v]:
continue
# added to X
X.append(cur_v)
for neighbor, weight in graph[cur_v]:
dj_score = cur_distance + weight
if dj_score < distances[neighbor]:
distances[neighbor] = dj_score
heapq.heappush(heap, (dj_score, neighbor))
return distances, X
def get_shortest_paths_self_defined_heap(graph):
heap = Heap()
heap.insert((1, 0)) # (vertex_id, dj_score)
for v in graph:
if v != 1:
heap.insert((v, 1000000))
shortest_paths = dict()
n_v = len(graph)
while len(shortest_paths) < n_v:
assert len(shortest_paths) + heap.size == n_v
cur_v, v_score = heap.pop()
shortest_paths[cur_v] = v_score
for neighbor, weight in graph[cur_v]:
dj_score = v_score + weight
# import pdb;pdb.set_trace()
if neighbor not in shortest_paths and dj_score < heap.get_vertex_key(neighbor):
heap.modify_key(neighbor, dj_score)
return shortest_paths
if __name__ == "__main__":
# test case 1, output: {1: 0, 2: 1, 3: 2, 4: 2, 5: 3, 6: 4}
# graph = {
# 1: [(6, 7), (5, 3), (2, 1), (4, 2), (3, 3)],
# 2: [(1, 1), (3, 1), (4, 1), (6, 6)],
# 3: [(1, 3), (2, 1), (6, 2)],
# 4: [(2, 1), (1, 2), (6, 5)],
# 5: [(1, 3), (6, 3)],
# 6: [(1, 7), (3, 2), (2, 6), (4, 5), (5, 3)]
# }
graph = read_graph("Dijkstra.txt")
dedup_edges = set()
for k, _ in graph.items():
for v in _:
dedup_edges.add((k, v[0], v[1]))
dedup_edges.add((v[0], k, v[1]))
assert len(dedup_edges) == sum([len(e) for e in graph.values()])
# graph = {}
# heap = Heap()
# heap.insert((1,0))
# heap.insert((2,0))
# heap.pop()
start_t = time.time()
min_distances,X = get_shortest_paths_heapq(graph)
print(time.time() - start_t)
# print(min_distances)
e = [7, 37, 59, 82, 99, 115, 133, 165, 188, 197]
print(",".join([str(int(min_distances[i])) for i in e]))
start_t = time.time()
    min_distances = get_shortest_paths_self_defined_heap(graph)
print(time.time() - start_t)
# print(min_distances)
e = [7, 37, 59, 82, 99, 115, 133, 165, 188, 197]
print(",".join([str(int(min_distances[i])) for i in e]))
|
[
"math.floor",
"os.path.join",
"heapq.heappop",
"heapq.heappush",
"time.time"
] |
[((3047, 3075), 'heapq.heappush', 'heapq.heappush', (['heap', '(0, 1)'], {}), '(heap, (0, 1))\n', (3061, 3075), False, 'import heapq\n'), ((5116, 5127), 'time.time', 'time.time', ([], {}), '()\n', (5125, 5127), False, 'import time\n'), ((5372, 5383), 'time.time', 'time.time', ([], {}), '()\n', (5381, 5383), False, 'import time\n'), ((3224, 3243), 'heapq.heappop', 'heapq.heappop', (['heap'], {}), '(heap)\n', (3237, 3243), False, 'import heapq\n'), ((241, 261), 'math.floor', 'floor', (['((idx - 1) / 2)'], {}), '((idx - 1) / 2)\n', (246, 261), False, 'from math import floor\n'), ((2646, 2670), 'os.path.join', 'path.join', (['"""."""', 'filename'], {}), "('.', filename)\n", (2655, 2670), False, 'from os import path\n'), ((5192, 5203), 'time.time', 'time.time', ([], {}), '()\n', (5201, 5203), False, 'import time\n'), ((5461, 5472), 'time.time', 'time.time', ([], {}), '()\n', (5470, 5472), False, 'import time\n'), ((3557, 3599), 'heapq.heappush', 'heapq.heappush', (['heap', '(dj_score, neighbor)'], {}), '(heap, (dj_score, neighbor))\n', (3571, 3599), False, 'import heapq\n')]
|
# -*- coding: utf-8 -*-
"""Test the terminaltables output adapter."""
from __future__ import unicode_literals
from textwrap import dedent
import pytest
from cli_helpers.compat import HAS_PYGMENTS
from cli_helpers.tabular_output import terminaltables_adapter
if HAS_PYGMENTS:
from pygments.style import Style
from pygments.token import Token
def test_terminal_tables_adapter():
"""Test the terminaltables output adapter."""
data = [['abc', 1], ['d', 456]]
headers = ['letters', 'number']
output = terminaltables_adapter.adapter(
iter(data), headers, table_format='ascii')
assert "\n".join(output) == dedent('''\
+---------+--------+
| letters | number |
+---------+--------+
| abc | 1 |
| d | 456 |
+---------+--------+''')
@pytest.mark.skipif(not HAS_PYGMENTS, reason='requires the Pygments library')
def test_style_output_table():
"""Test that *style_output_table()* styles the output table."""
class CliStyle(Style):
default_style = ""
styles = {
Token.Output.TableSeparator: '#ansired',
}
headers = ['h1', 'h2']
data = [['观音', '2'], ['Ποσειδῶν', 'b']]
style_output_table = terminaltables_adapter.style_output_table('ascii')
style_output_table(data, headers, style=CliStyle)
output = terminaltables_adapter.adapter(iter(data), headers, table_format='ascii')
assert "\n".join(output) == dedent('''\
\x1b[31;01m+\x1b[39;00m''' + (
('\x1b[31;01m-\x1b[39;00m' * 10) +
'\x1b[31;01m+\x1b[39;00m' +
('\x1b[31;01m-\x1b[39;00m' * 4)) +
'''\x1b[31;01m+\x1b[39;00m
\x1b[31;01m|\x1b[39;00m h1 \x1b[31;01m|\x1b[39;00m''' +
''' h2 \x1b[31;01m|\x1b[39;00m
''' + '\x1b[31;01m+\x1b[39;00m' + (
('\x1b[31;01m-\x1b[39;00m' * 10) +
'\x1b[31;01m+\x1b[39;00m' +
('\x1b[31;01m-\x1b[39;00m' * 4)) +
'''\x1b[31;01m+\x1b[39;00m
\x1b[31;01m|\x1b[39;00m 观音 \x1b[31;01m|\x1b[39;00m''' +
''' 2 \x1b[31;01m|\x1b[39;00m
\x1b[31;01m|\x1b[39;00m Ποσειδῶν \x1b[31;01m|\x1b[39;00m''' +
''' b \x1b[31;01m|\x1b[39;00m
''' + '\x1b[31;01m+\x1b[39;00m' + (
('\x1b[31;01m-\x1b[39;00m' * 10) +
'\x1b[31;01m+\x1b[39;00m' +
('\x1b[31;01m-\x1b[39;00m' * 4)) +
'\x1b[31;01m+\x1b[39;00m')
|
[
"cli_helpers.tabular_output.terminaltables_adapter.style_output_table",
"textwrap.dedent",
"pytest.mark.skipif"
] |
[((834, 910), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not HAS_PYGMENTS)'], {'reason': '"""requires the Pygments library"""'}), "(not HAS_PYGMENTS, reason='requires the Pygments library')\n", (852, 910), False, 'import pytest\n'), ((1243, 1293), 'cli_helpers.tabular_output.terminaltables_adapter.style_output_table', 'terminaltables_adapter.style_output_table', (['"""ascii"""'], {}), "('ascii')\n", (1284, 1293), False, 'from cli_helpers.tabular_output import terminaltables_adapter\n'), ((641, 838), 'textwrap.dedent', 'dedent', (['""" +---------+--------+\n | letters | number |\n +---------+--------+\n | abc | 1 |\n | d | 456 |\n +---------+--------+"""'], {}), '(\n """ +---------+--------+\n | letters | number |\n +---------+--------+\n | abc | 1 |\n | d | 456 |\n +---------+--------+"""\n )\n', (647, 838), False, 'from textwrap import dedent\n'), ((1469, 2251), 'textwrap.dedent', 'dedent', (['(\' \\x1b[31;01m+\\x1b[39;00m\' + (\'\\x1b[31;01m-\\x1b[39;00m\' * 10 +\n \'\\x1b[31;01m+\\x1b[39;00m\' + \'\\x1b[31;01m-\\x1b[39;00m\' * 4) +\n """\x1b[31;01m+\x1b[39;00m\n \x1b[31;01m|\x1b[39;00m h1 \x1b[31;01m|\x1b[39;00m"""\n + \' h2 \\x1b[31;01m|\\x1b[39;00m\\n \' + \'\\x1b[31;01m+\\x1b[39;00m\' +\n (\'\\x1b[31;01m-\\x1b[39;00m\' * 10 + \'\\x1b[31;01m+\\x1b[39;00m\' + \n \'\\x1b[31;01m-\\x1b[39;00m\' * 4) +\n """\x1b[31;01m+\x1b[39;00m\n \x1b[31;01m|\x1b[39;00m 观音 \x1b[31;01m|\x1b[39;00m"""\n +\n """ 2 \x1b[31;01m|\x1b[39;00m\n \x1b[31;01m|\x1b[39;00m Ποσειδῶν \x1b[31;01m|\x1b[39;00m"""\n + \' b \\x1b[31;01m|\\x1b[39;00m\\n \' + \'\\x1b[31;01m+\\x1b[39;00m\' +\n (\'\\x1b[31;01m-\\x1b[39;00m\' * 10 + \'\\x1b[31;01m+\\x1b[39;00m\' + \n \'\\x1b[31;01m-\\x1b[39;00m\' * 4) + \'\\x1b[31;01m+\\x1b[39;00m\')'], {}), '(\' \\x1b[31;01m+\\x1b[39;00m\' + (\'\\x1b[31;01m-\\x1b[39;00m\' * 10 +\n \'\\x1b[31;01m+\\x1b[39;00m\' + \'\\x1b[31;01m-\\x1b[39;00m\' * 4) +\n """\x1b[31;01m+\x1b[39;00m\n \x1b[31;01m|\x1b[39;00m h1 \x1b[31;01m|\x1b[39;00m"""\n + \' h2 \\x1b[31;01m|\\x1b[39;00m\\n \' + \'\\x1b[31;01m+\\x1b[39;00m\' +\n (\'\\x1b[31;01m-\\x1b[39;00m\' * 10 + \'\\x1b[31;01m+\\x1b[39;00m\' + \n \'\\x1b[31;01m-\\x1b[39;00m\' * 4) +\n """\x1b[31;01m+\x1b[39;00m\n \x1b[31;01m|\x1b[39;00m 观音 \x1b[31;01m|\x1b[39;00m"""\n +\n """ 2 \x1b[31;01m|\x1b[39;00m\n \x1b[31;01m|\x1b[39;00m Ποσειδῶν \x1b[31;01m|\x1b[39;00m"""\n + \' b \\x1b[31;01m|\\x1b[39;00m\\n \' + \'\\x1b[31;01m+\\x1b[39;00m\' +\n (\'\\x1b[31;01m-\\x1b[39;00m\' * 10 + \'\\x1b[31;01m+\\x1b[39;00m\' + \n \'\\x1b[31;01m-\\x1b[39;00m\' * 4) + \'\\x1b[31;01m+\\x1b[39;00m\')\n', (1475, 2251), False, 'from textwrap import dedent\n')]
|
#!/usr/bin/python3
# -*- coding:utf-8 -*-
# __author__ = '__MeGustas__'
from django.test import TestCase
from django.db import connection
from tutorials.create_table.models import *
# Create your tests here.
class TestHealthFile(TestCase):
def setUp(self):
cursor = connection.cursor()
# Populate Customers table
cursor.execute("INSERT INTO Customers(cust_id, cust_name, cust_address, cust_city, cust_state, cust_zip, cust_country, cust_contact, cust_email) \
VALUES('1000000001', 'Village Toys', '200 Maple Lane', 'Detroit', 'MI', '44444', 'USA', '<NAME>', '<EMAIL>');")
cursor.execute("INSERT INTO Customers(cust_id, cust_name, cust_address, cust_city, cust_state, cust_zip, cust_country, cust_contact) \
VALUES('1000000002', 'Kids Place', '333 South Lake Drive', 'Columbus', 'OH', '43333', 'USA', 'Michelle Green');")
cursor.execute("INSERT INTO Customers(cust_id, cust_name, cust_address, cust_city, cust_state, cust_zip, cust_country, cust_contact, cust_email) \
VALUES('1000000003', 'Fun4All', '1 Sunny Place', 'Muncie', 'IN', '42222', 'USA', '<NAME>', '<EMAIL>');")
cursor.execute("INSERT INTO Customers(cust_id, cust_name, cust_address, cust_city, cust_state, cust_zip, cust_country, cust_contact, cust_email) \
VALUES('1000000004', 'Fun4All', '829 Riverside Drive', 'Phoenix', 'AZ', '88888', 'USA', '<NAME>', '<EMAIL>');")
cursor.execute("INSERT INTO Customers(cust_id, cust_name, cust_address, cust_city, cust_state, cust_zip, cust_country, cust_contact) \
VALUES('1000000005', 'The Toy Store', '4545 53rd Street', 'Chicago', 'IL', '54545', 'USA', '<NAME>');")
# Populate Vendors table
cursor.execute("INSERT INTO Vendors(vend_id, vend_name, vend_address, vend_city, vend_state, vend_zip, vend_country) \
VALUES('BRS01','Bears R Us','123 Main Street','Bear Town','MI','44444', 'USA');")
cursor.execute("INSERT INTO Vendors(vend_id, vend_name, vend_address, vend_city, vend_state, vend_zip, vend_country) \
VALUES('BRE02','Bear Emporium','500 Park Street','Anytown','OH','44333', 'USA');")
cursor.execute("INSERT INTO Vendors(vend_id, vend_name, vend_address, vend_city, vend_state, vend_zip, vend_country) \
VALUES('DLL01','Doll House Inc.','555 High Street','Dollsville','CA','99999', 'USA');")
cursor.execute("INSERT INTO Vendors(vend_id, vend_name, vend_address, vend_city, vend_state, vend_zip, vend_country) \
VALUES('FRB01','Furball Inc.','1000 5th Avenue','New York','NY','11111', 'USA');")
cursor.execute("INSERT INTO Vendors(vend_id, vend_name, vend_address, vend_city, vend_state, vend_zip, vend_country) \
VALUES('FNG01','Fun and Games','42 Galaxy Road','London', NULL,'N16 6PS', 'England');")
cursor.execute("INSERT INTO Vendors(vend_id, vend_name, vend_address, vend_city, vend_state, vend_zip, vend_country) \
VALUES('JTS01','Jouets et ours','1 Rue Amusement','Paris', NULL,'45678', 'France');")
# Populate Products table
cursor.execute("INSERT INTO Products(prod_id, vend_id, prod_name, prod_price, prod_desc) \
VALUES('BR01', 'BRS01', '8 inch teddy bear', 5.99, '8 inch teddy bear, comes with cap and jacket');")
cursor.execute("INSERT INTO Products(prod_id, vend_id, prod_name, prod_price, prod_desc) \
VALUES('BR02', 'BRS01', '12 inch teddy bear', 8.99, '12 inch teddy bear, comes with cap and jacket');")
cursor.execute("INSERT INTO Products(prod_id, vend_id, prod_name, prod_price, prod_desc) \
VALUES('BR03', 'BRS01', '18 inch teddy bear', 11.99, '18 inch teddy bear, comes with cap and jacket');")
cursor.execute("INSERT INTO Products(prod_id, vend_id, prod_name, prod_price, prod_desc) \
VALUES('BNBG01', 'DLL01', 'Fish bean bag toy', 3.49, 'Fish bean bag toy, complete with bean bag worms with which to feed it');")
cursor.execute("INSERT INTO Products(prod_id, vend_id, prod_name, prod_price, prod_desc) \
VALUES('BNBG02', 'DLL01', 'Bird bean bag toy', 3.49, 'Bird bean bag toy, eggs are not included');")
cursor.execute("INSERT INTO Products(prod_id, vend_id, prod_name, prod_price, prod_desc) \
VALUES('BNBG03', 'DLL01', 'Rabbit bean bag toy', 3.49, 'Rabbit bean bag toy, comes with bean bag carrots');")
cursor.execute("INSERT INTO Products(prod_id, vend_id, prod_name, prod_price, prod_desc) \
VALUES('RGAN01', 'DLL01', 'Raggedy Ann', 4.99, '18 inch Raggedy Ann doll');")
cursor.execute("INSERT INTO Products(prod_id, vend_id, prod_name, prod_price, prod_desc) \
VALUES('RYL01', 'FNG01', 'King doll', 9.49, '12 inch king doll with royal garments and crown');")
cursor.execute("INSERT INTO Products(prod_id, vend_id, prod_name, prod_price, prod_desc) \
VALUES('RYL02', 'FNG01', 'Queen doll', 9.49, '12 inch queen doll with royal garments and crown');")
# Populate Orders table
cursor.execute("INSERT INTO Orders(order_num, order_date, cust_id) \
VALUES(20005, '2020-05-01', '1000000001');")
cursor.execute("INSERT INTO Orders(order_num, order_date, cust_id) \
VALUES(20006, '2020-01-12', '1000000003');")
cursor.execute("INSERT INTO Orders(order_num, order_date, cust_id) \
VALUES(20007, '2020-01-30', '1000000004');")
cursor.execute("INSERT INTO Orders(order_num, order_date, cust_id) \
VALUES(20008, '2020-02-03', '1000000005');")
cursor.execute("INSERT INTO Orders(order_num, order_date, cust_id) \
VALUES(20009, '2020-02-08', '1000000001');")
# Populate OrderItems table
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20005, 1, 'BR01', 100, 5.49);")
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20005, 2, 'BR03', 100, 10.99);")
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20006, 1, 'BR01', 20, 5.99);")
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20006, 2, 'BR02', 10, 8.99);")
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20006, 3, 'BR03', 10, 11.99);")
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20007, 1, 'BR03', 50, 11.49);")
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20007, 2, 'BNBG01', 100, 2.99);")
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20007, 3, 'BNBG02', 100, 2.99);")
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20007, 4, 'BNBG03', 100, 2.99);")
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20007, 5, 'RGAN01', 50, 4.49);")
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20008, 1, 'RGAN01', 5, 4.99);")
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20008, 2, 'BR03', 5, 11.99);")
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20008, 3, 'BNBG01', 10, 3.49);")
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20008, 4, 'BNBG02', 10, 3.49);")
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20008, 5, 'BNBG03', 10, 3.49);")
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20009, 1, 'BNBG01', 250, 2.49);")
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20009, 2, 'BNBG02', 250, 2.49);")
cursor.execute("INSERT INTO OrderItems(order_num, order_item, prod_id, quantity, item_price) \
VALUES(20009, 3, 'BNBG03', 250, 2.49);")
def tearDown(self):
# Clean up run after every test method.
Customers.objects.all().delete()
Vendors.objects.all().delete()
Orders.objects.all().delete()
OrderItems.objects.all().delete()
Products.objects.all().delete()
def test_customers(self):
for i in Customers.objects.all():
print(i.to_dict())
for i in Vendors.objects.all():
print(i.to_dict())
for i in Orders.objects.all():
print(i.to_dict())
for i in OrderItems.objects.all():
print(i.to_dict())
for i in Products.objects.all():
print(i.to_dict())
|
[
"django.db.connection.cursor"
] |
[((284, 303), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (301, 303), False, 'from django.db import connection\n')]
|
import sys
sys.path.append("..")  # add the parent directory to the path so the tools can be imported
from make_hydrodem import bathymetricGradient
workspace = r"" # path to geodatabase to use as a workspace
snapGrid = r"" # path to snapping grid
hucPoly = r"" # path to local folder polygon
hydrographyArea = r"" # path to NHD area feature class
hydrographyFlowline = r"" # path to NHD flowline feature class
hydrographyWaterbody = r"" # path to NHD water body feature class
cellsize = '' # cell size
bathymetricGradient(workspace, snapGrid, hucPoly, hydrographyArea,
hydrographyFlowline, hydrographyWaterbody,cellsize)
|
[
"sys.path.append",
"make_hydrodem.bathymetricGradient"
] |
[((12, 33), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (27, 33), False, 'import sys\n'), ((482, 605), 'make_hydrodem.bathymetricGradient', 'bathymetricGradient', (['workspace', 'snapGrid', 'hucPoly', 'hydrographyArea', 'hydrographyFlowline', 'hydrographyWaterbody', 'cellsize'], {}), '(workspace, snapGrid, hucPoly, hydrographyArea,\n hydrographyFlowline, hydrographyWaterbody, cellsize)\n', (501, 605), False, 'from make_hydrodem import bathymetricGradient\n')]
|
# -*- coding: utf-8 -*-
# @Time: 2020/11/8 23:47
# @Author: GraceKoo
# @File: test.py
# @Desc:
from threading import Thread
import time
def print_numbers():
time.sleep(0.2)
print("子线程结束")
if __name__ == "__main__":
t1 = Thread(target=print_numbers)
t1.setDaemon(True)
t1.start()
# print("主线程结束")
|
[
"threading.Thread",
"time.sleep"
] |
[((163, 178), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (173, 178), False, 'import time\n'), ((236, 264), 'threading.Thread', 'Thread', ([], {'target': 'print_numbers'}), '(target=print_numbers)\n', (242, 264), False, 'from threading import Thread\n')]
|
"""
Django settings for massenergize_portal_backend project.
Generated by 'django-admin startproject' using Django 2.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
import firebase_admin
from firebase_admin import credentials
from .utils.utils import load_json
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# ******** LOAD CONFIG DATA ***********#
IS_PROD = False
path_to_config = '/_main_/config/massenergizeProdConfig.json' if IS_PROD else '/_main_/config/massenergizeProjectConfig.json'
CONFIG_DATA = load_json(BASE_DIR + path_to_config)
os.environ.update(CONFIG_DATA)
# ******** END LOAD CONFIG DATA ***********#
SECRET_KEY = CONFIG_DATA["SECRET_KEY"]
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = [
'localhost',
'127.0.0.1',
'api.massenergize.org',
'apis.massenergize.org',
'api.massenergize.com',
'apis.massenergize.com',
'api-prod.massenergize.org',
'api.prod.massenergize.org',
'api-dev.massenergize.org',
'api.dev.massenergize.org',
'massenergize-api.wpdvzstek2.us-east-2.elasticbeanstalk.com'
]
INSTALLED_APPS = [
'authentication',
'carbon_calculator',
'database',
'api',
'website',
'corsheaders',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
#custom middlewares
'authentication.middleware.MassenergizeJWTAuthMiddleware'
]
#-------- FILE STORAGE CONFIGURATION ---------------------#
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
STATICFILES_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
#-------- FILE STORAGE CONFIGURATION ---------------------#
#-------- AWS CONFIGURATION ---------------------#
AWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = os.environ.get('AWS_STORAGE_BUCKET_NAME')
AWS_S3_SIGNATURE_VERSION = os.environ.get('AWS_S3_SIGNATURE_VERSION')
AWS_S3_REGION_NAME = os.environ.get('AWS_S3_REGION_NAME')
AWS_DEFAULT_ACL = None
#--------END AWS CONFIGURATION ---------------------#
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_CREDENTIALS = True
DATA_UPLOAD_MAX_MEMORY_SIZE = 2621440*3
ROOT_URLCONF = '_main_.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = '_main_.wsgi.application'
CSRF_COOKIE_SECURE = False
SESSION_COOKIE_SECURE = False
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'remote-default': {
'ENGINE' : os.environ.get('DATABASE_ENGINE'),
'NAME' : os.environ.get('DATABASE_NAME'),
'USER' : os.environ.get('DATABASE_USER'),
'PASSWORD' : os.environ.get('DATABASE_PASSWORD'),
'HOST' : os.environ.get('DATABASE_HOST'),
'PORT' : os.environ.get('DATABASE_PORT')
},
'default': {
'ENGINE' : os.environ.get('DATABASE_ENGINE'),
'NAME' : 'gchekler21',
'USER' : '',
'PASSWORD' : '',
'HOST' : 'localhost',
'PORT' : '5555'
},
}
firebase_service_account_path = '/_main_/config/massenergizeProdFirebaseServiceAccount.json' if IS_PROD else '/_main_/config/massenergizeFirebaseServiceAccount.json'
FIREBASE_CREDENTIALS = credentials.Certificate(BASE_DIR + firebase_service_account_path)
firebase_admin.initialize_app(FIREBASE_CREDENTIALS)
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = os.environ.get('EMAIL')
DEFAULT_FROM_EMAIL = os.environ.get('EMAIL')
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_PASSWORD')
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
# Simplified static file serving.
STATICFILES_LOCATION = 'static'
MEDIAFILES_LOCATION = 'media'
|
[
"firebase_admin.initialize_app",
"os.environ.get",
"os.environ.update",
"firebase_admin.credentials.Certificate",
"os.path.abspath"
] |
[((814, 844), 'os.environ.update', 'os.environ.update', (['CONFIG_DATA'], {}), '(CONFIG_DATA)\n', (831, 844), False, 'import os\n'), ((2574, 2609), 'os.environ.get', 'os.environ.get', (['"""AWS_ACCESS_KEY_ID"""'], {}), "('AWS_ACCESS_KEY_ID')\n", (2588, 2609), False, 'import os\n'), ((2637, 2676), 'os.environ.get', 'os.environ.get', (['"""AWS_SECRET_ACCESS_KEY"""'], {}), "('AWS_SECRET_ACCESS_KEY')\n", (2651, 2676), False, 'import os\n'), ((2704, 2745), 'os.environ.get', 'os.environ.get', (['"""AWS_STORAGE_BUCKET_NAME"""'], {}), "('AWS_STORAGE_BUCKET_NAME')\n", (2718, 2745), False, 'import os\n'), ((2773, 2815), 'os.environ.get', 'os.environ.get', (['"""AWS_S3_SIGNATURE_VERSION"""'], {}), "('AWS_S3_SIGNATURE_VERSION')\n", (2787, 2815), False, 'import os\n'), ((2843, 2879), 'os.environ.get', 'os.environ.get', (['"""AWS_S3_REGION_NAME"""'], {}), "('AWS_S3_REGION_NAME')\n", (2857, 2879), False, 'import os\n'), ((4557, 4622), 'firebase_admin.credentials.Certificate', 'credentials.Certificate', (['(BASE_DIR + firebase_service_account_path)'], {}), '(BASE_DIR + firebase_service_account_path)\n', (4580, 4622), False, 'from firebase_admin import credentials\n'), ((4623, 4674), 'firebase_admin.initialize_app', 'firebase_admin.initialize_app', (['FIREBASE_CREDENTIALS'], {}), '(FIREBASE_CREDENTIALS)\n', (4652, 4674), False, 'import firebase_admin\n'), ((5522, 5545), 'os.environ.get', 'os.environ.get', (['"""EMAIL"""'], {}), "('EMAIL')\n", (5536, 5545), False, 'import os\n'), ((5567, 5590), 'os.environ.get', 'os.environ.get', (['"""EMAIL"""'], {}), "('EMAIL')\n", (5581, 5590), False, 'import os\n'), ((5613, 5645), 'os.environ.get', 'os.environ.get', (['"""EMAIL_PASSWORD"""'], {}), "('EMAIL_PASSWORD')\n", (5627, 5645), False, 'import os\n'), ((550, 575), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (565, 575), False, 'import os\n'), ((3822, 3855), 'os.environ.get', 'os.environ.get', (['"""DATABASE_ENGINE"""'], {}), "('DATABASE_ENGINE')\n", (3836, 3855), False, 'import os\n'), ((3878, 3909), 'os.environ.get', 'os.environ.get', (['"""DATABASE_NAME"""'], {}), "('DATABASE_NAME')\n", (3892, 3909), False, 'import os\n'), ((3932, 3963), 'os.environ.get', 'os.environ.get', (['"""DATABASE_USER"""'], {}), "('DATABASE_USER')\n", (3946, 3963), False, 'import os\n'), ((3986, 4021), 'os.environ.get', 'os.environ.get', (['"""DATABASE_PASSWORD"""'], {}), "('DATABASE_PASSWORD')\n", (4000, 4021), False, 'import os\n'), ((4044, 4075), 'os.environ.get', 'os.environ.get', (['"""DATABASE_HOST"""'], {}), "('DATABASE_HOST')\n", (4058, 4075), False, 'import os\n'), ((4098, 4129), 'os.environ.get', 'os.environ.get', (['"""DATABASE_PORT"""'], {}), "('DATABASE_PORT')\n", (4112, 4129), False, 'import os\n'), ((4176, 4209), 'os.environ.get', 'os.environ.get', (['"""DATABASE_ENGINE"""'], {}), "('DATABASE_ENGINE')\n", (4190, 4209), False, 'import os\n')]
|
import asyncio
# from aiorpcgrid.client import Client
from aiorpcgrid.task import AsyncTask, State
class AsyncClient:
_provider = None
_method = None
_requests: dict = {}
_running = True
_request_queue: asyncio.Queue = asyncio.Queue()
_loop = None
def __init__(self, provider, loop=None):
self._provider = provider
if loop is None:
loop = asyncio.get_event_loop()
self._loop = loop
async def open(self):
await self._provider.open()
asyncio.ensure_future(self.request_loop(), loop=self._loop)
asyncio.ensure_future(self.run(), loop=self._loop)
return self
async def close(self):
self._running = False
await self._provider.close()
await self._request_queue.put(None)
async def request_loop(self):
while self._running:
task = await self._request_queue.get()
if task is not None:
                await self._provider.call_method(task)
task.status = State.RUNNING
if self._request_queue.empty():
self._request_queue.task_done()
async def run(self):
while self._running:
responses = await self._provider.recv()
if responses is not None:
for response in responses:
if response.id in self._requests:
task = self._requests[response.id]
task.result = response.result
task.error = response.error
if task.error is None:
self._requests[
response.id
].status = State.COMPLETED
else:
self._requests[response.id].status = State.FAILED
task.event.set()
del self._requests[response.id]
if task._callback is not None:
asyncio.ensure_future(
task.callback(task), loop=self._loop
)
def __call__(self, *args, **kwargs):
        if not self._provider.is_connected():
raise ConnectionError(f'Connection lost. {self._provider}')
task = AsyncTask().create(self._method, *args, **kwargs)
if 'parallel' in kwargs:
task._parallel = kwargs['parallel']
self._method = None
task.status = State.PENDING
self._requests[task.id] = task
self._request_queue.put_nowait(self._requests[task.id])
return self._requests[task.id]
|
[
"asyncio.get_event_loop",
"asyncio.Queue",
"aiorpcgrid.task.AsyncTask"
] |
[((242, 257), 'asyncio.Queue', 'asyncio.Queue', ([], {}), '()\n', (255, 257), False, 'import asyncio\n'), ((399, 423), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (421, 423), False, 'import asyncio\n'), ((2319, 2330), 'aiorpcgrid.task.AsyncTask', 'AsyncTask', ([], {}), '()\n', (2328, 2330), False, 'from aiorpcgrid.task import AsyncTask, State\n')]
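As a clarifying aid for the request/response bookkeeping above: the client queues outgoing calls, keeps pending tasks keyed by request id, and resolves each one when the provider delivers a matching response. Below is a minimal, self-contained sketch of that same pattern. It does not use aiorpcgrid; EchoProvider, call and dispatcher are invented stand-ins for illustration only.
import asyncio
import itertools

class EchoProvider:
    """Hypothetical transport that simply echoes each request's params back."""
    def __init__(self):
        self._queue = asyncio.Queue()

    async def call_method(self, request):
        await self._queue.put({"id": request["id"], "result": request["params"]})

    async def recv(self):
        return [await self._queue.get()]

async def main():
    provider = EchoProvider()
    ids = itertools.count()
    pending = {}  # request id -> future, playing the role of AsyncClient._requests

    async def call(params):
        request = {"id": next(ids), "params": params}
        future = asyncio.get_running_loop().create_future()
        pending[request["id"]] = future
        await provider.call_method(request)
        return await future

    async def dispatcher():
        # Match incoming responses back to their pending futures by id.
        while pending:
            for response in await provider.recv():
                pending.pop(response["id"]).set_result(response["result"])

    caller = asyncio.ensure_future(call("ping"))
    await asyncio.sleep(0)   # let the request get registered and sent
    await dispatcher()
    print(await caller)     # -> ping

asyncio.run(main())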
|
# Copyright 2020, OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: usage
"""
The opentelemetry-instrumentation-aws-lambda package allows tracing AWS
Lambda function.
Usage
-----
.. code:: python
# Copy this snippet into AWS Lambda function
# Ref Doc: https://docs.aws.amazon.com/lambda/latest/dg/lambda-python.html
import boto3
from opentelemetry.instrumentation.aws_lambda import (
AwsLambdaInstrumentor
)
# Enable instrumentation
AwsLambdaInstrumentor().instrument()
# Lambda function
def lambda_handler(event, context):
s3 = boto3.resource('s3')
for bucket in s3.buckets.all():
print(bucket.name)
return "200 OK"
API
---
"""
import logging
import os
from importlib import import_module
from wrapt import wrap_function_wrapper
# TODO: aws propagator
from opentelemetry.sdk.extension.aws.trace.propagation.aws_xray_format import (
AwsXRayFormat,
)
from opentelemetry.instrumentation.aws_lambda.version import __version__
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
from opentelemetry.instrumentation.utils import unwrap
from opentelemetry.trace import SpanKind, get_tracer, get_tracer_provider
logger = logging.getLogger(__name__)
class AwsLambdaInstrumentor(BaseInstrumentor):
def _instrument(self, **kwargs):
self._tracer = get_tracer(__name__, __version__, kwargs.get("tracer_provider"))
self._tracer_provider = get_tracer_provider()
lambda_handler = os.environ.get("ORIG_HANDLER", os.environ.get("_HANDLER"))
wrapped_names = lambda_handler.rsplit(".", 1)
self._wrapped_module_name = wrapped_names[0]
self._wrapped_function_name = wrapped_names[1]
wrap_function_wrapper(
self._wrapped_module_name,
self._wrapped_function_name,
self._functionPatch,
)
def _uninstrument(self, **kwargs):
unwrap(
import_module(self._wrapped_module_name),
self._wrapped_function_name,
)
def _functionPatch(self, original_func, instance, args, kwargs):
lambda_context = args[1]
ctx_aws_request_id = lambda_context.aws_request_id
ctx_invoked_function_arn = lambda_context.invoked_function_arn
orig_handler = os.environ.get("ORIG_HANDLER", os.environ.get("_HANDLER"))
# TODO: enable propagate from AWS by env variable
xray_trace_id = os.environ.get("_X_AMZN_TRACE_ID", "")
lambda_name = os.environ.get("AWS_LAMBDA_FUNCTION_NAME")
function_version = os.environ.get("AWS_LAMBDA_FUNCTION_VERSION")
propagator = AwsXRayFormat()
parent_context = propagator.extract({"X-Amzn-Trace-Id": xray_trace_id})
with self._tracer.start_as_current_span(
name=orig_handler, context=parent_context, kind=SpanKind.SERVER
) as span:
# Refer: https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/trace/semantic_conventions/faas.md#example
span.set_attribute("faas.execution", ctx_aws_request_id)
span.set_attribute("faas.id", ctx_invoked_function_arn)
            # TODO: fix in Collector because they belong to resource attributes
span.set_attribute("faas.name", lambda_name)
span.set_attribute("faas.version", function_version)
result = original_func(*args, **kwargs)
# force_flush before function quit in case of Lambda freeze.
self._tracer_provider.force_flush()
return result
|
[
"logging.getLogger",
"opentelemetry.sdk.extension.aws.trace.propagation.aws_xray_format.AwsXRayFormat",
"importlib.import_module",
"os.environ.get",
"opentelemetry.trace.get_tracer_provider",
"wrapt.wrap_function_wrapper"
] |
[((1755, 1782), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1772, 1782), False, 'import logging\n'), ((1990, 2011), 'opentelemetry.trace.get_tracer_provider', 'get_tracer_provider', ([], {}), '()\n', (2009, 2011), False, 'from opentelemetry.trace import SpanKind, get_tracer, get_tracer_provider\n'), ((2268, 2371), 'wrapt.wrap_function_wrapper', 'wrap_function_wrapper', (['self._wrapped_module_name', 'self._wrapped_function_name', 'self._functionPatch'], {}), '(self._wrapped_module_name, self.\n _wrapped_function_name, self._functionPatch)\n', (2289, 2371), False, 'from wrapt import wrap_function_wrapper\n'), ((2973, 3011), 'os.environ.get', 'os.environ.get', (['"""_X_AMZN_TRACE_ID"""', '""""""'], {}), "('_X_AMZN_TRACE_ID', '')\n", (2987, 3011), False, 'import os\n'), ((3035, 3077), 'os.environ.get', 'os.environ.get', (['"""AWS_LAMBDA_FUNCTION_NAME"""'], {}), "('AWS_LAMBDA_FUNCTION_NAME')\n", (3049, 3077), False, 'import os\n'), ((3105, 3150), 'os.environ.get', 'os.environ.get', (['"""AWS_LAMBDA_FUNCTION_VERSION"""'], {}), "('AWS_LAMBDA_FUNCTION_VERSION')\n", (3119, 3150), False, 'import os\n'), ((3173, 3188), 'opentelemetry.sdk.extension.aws.trace.propagation.aws_xray_format.AwsXRayFormat', 'AwsXRayFormat', ([], {}), '()\n', (3186, 3188), False, 'from opentelemetry.sdk.extension.aws.trace.propagation.aws_xray_format import AwsXRayFormat\n'), ((2069, 2095), 'os.environ.get', 'os.environ.get', (['"""_HANDLER"""'], {}), "('_HANDLER')\n", (2083, 2095), False, 'import os\n'), ((2482, 2522), 'importlib.import_module', 'import_module', (['self._wrapped_module_name'], {}), '(self._wrapped_module_name)\n', (2495, 2522), False, 'from importlib import import_module\n'), ((2862, 2888), 'os.environ.get', 'os.environ.get', (['"""_HANDLER"""'], {}), "('_HANDLER')\n", (2876, 2888), False, 'import os\n')]
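The key mechanism in _instrument above is wrapt.wrap_function_wrapper, which swaps a named module attribute for a wrapper that receives (wrapped, instance, args, kwargs). Here is a small standalone sketch of that patching pattern against a throwaway module; the module name, handler and print statements are invented for illustration, and no tracing is involved.
import sys
import types
from wrapt import wrap_function_wrapper

# Throwaway module standing in for the user's Lambda handler module.
toy = types.ModuleType("toy_handler_module")

def lambda_handler(event, context):
    return {"statusCode": 200, "body": "hello"}

toy.lambda_handler = lambda_handler
sys.modules["toy_handler_module"] = toy

def patch(wrapped, instance, args, kwargs):
    # The real instrumentor opens a span here before delegating to the handler.
    print("before handler")
    result = wrapped(*args, **kwargs)
    print("after handler")
    return result

wrap_function_wrapper("toy_handler_module", "lambda_handler", patch)

print(toy.lambda_handler({"key": "value"}, None))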
|
# Generated by Django 4.0.2 on 2022-04-01 16:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('instructors', '0020_alter_user_description_alter_user_title'),
]
operations = [
migrations.AlterField(
model_name='user',
name='avatar_url',
field=models.ImageField(default='profile_pics/einstein_EqBibwO.jpeg', upload_to='profile_pics'),
),
]
|
[
"django.db.models.ImageField"
] |
[((363, 457), 'django.db.models.ImageField', 'models.ImageField', ([], {'default': '"""profile_pics/einstein_EqBibwO.jpeg"""', 'upload_to': '"""profile_pics"""'}), "(default='profile_pics/einstein_EqBibwO.jpeg', upload_to=\n 'profile_pics')\n", (380, 457), False, 'from django.db import migrations, models\n')]
|
"""
@author : <NAME>
"""
from __future__ import division
import sys
import unittest
from nose.plugins.skip import SkipTest
from jv import JvWorker
from quantecon import compute_fixed_point
from quantecon.tests import get_h5_data_file, write_array, max_abs_diff
# specify params -- use defaults
A = 1.4
alpha = 0.6
beta = 0.96
grid_size = 50
if sys.version_info[0] == 2:
v_nm = "V"
else: # python 3
raise SkipTest("Python 3 tests aren't ready.")
v_nm = "V_py3"
def _new_solution(jv, f, grp):
"gets new solution and updates data file"
V = _solve_via_vfi(jv)
write_array(f, grp, V, v_nm)
return V
def _solve_via_vfi(jv):
"compute policy rules via value function iteration"
v_init = jv.x_grid * 0.6
V = compute_fixed_point(jv.bellman_operator, v_init,
max_iter=3000,
error_tol=1e-5)
return V
def _get_vf_guess(jv, force_new=False):
with get_h5_data_file() as f:
# See if the jv group already exists
group_existed = True
try:
jv_group = f.getNode("/jv")
except:
# doesn't exist
group_existed = False
jv_group = f.create_group("/", "jv", "data for jv.py tests")
if force_new or not group_existed:
# group doesn't exist, or forced to create new data.
# This function updates f in place and returns v_vfi, c_vfi, c_pfi
V = _new_solution(jv, f, jv_group)
return V
# if we made it here, the group exists and we should try to read
# existing solutions
try:
# Try reading vfi
if sys.version_info[0] == 2:
V = jv_group.V[:]
else: # python 3
V = jv_group.V_py3[:]
except:
# doesn't exist. Let's create it
V = _new_solution(jv, f, jv_group)
return V
class TestJvWorkder(unittest.TestCase):
@classmethod
def setUpClass(cls):
jv = JvWorker(A=A, alpha=alpha, beta=beta, grid_size=grid_size)
cls.jv = jv
# compute solution
v_init = _get_vf_guess(jv)
cls.V = compute_fixed_point(jv.bellman_operator, v_init)
cls.s_pol, cls.phi_pol = jv.bellman_operator(cls.V * 0.999,
return_policies=True)
def test_low_x_prefer_s(self):
"jv: s preferred to phi with low x?"
# low x is an early index
self.assertGreaterEqual(self.s_pol[0], self.phi_pol[0])
def test_high_x_prefer_phi(self):
"jv: phi preferred to s with high x?"
# low x is an early index
self.assertGreaterEqual(self.phi_pol[-1], self.s_pol[-1])
def test_policy_sizes(self):
"jv: policies correct size"
n = self.jv.x_grid.size
self.assertEqual(self.s_pol.size, n)
self.assertEqual(self.phi_pol.size, n)
def test_bellman_sol_fixed_point(self):
"jv: solution to bellman is fixed point"
new_V = self.jv.bellman_operator(self.V)
self.assertLessEqual(max_abs_diff(new_V, self.V), 1e-4)
|
[
"jv.JvWorker",
"quantecon.tests.write_array",
"quantecon.tests.max_abs_diff",
"quantecon.tests.get_h5_data_file",
"nose.plugins.skip.SkipTest",
"quantecon.compute_fixed_point"
] |
[((417, 457), 'nose.plugins.skip.SkipTest', 'SkipTest', (['"""Python 3 tests aren\'t ready."""'], {}), '("Python 3 tests aren\'t ready.")\n', (425, 457), False, 'from nose.plugins.skip import SkipTest\n'), ((587, 615), 'quantecon.tests.write_array', 'write_array', (['f', 'grp', 'V', 'v_nm'], {}), '(f, grp, V, v_nm)\n', (598, 615), False, 'from quantecon.tests import get_h5_data_file, write_array, max_abs_diff\n'), ((749, 834), 'quantecon.compute_fixed_point', 'compute_fixed_point', (['jv.bellman_operator', 'v_init'], {'max_iter': '(3000)', 'error_tol': '(1e-05)'}), '(jv.bellman_operator, v_init, max_iter=3000, error_tol=1e-05\n )\n', (768, 834), False, 'from quantecon import compute_fixed_point\n'), ((949, 967), 'quantecon.tests.get_h5_data_file', 'get_h5_data_file', ([], {}), '()\n', (965, 967), False, 'from quantecon.tests import get_h5_data_file, write_array, max_abs_diff\n'), ((2020, 2078), 'jv.JvWorker', 'JvWorker', ([], {'A': 'A', 'alpha': 'alpha', 'beta': 'beta', 'grid_size': 'grid_size'}), '(A=A, alpha=alpha, beta=beta, grid_size=grid_size)\n', (2028, 2078), False, 'from jv import JvWorker\n'), ((2178, 2226), 'quantecon.compute_fixed_point', 'compute_fixed_point', (['jv.bellman_operator', 'v_init'], {}), '(jv.bellman_operator, v_init)\n', (2197, 2226), False, 'from quantecon import compute_fixed_point\n'), ((3100, 3127), 'quantecon.tests.max_abs_diff', 'max_abs_diff', (['new_V', 'self.V'], {}), '(new_V, self.V)\n', (3112, 3127), False, 'from quantecon.tests import get_h5_data_file, write_array, max_abs_diff\n')]
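The tests above hinge on compute_fixed_point, i.e. applying the Bellman operator repeatedly until successive value functions stop changing. The toy sketch below illustrates that idea with plain NumPy and an invented contraction T; it does not use quantecon or the JvWorker model.
import numpy as np

def T(v):
    return 0.5 * v + 1.0          # toy contraction with fixed point v* = 2

def iterate_to_fixed_point(T, v_init, error_tol=1e-5, max_iter=3000):
    v = v_init
    for _ in range(max_iter):
        v_new = T(v)
        if np.max(np.abs(v_new - v)) < error_tol:
            return v_new
        v = v_new
    return v

v0 = np.zeros(5)
print(iterate_to_fixed_point(T, v0))   # all entries close to 2.0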
|
"""Config
This module is in charge of providing all the necessary settings to
the rest of the modules in excentury.
"""
import os
import re
import sys
import textwrap
import argparse
from collections import OrderedDict
from excentury.command import error, trace, import_mod
DESC = """Edit a configuration file for excentury.
Some actions performed by excentury can be overwritten by using
configuration files.
To see the values that the configuration file can overwrite use the
`defaults` command. This will print a list of the keys and values
excentury uses for the given command.
"""
RE = re.compile(r'\${(?P<key>.*?)}')
RE_IF = re.compile(
r'(?P<iftrue>.*?) IF\[\[(?P<cond>.*?)\]\]'
)
RE_IFELSE = re.compile(
r'(?P<iftrue>.*?) IF\[\[(?P<cond>.*?)\]\]ELSE (?P<iffalse>.*)'
)
def disp(msg):
"""Wrapper around sys.stdout.write which is meant to behave as
the print function but it does not add the newline character. """
sys.stdout.write(msg)
def _replacer(*key_val):
"""Helper function for replace.
Source: <http://stackoverflow.com/a/15221068/788553>
"""
replace_dict = dict(key_val)
replacement_function = lambda match: replace_dict[match.group(0)]
pattern = re.compile("|".join([re.escape(k) for k, _ in key_val]), re.M)
return lambda string: pattern.sub(replacement_function, string)
def replace(string, *key_val):
"""Replacement of strings done in one pass. Example:
>>> replace("a < b && b < c", ('<', '<'), ('&', '&'))
'a < b && b < c'
Source: <http://stackoverflow.com/a/15221068/788553>
"""
return _replacer(*key_val)(string)
class ConfigDispAction(argparse.Action): # pylint: disable=R0903
"""Derived argparse Action class to use when displaying the
configuration file and location."""
def __call__(self, parser, namespace, values, option_string=None):
try:
read_config(namespace)
except IOError:
disp('xcpp.config not found in %r\n' % namespace.cfg)
else:
disp('path to xcpp.config: "%s"\n' % namespace.cfg)
with open('%s/xcpp.config' % namespace.cfg, 'r') as _fp:
disp(_fp.read())
exit(0)
def add_parser(subp, raw):
"Add a parser to the main subparser. "
tmpp = subp.add_parser('config', help='configure excentury',
formatter_class=raw,
description=textwrap.dedent(DESC))
tmpp.add_argument('var', type=str, nargs='?', default=None,
help='Must be in the form of sec.key')
tmpp.add_argument('-v', action='store_true',
help='print config file location')
tmpp.add_argument('--print', action=ConfigDispAction,
nargs=0,
help='print config file and exit')
def _get_replacements(tokens, data, sec):
"""Helper function for _read_config. """
replacements = list()
for token in tokens:
if ':' in token:
tsec, tkey = token.split(':')
tval = ''
if tsec in data:
if tkey in data[tsec]:
tval = data[tsec][tkey]
else:
if token in data[sec]:
tval = data[sec][token]
else:
tval = ''
replacements.append(
('${%s}' % token, tval)
)
return replacements
# pylint: disable=invalid-name
# ARG and CFG are names that may be used in the configuration file.
# ARG gives us access to the command line arguments and CFG gives us
# access to the current configuration. Note that using CFG[key][sec]
# is equivalent to ${key:sec}. These names go against the convention
# so that they may be easy to spot in a configuration file.
def _eval_condition(cond, ARG, CFG, line_num, fname):
"""Evaluates a string using the eval function. It prints a
warning if there are any errors. Returns the result of the
evaluation and an error number: 0 if everything is fine, 1 if
there was an error. """
ARG.FILEPATH = '%s/%s/%s' % (ARG.cfg, CFG['xcpp']['path'], ARG.inputfile)
try:
# pylint: disable=eval-used
# To be able to evaluate a condition without creating a whole
# new parser we can use the eval function. We could have use
# a python file as a configuration but then there would be
# no simple structure to the files.
cond = eval(cond)
enum = 0
# pylint: disable=broad-except
# Anything can go wrong during the execution of the `eval`
# function. For this reason we must try to catch anything that
# may come our way so that we may give out a warning message
# and ignore it.
except Exception as exception:
cond = None
enum = 1
trace(
'WARNING: error in line %d of %r: %s\n' % (
line_num, fname, exception.message
)
)
return cond, enum
def _read_config(fname, arg):
"""Simple parser to read configuration files. """
data = OrderedDict()
sec = None
line_num = 0
with open(fname, 'r') as fhandle:
for line in fhandle:
line_num += 1
if line[0] == '[':
sec = line[1:-2]
data[sec] = OrderedDict()
elif '=' in line:
tmp = line.split('=', 1)
key = tmp[0].strip()
val = tmp[1].strip()
val = os.path.expandvars(val)
replacements = _get_replacements(
RE.findall(val), data, sec
)
# pylint: disable=star-args
if replacements:
val = replace(val, *replacements)
match = RE_IFELSE.match(val)
if match:
cond, enum = _eval_condition(
match.group('cond'), arg, data, line_num, fname
)
if enum == 1:
continue
groups = match.groups()
val = groups[0] if cond else groups[2]
else:
match = RE_IF.match(val)
if match:
cond, enum = _eval_condition(
match.group('cond'), arg, data, line_num, fname
)
if enum == 1:
continue
if cond:
val = match.group('iftrue')
else:
continue
data[sec][key] = val
return data
def read_config(arg):
"""Read the configuration file xcpp.config"""
path = arg.cfg
if path == '.' and not os.path.exists('xcpp.config'):
if 'XCPP_CONFIG_PATH' in os.environ:
tmp_path = os.environ['XCPP_CONFIG_PATH']
if os.path.exists('%s/xcpp.config' % tmp_path):
trace("Configured with: '%s/xcpp.config'\n" % tmp_path)
path = tmp_path
elif not os.path.exists('%s/xcpp.config' % path):
error("ERROR: %s/xcpp.config does not exist\n" % path)
arg.cfg = path
try:
config = _read_config('%s/xcpp.config' % path, arg)
except IOError:
config = OrderedDict()
return config
def run(arg):
"""Run command. """
config = read_config(arg)
if arg.v:
disp('path to xcpp.config: "%s"\n' % arg.cfg)
if arg.var is None:
for sec in config:
disp('[%s]\n' % sec)
for key in config[sec]:
disp(' %s = %s\n' % (key, config[sec][key]))
disp('\n')
return
try:
command, var = arg.var.split('.', 1)
except ValueError:
error("ERROR: '%s' is not of the form sec.key\n" % arg.var)
try:
disp(config[command][var]+'\n')
except KeyError:
pass
return
def _update_single(cfg, name, defaults=None):
"Helper function for get_cfg."
if defaults:
for var, val in defaults.iteritems():
cfg[name][var] = os.path.expandvars(str(val))
else:
mod = import_mod('excentury.command.%s' % name)
if hasattr(mod, "DEFAULTS"):
for var, val in mod.DEFAULTS.iteritems():
cfg[name][var] = os.path.expandvars(val)
def _update_from_file(cfg, name, cfg_file):
"Helper function for get_cfg."
if name in cfg_file:
for var, val in cfg_file[name].iteritems():
cfg[name][var] = os.path.expandvars(val)
def _update_from_arg(cfg, argdict, key):
"Helper function for get_cfg."
for var in cfg[key]:
if var in argdict and argdict[var] is not None:
cfg[key][var] = argdict[var]
def get_cfg(arg, names, defaults=None):
"""Obtain the settings for a command. """
cfg = {
'xcpp': {
'root': '.',
'path': '.'
}
}
cfg_file = read_config(arg)
if 'xcpp' in cfg_file:
for var, val in cfg_file['xcpp'].iteritems():
cfg['xcpp'][var] = os.path.expandvars(val)
cfg['xcpp']['root'] = arg.cfg
if isinstance(names, list):
for name in names:
cfg[name] = dict()
_update_single(cfg, name)
_update_from_file(cfg, name, cfg_file)
else:
if names != 'xcpp':
cfg[names] = dict()
_update_single(cfg, names, defaults)
_update_from_file(cfg, names, cfg_file)
argdict = vars(arg)
if arg.parser_name in cfg:
_update_from_arg(cfg, argdict, arg.parser_name)
elif arg.parser_name == 'to' and arg.lang in cfg:
_update_from_arg(cfg, argdict, arg.lang)
_update_from_arg(cfg, argdict, 'xcpp')
return cfg
|
[
"textwrap.dedent",
"collections.OrderedDict",
"os.path.exists",
"excentury.command.error",
"re.escape",
"re.compile",
"os.path.expandvars",
"excentury.command.import_mod",
"excentury.command.trace",
"sys.stdout.write"
] |
[((599, 630), 're.compile', 're.compile', (['"""\\\\${(?P<key>.*?)}"""'], {}), "('\\\\${(?P<key>.*?)}')\n", (609, 630), False, 'import re\n'), ((639, 696), 're.compile', 're.compile', (['"""(?P<iftrue>.*?) IF\\\\[\\\\[(?P<cond>.*?)\\\\]\\\\]"""'], {}), "('(?P<iftrue>.*?) IF\\\\[\\\\[(?P<cond>.*?)\\\\]\\\\]')\n", (649, 696), False, 'import re\n'), ((712, 789), 're.compile', 're.compile', (['"""(?P<iftrue>.*?) IF\\\\[\\\\[(?P<cond>.*?)\\\\]\\\\]ELSE (?P<iffalse>.*)"""'], {}), "('(?P<iftrue>.*?) IF\\\\[\\\\[(?P<cond>.*?)\\\\]\\\\]ELSE (?P<iffalse>.*)')\n", (722, 789), False, 'import re\n'), ((951, 972), 'sys.stdout.write', 'sys.stdout.write', (['msg'], {}), '(msg)\n', (967, 972), False, 'import sys\n'), ((5095, 5108), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5106, 5108), False, 'from collections import OrderedDict\n'), ((8212, 8253), 'excentury.command.import_mod', 'import_mod', (["('excentury.command.%s' % name)"], {}), "('excentury.command.%s' % name)\n", (8222, 8253), False, 'from excentury.command import error, trace, import_mod\n'), ((2456, 2477), 'textwrap.dedent', 'textwrap.dedent', (['DESC'], {}), '(DESC)\n', (2471, 2477), False, 'import textwrap\n'), ((4838, 4928), 'excentury.command.trace', 'trace', (["('WARNING: error in line %d of %r: %s\\n' % (line_num, fname, exception.message)\n )"], {}), "('WARNING: error in line %d of %r: %s\\n' % (line_num, fname, exception\n .message))\n", (4843, 4928), False, 'from excentury.command import error, trace, import_mod\n'), ((6819, 6848), 'os.path.exists', 'os.path.exists', (['"""xcpp.config"""'], {}), "('xcpp.config')\n", (6833, 6848), False, 'import os\n'), ((6964, 7007), 'os.path.exists', 'os.path.exists', (["('%s/xcpp.config' % tmp_path)"], {}), "('%s/xcpp.config' % tmp_path)\n", (6978, 7007), False, 'import os\n'), ((7126, 7165), 'os.path.exists', 'os.path.exists', (["('%s/xcpp.config' % path)"], {}), "('%s/xcpp.config' % path)\n", (7140, 7165), False, 'import os\n'), ((7175, 7229), 'excentury.command.error', 'error', (["('ERROR: %s/xcpp.config does not exist\\n' % path)"], {}), "('ERROR: %s/xcpp.config does not exist\\n' % path)\n", (7180, 7229), False, 'from excentury.command import error, trace, import_mod\n'), ((7355, 7368), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7366, 7368), False, 'from collections import OrderedDict\n'), ((7830, 7889), 'excentury.command.error', 'error', (['("ERROR: \'%s\' is not of the form sec.key\\n" % arg.var)'], {}), '("ERROR: \'%s\' is not of the form sec.key\\n" % arg.var)\n', (7835, 7889), False, 'from excentury.command import error, trace, import_mod\n'), ((8589, 8612), 'os.path.expandvars', 'os.path.expandvars', (['val'], {}), '(val)\n', (8607, 8612), False, 'import os\n'), ((9140, 9163), 'os.path.expandvars', 'os.path.expandvars', (['val'], {}), '(val)\n', (9158, 9163), False, 'import os\n'), ((1240, 1252), 're.escape', 're.escape', (['k'], {}), '(k)\n', (1249, 1252), False, 'import re\n'), ((5326, 5339), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5337, 5339), False, 'from collections import OrderedDict\n'), ((7025, 7080), 'excentury.command.trace', 'trace', (['("Configured with: \'%s/xcpp.config\'\\n" % tmp_path)'], {}), '("Configured with: \'%s/xcpp.config\'\\n" % tmp_path)\n', (7030, 7080), False, 'from excentury.command import error, trace, import_mod\n'), ((8378, 8401), 'os.path.expandvars', 'os.path.expandvars', (['val'], {}), '(val)\n', (8396, 8401), False, 'import os\n'), ((5507, 5530), 'os.path.expandvars', 'os.path.expandvars', 
(['val'], {}), '(val)\n', (5525, 5530), False, 'import os\n')]
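To make the ${...} substitution handled by _get_replacements and replace above concrete, here is a standalone sketch that resolves section-qualified tokens against already-parsed data. The section and key names (xcpp, root, path) mirror the defaults used elsewhere in the module, but the values are invented, and nothing is imported from excentury.
import re

RE = re.compile(r'\${(?P<key>.*?)}')

data = {'xcpp': {'root': '/home/user/project', 'path': 'cpp'}}

line = 'build = ${xcpp:root}/${xcpp:path}/build.xcpp'
for token in RE.findall(line):              # ['xcpp:root', 'xcpp:path']
    sec, key = token.split(':')
    line = line.replace('${%s}' % token, data[sec][key])

print(line)   # build = /home/user/project/cpp/build.xcpp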
|
# -*- coding: utf-8 -*-
'''
Created on Thu Nov 19 20:52:33 2015
@author: SW274998
'''
from nseta.common.commons import *
import datetime
import unittest
import time
from bs4 import BeautifulSoup
from tests import htmls
import json
import requests
import six
from nseta.common.urls import *
import nseta.common.urls as urls
from six.moves.urllib.parse import urlparse
from baseUnitTest import baseUnitTest
class TestUrls(baseUnitTest):
def setUp(self, redirect_logs=True):
super().setUp()
proxy_on = False
if proxy_on:
urls.session.proxies.update({'http': 'proxy1.wipro.com:8080'})
def runTest(self):
for key in TestUrls.__dict__.keys():
if key.find('test') == 0:
TestUrls.__dict__[key](self)
def test_get_symbol_count(self):
count = get_symbol_count(symbol='SBIN')
self.assertEqual(count, '1')
force_count = get_symbol_count(symbol='SBIN', force_refresh=True)
self.assertEqual(force_count, '1')
def test_equity_history_url(self):
sym_count = get_symbol_count(symbol='SBIN')
txt = 'Data for SBIN - EQ'
resp = equity_history_url(symbol='SBIN',
symbolCount=sym_count,
series='EQ',
fromDate='01-01-2000',
toDate='10-01-2000',
dateRange='')
self.assertGreaterEqual(resp.text.find(txt), 0, resp.text)
def test_nse_intraday_url(self):
txt = 'date|g1_o|g1_h|g1_l|g1_c|g2|g2_CUMVOL' #'<columns><column>date</column><column>pltp</column><column>nltp</column><column>previousclose</column><column>allltp</column>'
resp = nse_intraday_url(CDSymbol='SBIN', Periodicity='1')
self.assertIn(txt, resp.text)
def test_price_list_url(self):
resp = price_list_url('2019', 'DEC', '31DEC2019')
csv = unzip_str(resp.content)
self.assertGreaterEqual(csv.find('SBIN'), 0)
def tests_daily_volatility_url(self):
resp = daily_volatility_url('19112015')
self.assertGreaterEqual(resp.text.find('SBIN'), 0)
def test_pr_price_list_zipped_url(self):
resp = pr_price_list_zipped_url('191115')
csv = unzip_str(resp.content)
def test_index_history_url(self):
resp = index_history_url(indexType='NIFTY 50',
fromDate='01-01-2015',
toDate='10-01-2015')
self.assertGreaterEqual(resp.text.find('High'), 0)
self.assertGreaterEqual(resp.text.find('Low'), 0)
def test_index_daily_snapshot_url(self):
resp = index_daily_snapshot_url('06012020')
csv = str(resp.content)
self.assertGreaterEqual(csv.find('Nifty 50'), 0)
self.assertGreaterEqual(csv.find('Nifty IT'), 0)
self.assertGreaterEqual(csv.find('Nifty Bank'), 0)
self.assertGreaterEqual(csv.find('Nifty Next 50'), 0)
def test_index_pe_history_url(self):
resp = index_pe_history_url(fromDate='01-01-2015',
toDate='10-01-2015',
indexName='NIFTY 50')
self.assertGreaterEqual(resp.text.find('<th>P/E'), 0)
self.assertGreaterEqual(resp.text.find('<th>P/B'), 0)
def test_index_vix_history_url(self):
resp = index_vix_history_url(fromDate='01-Jan-2015',
toDate='10-Jan-2015',
)
self.assertGreaterEqual(resp.text.find('VIX'), 0)
self.assertGreaterEqual(resp.text.find('Change'), 0)
def test_derivative_derivative_expiry_dates_url(self):
resp = derivative_expiry_dates_url()
self.assertGreaterEqual(resp.text.find('vixExpryDt'), 0)
def test_derivative_history_url(self):
resp = derivative_history_url(instrumentType='FUTIDX',
symbol='NIFTY',
expiryDate='26-12-2019',
optionType='select',
strikePrice='',
dateRange='',
fromDate='25-Dec-2019',
toDate='26-Dec-2019')
self.assertGreaterEqual(resp.text.find('NIFTY'), 0)
self.assertGreaterEqual(resp.text.find('Expiry'), 0)
def test_derivative_price_list_url(self):
resp = derivative_price_list_url('2019', 'JUL', '19JUL2019')
csv = unzip_str(resp.content)
def tearDown(self):
super().tearDown()
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestUrls)
result = unittest.TextTestRunner(verbosity=2).run(suite)
if six.PY2:
if result.wasSuccessful():
print('tests OK')
for (test, error) in result.errors:
print('=========Error in: %s===========' % test)
print(error)
print('======================================')
for (test, failures) in result.failures:
print('=========Error in: %s===========' % test)
print(failures)
print('======================================')
|
[
"unittest.TextTestRunner",
"unittest.TestLoader",
"nseta.common.urls.session.proxies.update"
] |
[((541, 603), 'nseta.common.urls.session.proxies.update', 'urls.session.proxies.update', (["{'http': 'proxy1.wipro.com:8080'}"], {}), "({'http': 'proxy1.wipro.com:8080'})\n", (568, 603), True, 'import nseta.common.urls as urls\n'), ((4169, 4190), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (4188, 4190), False, 'import unittest\n'), ((4234, 4270), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (4257, 4270), False, 'import unittest\n')]
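The only non-obvious piece of setUp above is the optional proxy toggle on nseta's shared requests session. The sketch below shows the same toggle on a plain requests.Session; the proxy address is a placeholder, and the GET call is left commented out so the snippet runs without network access.
import requests

session = requests.Session()

proxy_on = False
if proxy_on:
    session.proxies.update({'http': 'http://proxy.example.com:8080'})

# resp = session.get('https://www.nseindia.com')  # would be routed through the proxy when enabled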
|
from django import forms
from .models import Account
from common.models import Comment, Attachments
from leads.models import Lead
from contacts.models import Contact
from django.db.models import Q
class AccountForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
account_view = kwargs.pop('account', False)
request_user = kwargs.pop('request_user', None)
super(AccountForm, self).__init__(*args, **kwargs)
for field in self.fields.values():
field.widget.attrs = {"class": "form-control"}
self.fields['description'].widget.attrs.update({'rows': '8'})
self.fields['status'].choices = [
(each[0], each[1]) for each in Account.ACCOUNT_STATUS_CHOICE]
self.fields['status'].required = False
for key, value in self.fields.items():
if key == 'phone':
value.widget.attrs['placeholder'] = "+91-123-456-7890"
else:
value.widget.attrs['placeholder'] = value.label
self.fields['billing_address_line'].widget.attrs.update({
'placeholder': 'Address Line'})
self.fields['billing_street'].widget.attrs.update({
'placeholder': 'Street'})
self.fields['billing_city'].widget.attrs.update({
'placeholder': 'City'})
self.fields['billing_state'].widget.attrs.update({
'placeholder': 'State'})
self.fields['billing_postcode'].widget.attrs.update({
'placeholder': 'Postcode'})
self.fields["billing_country"].choices = [
("", "--Country--"), ] + list(self.fields["billing_country"].choices)[1:]
self.fields["lead"].queryset = Lead.objects.all(
).exclude(status='closed')
if request_user:
self.fields["lead"].queryset = Lead.objects.filter(
Q(assigned_to__in=[request_user]) | Q(created_by=request_user)).exclude(status='closed')
self.fields["contacts"].queryset = Contact.objects.filter(
Q(assigned_to__in=[request_user]) | Q(created_by=request_user))
if account_view:
self.fields['billing_address_line'].required = True
self.fields['billing_street'].required = True
self.fields['billing_city'].required = True
self.fields['billing_state'].required = True
self.fields['billing_postcode'].required = True
self.fields['billing_country'].required = True
class Meta:
model = Account
fields = ('name', 'phone', 'email', 'website', 'industry',
'description', 'status',
'billing_address_line', 'billing_street',
'billing_city', 'billing_state',
'billing_postcode', 'billing_country', 'lead', 'contacts')
class AccountCommentForm(forms.ModelForm):
comment = forms.CharField(max_length=64, required=True)
class Meta:
model = Comment
fields = ('comment', 'account', 'commented_by')
class AccountAttachmentForm(forms.ModelForm):
attachment = forms.FileField(max_length=1001, required=True)
class Meta:
model = Attachments
fields = ('attachment', 'account')
|
[
"django.forms.FileField",
"leads.models.Lead.objects.all",
"django.forms.CharField",
"django.db.models.Q"
] |
[((2863, 2908), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(64)', 'required': '(True)'}), '(max_length=64, required=True)\n', (2878, 2908), False, 'from django import forms\n'), ((3071, 3118), 'django.forms.FileField', 'forms.FileField', ([], {'max_length': '(1001)', 'required': '(True)'}), '(max_length=1001, required=True)\n', (3086, 3118), False, 'from django import forms\n'), ((1687, 1705), 'leads.models.Lead.objects.all', 'Lead.objects.all', ([], {}), '()\n', (1703, 1705), False, 'from leads.models import Lead\n'), ((2021, 2054), 'django.db.models.Q', 'Q', ([], {'assigned_to__in': '[request_user]'}), '(assigned_to__in=[request_user])\n', (2022, 2054), False, 'from django.db.models import Q\n'), ((2057, 2083), 'django.db.models.Q', 'Q', ([], {'created_by': 'request_user'}), '(created_by=request_user)\n', (2058, 2083), False, 'from django.db.models import Q\n'), ((1845, 1878), 'django.db.models.Q', 'Q', ([], {'assigned_to__in': '[request_user]'}), '(assigned_to__in=[request_user])\n', (1846, 1878), False, 'from django.db.models import Q\n'), ((1881, 1907), 'django.db.models.Q', 'Q', ([], {'created_by': 'request_user'}), '(created_by=request_user)\n', (1882, 1907), False, 'from django.db.models import Q\n')]
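Because __init__ above pops two non-model kwargs ('account' and 'request_user'), callers have to pass them explicitly. The sketch below shows how a view might do that; the view function, import path and control flow are illustrative assumptions, not code from the project.
from accounts.forms import AccountForm   # assumed import path

def create_account(request):
    form = AccountForm(
        request.POST or None,
        account=True,               # makes the billing address fields required
        request_user=request.user  # restricts the lead/contacts querysets via the Q filters
    )
    if request.method == 'POST' and form.is_valid():
        form.save()
    return form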
|
#
# Copyright 2018 PyWren Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
import logging
import random
from pywren_ibm_cloud.cf_connector import CloudFunctions
logger = logging.getLogger(__name__)
class IBMCloudFunctionsInvoker:
def __init__(self, cf_config, retry_config):
self.namespace = cf_config['namespace']
self.endpoint = cf_config['endpoint']
self.cf_action_name = cf_config['action_name'] # Runtime
self.invocation_retry = retry_config['invocation_retry']
self.retry_sleeps = retry_config['retry_sleeps']
self.retries = retry_config['retries']
self.client = CloudFunctions(cf_config)
log_msg = 'IBM Cloud Functions init for {}'.format(self.cf_action_name)
logger.info(log_msg)
if(logger.getEffectiveLevel() == logging.WARNING):
print(log_msg)
def invoke(self, payload):
"""
Invoke -- return information about this invocation
"""
act_id = self.client.invoke(self.cf_action_name, payload)
attempts = 1
while not act_id and self.invocation_retry and attempts < self.retries:
attempts += 1
selected_sleep = random.choice(self.retry_sleeps)
exec_id = payload['executor_id']
call_id = payload['call_id']
log_msg = ('Executor ID {} Function {} - Invocation failed - retry {} in {} seconds'.format(exec_id, call_id, attempts, selected_sleep))
logger.debug(log_msg)
time.sleep(selected_sleep)
act_id = self.client.invoke(self.cf_action_name, payload)
return act_id
def config(self):
"""
Return config dict
"""
return {'cf_action_name': self.cf_action_name,
'cf_namespace': self.namespace,
'cf_endpoint': self.endpoint}
|
[
"logging.getLogger",
"random.choice",
"time.sleep",
"pywren_ibm_cloud.cf_connector.CloudFunctions"
] |
[((688, 715), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (705, 715), False, 'import logging\n'), ((1151, 1176), 'pywren_ibm_cloud.cf_connector.CloudFunctions', 'CloudFunctions', (['cf_config'], {}), '(cf_config)\n', (1165, 1176), False, 'from pywren_ibm_cloud.cf_connector import CloudFunctions\n'), ((1725, 1757), 'random.choice', 'random.choice', (['self.retry_sleeps'], {}), '(self.retry_sleeps)\n', (1738, 1757), False, 'import random\n'), ((2065, 2091), 'time.sleep', 'time.sleep', (['selected_sleep'], {}), '(selected_sleep)\n', (2075, 2091), False, 'import time\n')]
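The invoke() method above retries failed invocations with a randomly chosen sleep between attempts. Here is a self-contained sketch of that retry loop with a fake flaky call in place of CloudFunctions; the sleep values and failure rate are arbitrary.
import random
import time

def flaky_invoke():
    """Stand-in for client.invoke(): returns None (failure) most of the time."""
    return "activation-id" if random.random() < 0.3 else None

retry_sleeps = [1, 2, 5]
retries = 5

act_id = flaky_invoke()
attempts = 1
while not act_id and attempts < retries:
    attempts += 1
    selected_sleep = random.choice(retry_sleeps)
    print('Invocation failed - retry %d in %d seconds' % (attempts, selected_sleep))
    time.sleep(selected_sleep)
    act_id = flaky_invoke()
print('activation id:', act_id)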
|
#-- coding: utf-8 --
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import os
import time
import collections
from PIL import Image, ImageOps, ImageDraw, ImageFont
code_2_icono = collections.defaultdict(lambda: ['38'])
kor_2_eng = collections.defaultdict(lambda: ['UNKNOWN'])
code_2_icono['SKY_O00'] = ['38']
code_2_icono['SKY_O01'] = ['01', '08']
code_2_icono['SKY_O02'] = ['02', '09']
code_2_icono['SKY_O03'] = ['03', '10']
code_2_icono['SKY_O04'] = ['12', '40']
code_2_icono['SKY_O05'] = ['13', '41']
code_2_icono['SKY_O06'] = ['14', '42']
code_2_icono['SKY_O07'] = ['18']
code_2_icono['SKY_O08'] = ['21']
code_2_icono['SKY_O09'] = ['32']
code_2_icono['SKY_O10'] = ['04']
code_2_icono['SKY_O11'] = ['29']
code_2_icono['SKY_O12'] = ['26']
code_2_icono['SKY_O13'] = ['27']
code_2_icono['SKY_O14'] = ['28']
code_2_icono['SKY_W00'] = ['38']
code_2_icono['SKY_W01'] = ['01', '08']
code_2_icono['SKY_W02'] = ['02', '09']
code_2_icono['SKY_W03'] = ['03', '10']
code_2_icono['SKY_W04'] = ['18']
code_2_icono['SKY_W07'] = ['21']
code_2_icono['SKY_W09'] = ['12', '40']
code_2_icono['SKY_W10'] = ['21']
code_2_icono['SKY_W11'] = ['04']
code_2_icono['SKY_W12'] = ['13', '41']
code_2_icono['SKY_W13'] = ['32']
kor_2_eng[u'좋음'] = ['GOOD']
kor_2_eng[u'보통'] = ['NORMAL']
kor_2_eng[u'나쁨'] = ['BAD']
kor_2_eng[u'매우 나쁨'] = ['V BAD']
def geticonfname(code, drawNight=False):
l = code_2_icono[code]
dname = os.path.join(os.path.dirname(__file__), "resources", "weather_icons_mod")
if len(l) > 1 and drawNight:
cur_hour = time.localtime().tm_hour
is_night = cur_hour < 5 or cur_hour > 18
if is_night:
return os.path.join(dname, l[1] + '.png')
else:
return os.path.join(dname, l[0] + '.png')
else:
return os.path.join(dname, l[0] + '.png')
BLACK = 0
WHITE = 1
class PapirusRenderer:
"""Renderer for Papirus HAT"""
def __init__(self, rotate=0, font_path=None):
if font_path:
self.font_path = font_path
else:
self.font_path = "/usr/share/fonts/truetype/freefont/FreeMono.ttf"
print("rotate:",rotate)
try:
from papirus import Papirus
self.papirus = Papirus(rotate=rotate)
self.canvas_size = self.papirus.size
print("papirus size : %s"%str(self.canvas_size))
except ImportError:
print("papirus import failed")
self.papirus = None
self.canvas_size = (264,176)
def render(self, weather, weather_forecast):
canvas = Image.new('1', self.canvas_size, WHITE)
print("font_path:",self.font_path)
fname = geticonfname(weather.weather_code, drawNight=True)
print("file:",fname)
self._drawImage(canvas, fname, 20,10,(100,100))
print("cur desc : %s"%str(weather.weather_desc))
print("cur airq : %s"%str(weather.air_quality))
temperature = str(weather.cur_temperature).split('.')[0] + u" \u2103"
self._drawText(canvas, temperature, 70,115, font_size=20, center_horizontal=True)
translated = kor_2_eng[weather.air_quality][0]
print("cur airq translated: %s"%translated)
self._drawText(canvas, translated, 70,140, font_size=20, center_horizontal=True)
base_x,base_y = 145,5
for i,w in enumerate(weather_forecast):
fname = geticonfname(w.weather_code)
self._drawImage(canvas, fname, base_x, base_y+55*i, (50,50))
temperature = str(w.min_temperature) + " / " + str(w.max_temperature)
self._drawText(canvas, temperature, base_x+80, base_y+28+55*i, font_size=14, center_horizontal=True)
# update time
self._drawText(canvas, time.strftime("%Y-%m-%d %H:%M",time.localtime()), 136, 165, font_size=9, center_horizontal=True)
if self.papirus == None:
# save a image for debugging purpose
with open("result.jpg", "wb") as fp:
canvas.save(fp)
print("result file saved")
else:
self.papirus.display(canvas)
self.papirus.update()
def _drawImage(self, canvas, image_path, x, y, size):
image = Image.open(image_path)
image = ImageOps.grayscale(image)
image = image.resize(size)
image = image.convert("1", dither=Image.FLOYDSTEINBERG)
canvas.paste(image,(x,y))
def _drawText(self, canvas, text, x, y, font_size=20, center_horizontal=False):
draw = ImageDraw.Draw(canvas)
font = ImageFont.truetype(self.font_path, font_size)
text_draw_size = draw.textsize(text, font=font)
if center_horizontal:
x = x - text_draw_size[0]/2
draw.text( (x, y) , text, font=font, fill=BLACK)
|
[
"papirus.Papirus",
"sys.setdefaultencoding",
"PIL.Image.open",
"PIL.Image.new",
"os.path.join",
"PIL.ImageFont.truetype",
"PIL.ImageOps.grayscale",
"os.path.dirname",
"PIL.ImageDraw.Draw",
"collections.defaultdict",
"time.localtime"
] |
[((45, 76), 'sys.setdefaultencoding', 'sys.setdefaultencoding', (['"""utf-8"""'], {}), "('utf-8')\n", (67, 76), False, 'import sys\n'), ((191, 229), 'collections.defaultdict', 'collections.defaultdict', (["(lambda : '38')"], {}), "(lambda : '38')\n", (214, 229), False, 'import collections\n'), ((242, 285), 'collections.defaultdict', 'collections.defaultdict', (["(lambda : 'UNKNOWN')"], {}), "(lambda : 'UNKNOWN')\n", (265, 285), False, 'import collections\n'), ((1426, 1451), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1441, 1451), False, 'import os\n'), ((1781, 1815), 'os.path.join', 'os.path.join', (['dname', "(l[0] + '.png')"], {}), "(dname, l[0] + '.png')\n", (1793, 1815), False, 'import os\n'), ((2576, 2615), 'PIL.Image.new', 'Image.new', (['"""1"""', 'self.canvas_size', 'WHITE'], {}), "('1', self.canvas_size, WHITE)\n", (2585, 2615), False, 'from PIL import Image, ImageOps, ImageDraw, ImageFont\n'), ((4217, 4239), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (4227, 4239), False, 'from PIL import Image, ImageOps, ImageDraw, ImageFont\n'), ((4256, 4281), 'PIL.ImageOps.grayscale', 'ImageOps.grayscale', (['image'], {}), '(image)\n', (4274, 4281), False, 'from PIL import Image, ImageOps, ImageDraw, ImageFont\n'), ((4516, 4538), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['canvas'], {}), '(canvas)\n', (4530, 4538), False, 'from PIL import Image, ImageOps, ImageDraw, ImageFont\n'), ((4554, 4599), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['self.font_path', 'font_size'], {}), '(self.font_path, font_size)\n', (4572, 4599), False, 'from PIL import Image, ImageOps, ImageDraw, ImageFont\n'), ((1539, 1555), 'time.localtime', 'time.localtime', ([], {}), '()\n', (1553, 1555), False, 'import time\n'), ((1653, 1687), 'os.path.join', 'os.path.join', (['dname', "(l[1] + '.png')"], {}), "(dname, l[1] + '.png')\n", (1665, 1687), False, 'import os\n'), ((1721, 1755), 'os.path.join', 'os.path.join', (['dname', "(l[0] + '.png')"], {}), "(dname, l[0] + '.png')\n", (1733, 1755), False, 'import os\n'), ((2218, 2240), 'papirus.Papirus', 'Papirus', ([], {'rotate': 'rotate'}), '(rotate=rotate)\n', (2225, 2240), False, 'from papirus import Papirus\n'), ((3779, 3795), 'time.localtime', 'time.localtime', ([], {}), '()\n', (3793, 3795), False, 'import time\n')]
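When no Papirus HAT is available, the renderer above falls back to drawing with PIL and saving the canvas to disk. Here is a cut-down, standalone sketch of those drawing steps; it uses PIL's built-in bitmap font instead of FreeMono and saves a PNG, so it runs anywhere without the papirus package or a font path.
from PIL import Image, ImageDraw, ImageFont

BLACK, WHITE = 0, 1
canvas = Image.new('1', (264, 176), WHITE)   # same 1-bit canvas size as the e-paper display

draw = ImageDraw.Draw(canvas)
font = ImageFont.load_default()
draw.text((70, 115), "23 C", font=font, fill=BLACK)

canvas.save("result.png")
print("result file saved")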
|
# Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
"""Assistant utility for automatically load network from network
description."""
import torch
class Assistant:
"""Assistant that bundles training, validation and testing workflow.
Parameters
----------
net : torch.nn.Module
network to train.
error : object or lambda
an error object or a lambda function that evaluates error.
It is expected to take ``(output, target)`` | ``(output, label)``
        as its argument and return a scalar value.
optimizer : torch optimizer
the learning optimizer.
stats : slayer.utils.stats
learning stats logger. If None, stats will not be logged.
Defaults to None.
classifier : slayer.classifier or lambda
classifier object or lambda function that takes output and
returns the network prediction. None means regression mode.
Classification steps are bypassed.
Defaults to None.
count_log : bool
flag to enable count log. Defaults to False.
lam : float
lagrangian to merge network layer based loss.
None means no such additional loss.
If not None, net is expected to return the accumulated loss as second
argument. It is intended to be used with layer wise sparsity loss.
Defaults to None.
Attributes
----------
net
error
optimizer
stats
classifier
count_log
lam
device : torch.device or None
the main device memory where network is placed. It is not at start and
gets initialized on the first call.
"""
def __init__(
self,
net, error, optimizer,
stats=None, classifier=None, count_log=False,
lam=None
):
self.net = net
self.error = error
self.optimizer = optimizer
self.classifier = classifier
self.stats = stats
self.count_log = count_log
self.lam = lam
self.device = None
def reduce_lr(self, factor=10 / 3):
"""Reduces the learning rate of the optimizer by ``factor``.
Parameters
----------
factor : float
learning rate reduction factor. Defaults to 10/3.
Returns
-------
"""
for param_group in self.optimizer.param_groups:
print('\nLearning rate reduction from', param_group['lr'])
param_group['lr'] /= factor
def train(self, input, target):
"""Training assistant.
Parameters
----------
input : torch tensor
input tensor.
target : torch tensor
ground truth or label.
Returns
-------
output
network's output.
count : optional
spike count if ``count_log`` is enabled
"""
self.net.train()
if self.device is None:
for p in self.net.parameters():
self.device = p.device
break
device = self.device
input = input.to(device)
target = target.to(device)
count = None
if self.count_log is True:
if self.lam is None:
output, count = self.net(input)
else:
output, net_loss, count = self.net(input)
else:
if self.lam is None:
output = self.net(input)
else:
output, net_loss = self.net(input)
loss = self.error(output, target)
if self.stats is not None:
self.stats.training.num_samples += input.shape[0]
self.stats.training.loss_sum += loss.cpu().data.item() \
* output.shape[0]
if self.classifier is not None: # classification
self.stats.training.correct_samples += torch.sum(
self.classifier(output) == target
).cpu().data.item()
if self.lam is not None: # add net_loss before backward step
loss += self.lam * net_loss
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
if count is None:
return output
return output, count
def test(self, input, target):
"""Testing assistant.
Parameters
----------
input : torch tensor
input tensor.
target : torch tensor
ground truth or label.
Returns
-------
output
network's output.
count : optional
spike count if ``count_log`` is enabled
"""
self.net.eval()
if self.device is None:
for p in self.net.parameters():
self.device = p.device
break
device = self.device
with torch.no_grad():
input = input.to(device)
target = target.to(device)
count = None
if self.count_log is True:
if self.lam is None:
output, count = self.net(input)
else:
output, _, count = self.net(input)
else:
if self.lam is None:
output = self.net(input)
else:
output, _ = self.net(input)
loss = self.error(output, target)
if self.stats is not None:
self.stats.testing.num_samples += input.shape[0]
self.stats.testing.loss_sum += loss.cpu().data.item() \
* output.shape[0]
if self.classifier is not None: # classification
self.stats.testing.correct_samples += torch.sum(
self.classifier(output) == target
).cpu().data.item()
if count is None:
return output
return output, count
def valid(self, input, target):
"""Validation assistant.
Parameters
----------
input : torch tensor
input tensor.
target : torch tensor
ground truth or label.
Returns
-------
output
network's output.
count : optional
spike count if ``count_log`` is enabled
"""
self.net.eval()
with torch.no_grad():
device = self.net.device
input = input.to(device)
target = target.to(device)
count = None
if self.count_log is True:
if self.lam is None:
output, count = self.net(input)
else:
output, _, count = self.net(input)
else:
if self.lam is None:
output = self.net(input)
else:
output, _ = self.net(input)
loss = self.error(output, target)
if self.stats is not None:
self.stats.validation.num_samples += input.shape[0]
if self.lam is None:
self.stats.validation.loss_sum += loss.cpu().data.item() \
* output.shape[0]
else:
self.stats.validation.loss_sum += loss.cpu().data.item() \
* output.shape[0]
if self.classifier is not None: # classification
self.stats.validation.correct_samples += torch.sum(
self.classifier(output) == target
).cpu().data.item()
if count is None:
return output
return output, count
|
[
"torch.no_grad"
] |
[((4844, 4859), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4857, 4859), False, 'import torch\n'), ((6364, 6379), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6377, 6379), False, 'import torch\n')]
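A minimal usage sketch for the class above, in regression mode (no stats, no classifier, no count log). The network, loss and data are ordinary torch stand-ins rather than a lava/slayer network, and Assistant refers to the class defined above.
import torch

net = torch.nn.Linear(10, 1)
error = torch.nn.MSELoss()
optimizer = torch.optim.Adam(net.parameters(), lr=1e-3)

assistant = Assistant(net, error, optimizer)

x = torch.randn(32, 10)
y = torch.randn(32, 1)

for epoch in range(5):
    output = assistant.train(x, y)   # one optimization step
    with torch.no_grad():
        print('epoch', epoch, 'loss', error(net(x), y).item())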
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
dependencies = [
('historias', '0006_auto_20150413_0001'),
]
operations = [
migrations.AlterField(
model_name='historias',
name='fecha_ingreso',
field=models.DateField(default=datetime.datetime(2015, 4, 25, 14, 59, 14, 468359), help_text='Formato: dd/mm/yyyy', verbose_name='Fecha de Ingreso'),
preserve_default=True,
),
migrations.AlterField(
model_name='historias',
name='hora_ingreso',
field=models.TimeField(default=datetime.datetime(2015, 4, 25, 14, 59, 14, 468307), help_text='Formato: hh:mm', verbose_name='Hora de Ingreso'),
preserve_default=True,
),
migrations.AlterField(
model_name='ubicaciones',
name='sala',
field=models.CharField(max_length=10, choices=[(b'SALA 1', b'SALA 1'), (b'SALA 2', b'SALA 2'), (b'SALA 3', b'SALA 3'), (b'SALA 4', b'SALA 4'), (b'SALA 5', b'SALA 5'), (b'GAURDIA', b'GAURDIA'), (b'NEO', b'NEO'), (b'UTI', b'UTI'), (b'UCO', b'UCO'), (b'PRE PARTO', b'PRE PARTO')]),
preserve_default=True,
),
]
|
[
"datetime.datetime",
"django.db.models.CharField"
] |
[((984, 1268), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'choices': "[(b'SALA 1', b'SALA 1'), (b'SALA 2', b'SALA 2'), (b'SALA 3', b'SALA 3'), (\n b'SALA 4', b'SALA 4'), (b'SALA 5', b'SALA 5'), (b'GAURDIA', b'GAURDIA'),\n (b'NEO', b'NEO'), (b'UTI', b'UTI'), (b'UCO', b'UCO'), (b'PRE PARTO',\n b'PRE PARTO')]"}), "(max_length=10, choices=[(b'SALA 1', b'SALA 1'), (b'SALA 2',\n b'SALA 2'), (b'SALA 3', b'SALA 3'), (b'SALA 4', b'SALA 4'), (b'SALA 5',\n b'SALA 5'), (b'GAURDIA', b'GAURDIA'), (b'NEO', b'NEO'), (b'UTI', b'UTI'\n ), (b'UCO', b'UCO'), (b'PRE PARTO', b'PRE PARTO')])\n", (1000, 1268), False, 'from django.db import models, migrations\n'), ((405, 455), 'datetime.datetime', 'datetime.datetime', (['(2015)', '(4)', '(25)', '(14)', '(59)', '(14)', '(468359)'], {}), '(2015, 4, 25, 14, 59, 14, 468359)\n', (422, 455), False, 'import datetime\n'), ((713, 763), 'datetime.datetime', 'datetime.datetime', (['(2015)', '(4)', '(25)', '(14)', '(59)', '(14)', '(468307)'], {}), '(2015, 4, 25, 14, 59, 14, 468307)\n', (730, 763), False, 'import datetime\n')]
|
import os
from pathlib import Path
from data_utils import get_data_path, get_image_data_path, get_image_extension
def app_id_to_image_filename(app_id, is_horizontal_banner=False):
image_data_path = get_image_data_path(is_horizontal_banner)
image_filename = image_data_path + str(app_id) + get_image_extension()
return image_filename
def image_filename_to_app_id(image_filename):
base_name = os.path.basename(image_filename)
app_id = base_name.strip(get_image_extension())
return app_id
def list_app_ids(is_horizontal_banner=False):
image_data_path = get_image_data_path(is_horizontal_banner)
image_filenames = Path(image_data_path).glob("*" + get_image_extension())
app_ids = [image_filename_to_app_id(filename) for filename in image_filenames]
app_ids = sorted(app_ids, key=int)
return app_ids
def get_frozen_app_ids_filename():
frozen_app_ids_filename = get_data_path() + "frozen_app_ids.txt"
return frozen_app_ids_filename
def freeze_app_ids(app_ids, output_file_name=None):
if output_file_name is None:
output_file_name = get_frozen_app_ids_filename()
with open(output_file_name, "w", encoding="utf8") as f:
for app_id in app_ids:
f.write("{}\n".format(app_id))
return
def load_frozen_app_ids(input_file_name=None):
if input_file_name is None:
input_file_name = get_frozen_app_ids_filename()
with open(input_file_name, "r", encoding="utf8") as f:
# Do not convert to a set object, or any other conversion, because we want to keep the list order as it is.
# Just read the list from the file. That is all there is to do. Otherwise, appIDs will be scrambled!
frozen_app_ids = [app_id.strip() for app_id in f.readlines()]
return frozen_app_ids
def get_frozen_app_ids(is_horizontal_banner=False):
try:
frozen_app_ids = load_frozen_app_ids()
except FileNotFoundError:
print("Creating {}".format(get_frozen_app_ids_filename()))
frozen_app_ids = list_app_ids(is_horizontal_banner=is_horizontal_banner)
freeze_app_ids(frozen_app_ids)
return frozen_app_ids
|
[
"data_utils.get_image_extension",
"pathlib.Path",
"data_utils.get_data_path",
"os.path.basename",
"data_utils.get_image_data_path"
] |
[((205, 246), 'data_utils.get_image_data_path', 'get_image_data_path', (['is_horizontal_banner'], {}), '(is_horizontal_banner)\n', (224, 246), False, 'from data_utils import get_data_path, get_image_data_path, get_image_extension\n'), ((414, 446), 'os.path.basename', 'os.path.basename', (['image_filename'], {}), '(image_filename)\n', (430, 446), False, 'import os\n'), ((589, 630), 'data_utils.get_image_data_path', 'get_image_data_path', (['is_horizontal_banner'], {}), '(is_horizontal_banner)\n', (608, 630), False, 'from data_utils import get_data_path, get_image_data_path, get_image_extension\n'), ((301, 322), 'data_utils.get_image_extension', 'get_image_extension', ([], {}), '()\n', (320, 322), False, 'from data_utils import get_data_path, get_image_data_path, get_image_extension\n'), ((477, 498), 'data_utils.get_image_extension', 'get_image_extension', ([], {}), '()\n', (496, 498), False, 'from data_utils import get_data_path, get_image_data_path, get_image_extension\n'), ((921, 936), 'data_utils.get_data_path', 'get_data_path', ([], {}), '()\n', (934, 936), False, 'from data_utils import get_data_path, get_image_data_path, get_image_extension\n'), ((654, 675), 'pathlib.Path', 'Path', (['image_data_path'], {}), '(image_data_path)\n', (658, 675), False, 'from pathlib import Path\n'), ((687, 708), 'data_utils.get_image_extension', 'get_image_extension', ([], {}), '()\n', (706, 708), False, 'from data_utils import get_data_path, get_image_data_path, get_image_extension\n')]
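A small round-trip sketch for freeze_app_ids and load_frozen_app_ids, passing an explicit file name so the data_utils path helpers are not involved; the app ids and file name are arbitrary examples.
example_ids = ['10', '70', '220']
freeze_app_ids(example_ids, output_file_name='frozen_app_ids_example.txt')

loaded = load_frozen_app_ids('frozen_app_ids_example.txt')
assert loaded == example_ids   # the on-disk order is preserved, as the comment above stresses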
|
import numpy as np
from coffeine.covariance_transformers import (
Diag,
LogDiag,
ExpandFeatures,
Riemann,
RiemannSnp,
NaiveVec)
from coffeine.spatial_filters import (
ProjIdentitySpace,
ProjCommonSpace,
ProjLWSpace,
ProjRandomSpace,
ProjSPoCSpace)
from sklearn.compose import make_column_transformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import RidgeCV, LogisticRegression
def make_filter_bank_transformer(names, method='riemann',
projection_params=None,
vectorization_params=None,
categorical_interaction=None):
"""Generate pipeline for filterbank models.
Prepare filter bank models as used in [1]_. These models take as input
sensor-space covariance matrices computed from M/EEG signals in different
frequency bands. Then transformations are applied to improve the
applicability of linear regression techniques by reducing the impact of
field spread.
In terms of implementation, this involves 1) projection
(e.g. spatial filters) and 2) vectorization (e.g. taking the log on the
diagonal).
.. note::
The resulting model expects as inputs data frames in which different
        covariances (e.g. for different frequencies) are stored inside columns
indexed by ``names``.
Other columns will be passed through by the underlying column
transformers.
The pipeline also supports fitting categorical interaction effects
after projection and vectorization steps are performed.
.. note::
All essential methods from [1]_ are implemented here. In practice,
we recommend comparing `riemann', `spoc' and `diag' as a baseline.
Parameters
----------
names : list of str
The column names of the data frame corresponding to different
covariances.
method : str
The method used for extracting features from covariances. Defaults
to ``'riemann'``. Can be ``'riemann'``, ``'lw_riemann'``, ``'diag'``,
``'log_diag'``, ``'random'``, ``'naive'``, ``'spoc'``,
``'riemann_wasserstein'``.
projection_params : dict | None
The parameters for the projection step.
vectorization_params : dict | None
The parameters for the vectorization step.
categorical_interaction : str
The column in the input data frame containing a binary descriptor
used to fit 2-way interaction effects.
References
----------
[1] <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>.
Predictive regression modeling with MEG/EEG: from source power
to signals and cognitive states.
    *NeuroImage*, page 116893, 2020. ISSN 1053-8119.
https://doi.org/10.1016/j.neuroimage.2020.116893
"""
# put defaults here for projection and vectorization step
projection_defaults = {
'riemann': dict(scale=1, n_compo='full', reg=1.e-05),
'lw_riemann': dict(shrink=1),
'diag': dict(),
'log_diag': dict(),
'random': dict(n_compo='full'),
'naive': dict(),
'spoc': dict(n_compo='full', scale='auto', reg=1.e-05, shrink=1),
'riemann_wasserstein': dict()
}
vectorization_defaults = {
'riemann': dict(metric='riemann'),
'lw_riemann': dict(metric='riemann'),
'diag': dict(),
'log_diag': dict(),
'random': dict(),
'naive': dict(method='upper'),
'spoc': dict(),
'riemann_wasserstein': dict(rank='full')
}
assert set(projection_defaults) == set(vectorization_defaults)
if method not in projection_defaults:
raise ValueError(
f"The `method` ('{method}') you specified is unknown.")
# update defaults
projection_params_ = projection_defaults[method]
if projection_params is not None:
projection_params_.update(**projection_params)
vectorization_params_ = vectorization_defaults[method]
if vectorization_params is not None:
vectorization_params_.update(**vectorization_params)
def _get_projector_vectorizer(projection, vectorization):
return [(make_pipeline(*
[projection(**projection_params_),
vectorization(**vectorization_params_)]),
name) for name in names]
# setup pipelines (projection + vectorization step)
steps = tuple()
if method == 'riemann':
steps = (ProjCommonSpace, Riemann)
elif method == 'lw_riemann':
steps = (ProjLWSpace, Riemann)
elif method == 'diag':
steps = (ProjIdentitySpace, Diag)
elif method == 'log_diag':
steps = (ProjIdentitySpace, LogDiag)
elif method == 'random':
steps = (ProjRandomSpace, LogDiag)
elif method == 'naive':
steps = (ProjIdentitySpace, NaiveVec)
elif method == 'spoc':
steps = (ProjSPoCSpace, LogDiag)
elif method == 'riemann_wasserstein':
steps = (ProjIdentitySpace, RiemannSnp)
filter_bank_transformer = make_column_transformer(
*_get_projector_vectorizer(*steps), remainder='passthrough')
if categorical_interaction is not None:
filter_bank_transformer = ExpandFeatures(
filter_bank_transformer, expander_column=categorical_interaction)
return filter_bank_transformer
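# --- Editor's illustrative sketch (not part of the original module) ---
# A hedged usage example for the transformer built above. It assumes the
# input format described in the docstring: a data frame whose columns listed
# in ``names`` each hold one (n_channels, n_channels) covariance array per
# row. The band names ('alpha', 'beta'), the extra 'age' column and all sizes
# are hypothetical.
def _example_filter_bank_transformer():
    import numpy as np
    import pandas as pd
    rng = np.random.RandomState(42)
    n_subjects, n_channels = 10, 4
    def _random_spd():
        a = rng.randn(n_channels, n_channels)
        return a @ a.T + n_channels * np.eye(n_channels)
    df_covs = pd.DataFrame(
        {band: [_random_spd() for _ in range(n_subjects)]
         for band in ('alpha', 'beta')})
    # Extra columns are passed through by the remainder='passthrough' setting.
    df_covs['age'] = rng.uniform(20, 80, n_subjects)
    transformer = make_filter_bank_transformer(
        names=['alpha', 'beta'], method='riemann')
    # Yields one feature vector per subject after projection + vectorization.
    return transformer.fit_transform(df_covs)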
def make_filter_bank_regressor(names, method='riemann',
projection_params=None,
vectorization_params=None,
categorical_interaction=None, scaling=None,
estimator=None):
"""Generate pipeline for regression with filter bank model.
Prepare filter bank models as used in [1]_. These models take as input
sensor-space covariance matrices computed from M/EEG signals in different
frequency bands. Then transformations are applied to improve the
applicability of linear regression techniques by reducing the impact of
field spread.
In terms of implementation, this involves 1) projection
(e.g. spatial filters) and 2) vectorization (e.g. taking the log on the
diagonal).
.. note::
The resulting model expects as inputs data frames in which different
        covariances (e.g. for different frequencies) are stored inside columns
indexed by ``names``.
Other columns will be passed through by the underlying column
transformers.
The pipeline also supports fitting categorical interaction effects
after projection and vectorization steps are performed.
.. note::
All essential methods from [1]_ are implemented here. In practice,
        we recommend comparing ``'riemann'``, ``'spoc'`` and ``'diag'`` as a baseline.
Parameters
----------
names : list of str
The column names of the data frame corresponding to different
covariances.
method : str
The method used for extracting features from covariances. Defaults
to ``'riemann'``. Can be ``'riemann'``, ``'lw_riemann'``, ``'diag'``,
``'log_diag'``, ``'random'``, ``'naive'``, ``'spoc'``,
``'riemann_wasserstein'``.
projection_params : dict | None
The parameters for the projection step.
vectorization_params : dict | None
The parameters for the vectorization step.
categorical_interaction : str
The column in the input data frame containing a binary descriptor
used to fit 2-way interaction effects.
scaling : scikit-learn Transformer object | None
        Method for re-scaling the features. Defaults to None. If None,
StandardScaler is used.
estimator : scikit-learn Estimator object.
        The estimator object. Defaults to None. If None, a ``RidgeCV``
        regressor over a logarithmic grid of alphas (1e-3 to 1e5) is used.
References
----------
[1] <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>.
Predictive regression modeling with MEG/EEG: from source power
to signals and cognitive states.
        *NeuroImage*, page 116893, 2020. ISSN 1053-8119.
https://doi.org/10.1016/j.neuroimage.2020.116893
"""
filter_bank_transformer = make_filter_bank_transformer(
names=names, method=method, projection_params=projection_params,
vectorization_params=vectorization_params,
categorical_interaction=categorical_interaction
)
scaling_ = scaling
if scaling_ is None:
scaling_ = StandardScaler()
estimator_ = estimator
if estimator_ is None:
estimator_ = RidgeCV(alphas=np.logspace(-3, 5, 100))
filter_bank_regressor = make_pipeline(
filter_bank_transformer,
scaling_,
estimator_
)
return filter_bank_regressor
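# --- Editor's illustrative sketch (not part of the original module) ---
# A hedged end-to-end example for the regressor built above, fitted on
# random covariances against a random continuous target. The band names and
# sizes are hypothetical; the input format follows the docstring above.
def _example_filter_bank_regressor():
    import numpy as np
    import pandas as pd
    rng = np.random.RandomState(0)
    n_subjects, n_channels = 20, 4
    def _spd():
        a = rng.randn(n_channels, n_channels)
        return a @ a.T + n_channels * np.eye(n_channels)
    df_covs = pd.DataFrame(
        {band: [_spd() for _ in range(n_subjects)]
         for band in ('alpha', 'beta')})
    y = rng.randn(n_subjects)
    model = make_filter_bank_regressor(names=['alpha', 'beta'], method='riemann')
    model.fit(df_covs, y)
    return model.predict(df_covs)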
def make_filter_bank_classifier(names, method='riemann',
projection_params=None,
vectorization_params=None,
categorical_interaction=None, scaling=None,
estimator=None):
"""Generate pipeline for classification with filter bank model.
Prepare filter bank models as used in [1]_. These models take as input
sensor-space covariance matrices computed from M/EEG signals in different
frequency bands. Then transformations are applied to improve the
applicability of linear regression techniques by reducing the impact of
field spread.
In terms of implementation, this involves 1) projection
(e.g. spatial filters) and 2) vectorization (e.g. taking the log on the
diagonal).
.. note::
The resulting model expects as inputs data frames in which different
        covariances (e.g. for different frequencies) are stored inside columns
indexed by ``names``.
Other columns will be passed through by the underlying column
transformers.
The pipeline also supports fitting categorical interaction effects
after projection and vectorization steps are performed.
.. note::
All essential methods from [1]_ are implemented here. In practice,
        we recommend comparing ``'riemann'``, ``'spoc'`` and ``'diag'`` as a baseline.
Parameters
----------
names : list of str
The column names of the data frame corresponding to different
covariances.
method : str
The method used for extracting features from covariances. Defaults
to ``'riemann'``. Can be ``'riemann'``, ``'lw_riemann'``, ``'diag'``,
``'log_diag'``, ``'random'``, ``'naive'``, ``'spoc'``,
``'riemann_wasserstein'``.
projection_params : dict | None
The parameters for the projection step.
vectorization_params : dict | None
The parameters for the vectorization step.
categorical_interaction : str
The column in the input data frame containing a binary descriptor
used to fit 2-way interaction effects.
scaling : scikit-learn Transformer object | None
        Method for re-scaling the features. Defaults to None. If None,
StandardScaler is used.
estimator : scikit-learn Estimator object.
        The estimator object. Defaults to None. If None, a
        ``LogisticRegression`` with the ``'liblinear'`` solver is used.
References
----------
[1] <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>.
Predictive regression modeling with MEG/EEG: from source power
to signals and cognitive states.
        *NeuroImage*, page 116893, 2020. ISSN 1053-8119.
https://doi.org/10.1016/j.neuroimage.2020.116893
"""
filter_bank_transformer = make_filter_bank_transformer(
names=names, method=method, projection_params=projection_params,
vectorization_params=vectorization_params,
categorical_interaction=categorical_interaction
)
scaling_ = scaling
if scaling_ is None:
scaling_ = StandardScaler()
estimator_ = estimator
if estimator_ is None:
estimator_ = LogisticRegression(solver='liblinear')
filter_bank_regressor = make_pipeline(
filter_bank_transformer,
scaling_,
estimator_
)
return filter_bank_regressor
|
[
"coffeine.covariance_transformers.ExpandFeatures",
"sklearn.linear_model.LogisticRegression",
"sklearn.preprocessing.StandardScaler",
"sklearn.pipeline.make_pipeline",
"numpy.logspace"
] |
[((8747, 8807), 'sklearn.pipeline.make_pipeline', 'make_pipeline', (['filter_bank_transformer', 'scaling_', 'estimator_'], {}), '(filter_bank_transformer, scaling_, estimator_)\n', (8760, 8807), False, 'from sklearn.pipeline import make_pipeline\n'), ((12164, 12224), 'sklearn.pipeline.make_pipeline', 'make_pipeline', (['filter_bank_transformer', 'scaling_', 'estimator_'], {}), '(filter_bank_transformer, scaling_, estimator_)\n', (12177, 12224), False, 'from sklearn.pipeline import make_pipeline\n'), ((5344, 5429), 'coffeine.covariance_transformers.ExpandFeatures', 'ExpandFeatures', (['filter_bank_transformer'], {'expander_column': 'categorical_interaction'}), '(filter_bank_transformer, expander_column=categorical_interaction\n )\n', (5358, 5429), False, 'from coffeine.covariance_transformers import Diag, LogDiag, ExpandFeatures, Riemann, RiemannSnp, NaiveVec\n'), ((8585, 8601), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (8599, 8601), False, 'from sklearn.preprocessing import StandardScaler\n'), ((12003, 12019), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (12017, 12019), False, 'from sklearn.preprocessing import StandardScaler\n'), ((12096, 12134), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'solver': '"""liblinear"""'}), "(solver='liblinear')\n", (12114, 12134), False, 'from sklearn.linear_model import RidgeCV, LogisticRegression\n'), ((8693, 8716), 'numpy.logspace', 'np.logspace', (['(-3)', '(5)', '(100)'], {}), '(-3, 5, 100)\n', (8704, 8716), True, 'import numpy as np\n')]
|
import os
import df2img
import disnake
import numpy as np
import pandas as pd
from menus.menu import Menu
from PIL import Image
import discordbot.config_discordbot as cfg
from discordbot.config_discordbot import gst_imgur, logger
from discordbot.helpers import autocrop_image
from gamestonk_terminal.stocks.options import yfinance_model
async def chain_command(
ctx,
ticker: str = None,
expiry: str = None,
opt_type: str = None,
min_sp: float = None,
max_sp: float = None,
):
"""Show calls/puts for given ticker and expiration"""
try:
# Debug
if cfg.DEBUG:
logger.debug(
"opt-chain %s %s %s %s %s", ticker, expiry, opt_type, min_sp, max_sp
)
# Check for argument
if not ticker:
raise Exception("Stock ticker is required")
dates = yfinance_model.option_expirations(ticker)
if not dates:
raise Exception("Stock ticker is invalid")
options = yfinance_model.get_option_chain(ticker, str(expiry))
calls_df = options.calls
puts_df = options.puts
column_map = {"openInterest": "oi", "volume": "vol", "impliedVolatility": "iv"}
columns = [
"strike",
"bid",
"ask",
"volume",
"openInterest",
"impliedVolatility",
]
if opt_type == "Calls":
df = calls_df[columns].rename(columns=column_map)
if opt_type == "Puts":
df = puts_df[columns].rename(columns=column_map)
min_strike = np.percentile(df["strike"], 1)
max_strike = np.percentile(df["strike"], 100)
if min_sp:
min_strike = min_sp
if max_sp:
max_strike = max_sp
        if min_sp is not None and max_sp is not None and min_sp > max_sp:
            min_strike, max_strike = max_strike, min_strike
df = df[df["strike"] >= min_strike]
df = df[df["strike"] <= max_strike]
df["iv"] = pd.to_numeric(df["iv"].astype(float))
formats = {"iv": "{:.2f}"}
for col, f in formats.items():
df[col] = df[col].map(lambda x: f.format(x)) # pylint: disable=W0640
df.set_index("strike", inplace=True)
title = f"Stocks: {opt_type} Option Chain for {ticker.upper()} on {expiry} [yfinance]"
embeds: list = []
# Weekly Calls Pages
i, i2, end = 0, 0, 20
df_pg = []
embeds_img = []
dindex = len(df.index)
while i < dindex:
df_pg = df.iloc[i:end]
df_pg.append(df_pg)
figp = df2img.plot_dataframe(
df_pg,
fig_size=(1000, (40 + (40 * 20))),
col_width=[3, 3, 3, 3],
tbl_cells=dict(
height=35,
),
font=dict(
family="Consolas",
size=20,
),
template="plotly_dark",
paper_bgcolor="rgba(0, 0, 0, 0)",
)
imagefile = f"opt-chain{i}.png"
df2img.save_dataframe(fig=figp, filename=imagefile)
image = Image.open(imagefile)
image = autocrop_image(image, 0)
image.save(imagefile, "PNG", quality=100)
uploaded_image = gst_imgur.upload_image(imagefile, title="something")
image_link = uploaded_image.link
embeds_img.append(
f"{image_link}",
)
embeds.append(
disnake.Embed(
title=title,
colour=cfg.COLOR,
),
)
i2 += 1
i += 20
end += 20
os.remove(imagefile)
# Author/Footer
for i in range(0, i2):
embeds[i].set_author(
name=cfg.AUTHOR_NAME,
url=cfg.AUTHOR_URL,
icon_url=cfg.AUTHOR_ICON_URL,
)
embeds[i].set_footer(
text=cfg.AUTHOR_NAME,
icon_url=cfg.AUTHOR_ICON_URL,
)
i = 0
for i in range(0, i2):
embeds[i].set_image(url=embeds_img[i])
i += 1
embeds[0].set_footer(text=f"Page 1 of {len(embeds)}")
options = [
disnake.SelectOption(label="Home", value="0", emoji="🟢"),
]
await ctx.send(embed=embeds[0], view=Menu(embeds, options))
except Exception as e:
embed = disnake.Embed(
title="ERROR Stock-Options: Expirations",
colour=cfg.COLOR,
description=e,
)
embed.set_author(
name=cfg.AUTHOR_NAME,
icon_url=cfg.AUTHOR_ICON_URL,
)
await ctx.send(embed=embed, delete_after=30.0)
|
[
"gamestonk_terminal.stocks.options.yfinance_model.option_expirations",
"disnake.Embed",
"PIL.Image.open",
"discordbot.helpers.autocrop_image",
"df2img.save_dataframe",
"discordbot.config_discordbot.logger.debug",
"disnake.SelectOption",
"menus.menu.Menu",
"os.remove",
"numpy.percentile",
"discordbot.config_discordbot.gst_imgur.upload_image"
] |
[((860, 901), 'gamestonk_terminal.stocks.options.yfinance_model.option_expirations', 'yfinance_model.option_expirations', (['ticker'], {}), '(ticker)\n', (893, 901), False, 'from gamestonk_terminal.stocks.options import yfinance_model\n'), ((1587, 1617), 'numpy.percentile', 'np.percentile', (["df['strike']", '(1)'], {}), "(df['strike'], 1)\n", (1600, 1617), True, 'import numpy as np\n'), ((1639, 1671), 'numpy.percentile', 'np.percentile', (["df['strike']", '(100)'], {}), "(df['strike'], 100)\n", (1652, 1671), True, 'import numpy as np\n'), ((621, 707), 'discordbot.config_discordbot.logger.debug', 'logger.debug', (['"""opt-chain %s %s %s %s %s"""', 'ticker', 'expiry', 'opt_type', 'min_sp', 'max_sp'], {}), "('opt-chain %s %s %s %s %s', ticker, expiry, opt_type, min_sp,\n max_sp)\n", (633, 707), False, 'from discordbot.config_discordbot import gst_imgur, logger\n'), ((3090, 3141), 'df2img.save_dataframe', 'df2img.save_dataframe', ([], {'fig': 'figp', 'filename': 'imagefile'}), '(fig=figp, filename=imagefile)\n', (3111, 3141), False, 'import df2img\n'), ((3162, 3183), 'PIL.Image.open', 'Image.open', (['imagefile'], {}), '(imagefile)\n', (3172, 3183), False, 'from PIL import Image\n'), ((3204, 3228), 'discordbot.helpers.autocrop_image', 'autocrop_image', (['image', '(0)'], {}), '(image, 0)\n', (3218, 3228), False, 'from discordbot.helpers import autocrop_image\n'), ((3313, 3365), 'discordbot.config_discordbot.gst_imgur.upload_image', 'gst_imgur.upload_image', (['imagefile'], {'title': '"""something"""'}), "(imagefile, title='something')\n", (3335, 3365), False, 'from discordbot.config_discordbot import gst_imgur, logger\n'), ((3725, 3745), 'os.remove', 'os.remove', (['imagefile'], {}), '(imagefile)\n', (3734, 3745), False, 'import os\n'), ((4313, 4369), 'disnake.SelectOption', 'disnake.SelectOption', ([], {'label': '"""Home"""', 'value': '"""0"""', 'emoji': '"""🟢"""'}), "(label='Home', value='0', emoji='🟢')\n", (4333, 4369), False, 'import disnake\n'), ((4494, 4586), 'disnake.Embed', 'disnake.Embed', ([], {'title': '"""ERROR Stock-Options: Expirations"""', 'colour': 'cfg.COLOR', 'description': 'e'}), "(title='ERROR Stock-Options: Expirations', colour=cfg.COLOR,\n description=e)\n", (4507, 4586), False, 'import disnake\n'), ((3532, 3576), 'disnake.Embed', 'disnake.Embed', ([], {'title': 'title', 'colour': 'cfg.COLOR'}), '(title=title, colour=cfg.COLOR)\n', (3545, 3576), False, 'import disnake\n'), ((4427, 4448), 'menus.menu.Menu', 'Menu', (['embeds', 'options'], {}), '(embeds, options)\n', (4431, 4448), False, 'from menus.menu import Menu\n')]
|
# -*- coding:utf-8 -*-
"""
"""
import cudf
from hypergbm import make_experiment
from hypernets.tabular import get_tool_box
from hypernets.tabular.datasets import dsutils
def main(target='y', dtype=None, max_trials=3, drift_detection=False, clear_cache=True, **kwargs):
tb = get_tool_box(cudf.DataFrame)
assert isinstance(tb, type) and tb.__name__ == 'CumlToolBox'
print("preparing...")
df = dsutils.load_bank()
if dtype is not None:
df[target] = df[target].astype(dtype)
df, = tb.from_local(df)
assert isinstance(df, cudf.DataFrame)
df_train, df_test = tb.train_test_split(df, test_size=0.5, random_state=123)
X_test = df_test
y_test = X_test.pop(target)
exp = make_experiment(df_train, target=target,
test_data=X_test.copy(),
max_trials=max_trials,
drift_detection=drift_detection,
clear_cache=clear_cache,
**kwargs)
print('experiment:', f'{[s.name for s in exp.steps]}', 'random_state', exp.random_state)
print("training...")
estimator = exp.run()
print('estimator pipeline:', f'{[s[0] for s in estimator.steps]}')
print("scoring...")
y_pred = estimator.predict(X_test)
y_proba = estimator.predict_proba(X_test)
task = exp.task
if task == 'regression':
metrics = ['mse', 'mae', 'msle', 'rmse', 'r2']
else:
metrics = ['auc', 'accuracy', 'f1', 'recall']
result = tb.metrics.calc_score(y_test, y_pred, y_proba, task=task, metrics=metrics,
pos_label=kwargs.get('pos_label', None))
print(result)
return exp, estimator
if __name__ == '__main__':
main(target='y', reward_metric='auc', ensemble_size=10, pos_label='yes', log_level='info', max_trials=10)
# main(target='y', max_trials=10, cv=False, ensemble_size=0, verbose=0, pos_label='yes', )
# main(target='day', reward_metric='f1', ensemble_size=10, log_level='info', max_trials=5)
# main(target='day', dtype='str', reward_metric='f1', ensemble_size=0, log_level='info', max_trials=6)
# main(target='age', dtype='float', ensemble_size=10, log_level='info', max_trials=8)
|
[
"hypernets.tabular.get_tool_box",
"hypernets.tabular.datasets.dsutils.load_bank"
] |
[((283, 311), 'hypernets.tabular.get_tool_box', 'get_tool_box', (['cudf.DataFrame'], {}), '(cudf.DataFrame)\n', (295, 311), False, 'from hypernets.tabular import get_tool_box\n'), ((413, 432), 'hypernets.tabular.datasets.dsutils.load_bank', 'dsutils.load_bank', ([], {}), '()\n', (430, 432), False, 'from hypernets.tabular.datasets import dsutils\n')]
|
#!/usr/bin/python3
from setuptools import setup, find_packages
setup(
package_dir = { '': 'src' },
packages = find_packages( where='src' ),
)
|
[
"setuptools.find_packages"
] |
[((120, 146), 'setuptools.find_packages', 'find_packages', ([], {'where': '"""src"""'}), "(where='src')\n", (133, 146), False, 'from setuptools import setup, find_packages\n')]
|
#!/usr/bin/env python
#
# Copyright (C) 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import logging
import os.path
import shlex
import struct
import common
import sparse_img
from rangelib import RangeSet
logger = logging.getLogger(__name__)
OPTIONS = common.OPTIONS
BLOCK_SIZE = common.BLOCK_SIZE
FIXED_SALT = "aee087a5be3b982978c923f566a94613496b417f2af592639bc80d141e34dfe7"
class BuildVerityImageError(Exception):
"""An Exception raised during verity image building."""
def __init__(self, message):
Exception.__init__(self, message)
def GetVerityFECSize(image_size):
cmd = ["fec", "-s", str(image_size)]
output = common.RunAndCheckOutput(cmd, verbose=False)
return int(output)
def GetVerityTreeSize(image_size):
cmd = ["build_verity_tree", "-s", str(image_size)]
output = common.RunAndCheckOutput(cmd, verbose=False)
return int(output)
def GetVerityMetadataSize(image_size):
cmd = ["build_verity_metadata", "size", str(image_size)]
output = common.RunAndCheckOutput(cmd, verbose=False)
return int(output)
def GetVeritySize(image_size, fec_supported):
verity_tree_size = GetVerityTreeSize(image_size)
verity_metadata_size = GetVerityMetadataSize(image_size)
verity_size = verity_tree_size + verity_metadata_size
if fec_supported:
fec_size = GetVerityFECSize(image_size + verity_size)
return verity_size + fec_size
return verity_size
def GetSimgSize(image_file):
simg = sparse_img.SparseImage(image_file, build_map=False)
return simg.blocksize * simg.total_blocks
def ZeroPadSimg(image_file, pad_size):
blocks = pad_size // BLOCK_SIZE
logger.info("Padding %d blocks (%d bytes)", blocks, pad_size)
simg = sparse_img.SparseImage(image_file, mode="r+b", build_map=False)
simg.AppendFillChunk(0, blocks)
def BuildVerityFEC(sparse_image_path, verity_path, verity_fec_path,
padding_size):
cmd = ["fec", "-e", "-p", str(padding_size), sparse_image_path,
verity_path, verity_fec_path]
common.RunAndCheckOutput(cmd)
def BuildVerityTree(sparse_image_path, verity_image_path):
cmd = ["build_verity_tree", "-A", FIXED_SALT, sparse_image_path,
verity_image_path]
output = common.RunAndCheckOutput(cmd)
root, salt = output.split()
return root, salt
def BuildVerityMetadata(image_size, verity_metadata_path, root_hash, salt,
block_device, signer_path, key, signer_args,
verity_disable):
cmd = ["build_verity_metadata", "build", str(image_size),
verity_metadata_path, root_hash, salt, block_device, signer_path, key]
if signer_args:
cmd.append("--signer_args=\"%s\"" % (' '.join(signer_args),))
if verity_disable:
cmd.append("--verity_disable")
common.RunAndCheckOutput(cmd)
def Append2Simg(sparse_image_path, unsparse_image_path, error_message):
"""Appends the unsparse image to the given sparse image.
Args:
sparse_image_path: the path to the (sparse) image
unsparse_image_path: the path to the (unsparse) image
Raises:
BuildVerityImageError: On error.
"""
cmd = ["append2simg", sparse_image_path, unsparse_image_path]
try:
common.RunAndCheckOutput(cmd)
except:
logger.exception(error_message)
raise BuildVerityImageError(error_message)
def Append(target, file_to_append, error_message):
"""Appends file_to_append to target.
Raises:
BuildVerityImageError: On error.
"""
try:
with open(target, 'ab') as out_file, \
open(file_to_append, 'rb') as input_file:
for line in input_file:
out_file.write(line)
except IOError:
logger.exception(error_message)
raise BuildVerityImageError(error_message)
def CreateVerityImageBuilder(prop_dict):
"""Returns a verity image builder based on the given build properties.
Args:
prop_dict: A dict that contains the build properties. In particular, it will
look for verity-related property values.
Returns:
A VerityImageBuilder instance for Verified Boot 1.0 or Verified Boot 2.0; or
None if the given build doesn't support Verified Boot.
"""
partition_size = prop_dict.get("partition_size")
# partition_size could be None at this point, if using dynamic partitions.
if partition_size:
partition_size = int(partition_size)
# Verified Boot 1.0
verity_supported = prop_dict.get("verity") == "true"
is_verity_partition = "verity_block_device" in prop_dict
if verity_supported and is_verity_partition:
if OPTIONS.verity_signer_path is not None:
signer_path = OPTIONS.verity_signer_path
else:
signer_path = prop_dict["verity_signer_cmd"]
return Version1VerityImageBuilder(
partition_size,
prop_dict["verity_block_device"],
prop_dict.get("verity_fec") == "true",
signer_path,
prop_dict["verity_key"] + ".pk8",
OPTIONS.verity_signer_args,
"verity_disable" in prop_dict)
# Verified Boot 2.0
if (prop_dict.get("avb_hash_enable") == "true" or
prop_dict.get("avb_hashtree_enable") == "true"):
# key_path and algorithm are only available when chain partition is used.
key_path = prop_dict.get("avb_key_path")
algorithm = prop_dict.get("avb_algorithm")
# Image uses hash footer.
if prop_dict.get("avb_hash_enable") == "true":
return VerifiedBootVersion2VerityImageBuilder(
prop_dict["partition_name"],
partition_size,
VerifiedBootVersion2VerityImageBuilder.AVB_HASH_FOOTER,
prop_dict["avb_avbtool"],
key_path,
algorithm,
prop_dict.get("avb_salt"),
prop_dict["avb_add_hash_footer_args"])
# Image uses hashtree footer.
return VerifiedBootVersion2VerityImageBuilder(
prop_dict["partition_name"],
partition_size,
VerifiedBootVersion2VerityImageBuilder.AVB_HASHTREE_FOOTER,
prop_dict["avb_avbtool"],
key_path,
algorithm,
prop_dict.get("avb_salt"),
prop_dict["avb_add_hashtree_footer_args"])
return None
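# --- Editor's illustrative sketch (not part of the original tool) ---
# A hedged example of driving the factory above for a Verified Boot 2.0
# hashtree image. The property keys mirror what CreateVerityImageBuilder()
# reads; the concrete values (partition name, size, avbtool path) are
# hypothetical, and the size calculation shells out to `avbtool`, which must
# be available on the host.
def _example_create_verity_image_builder():
  prop_dict = {
      "partition_name": "system",
      "partition_size": str(2 * 1024 * 1024 * 1024),
      "avb_hashtree_enable": "true",
      "avb_avbtool": "avbtool",
      "avb_key_path": None,
      "avb_algorithm": None,
      "avb_salt": None,
      "avb_add_hashtree_footer_args": "",
  }
  builder = CreateVerityImageBuilder(prop_dict)
  # Maximum filesystem size that still leaves room for hashtree + metadata.
  return builder.CalculateMaxImageSize()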
class VerityImageBuilder(object):
"""A builder that generates an image with verity metadata for Verified Boot.
A VerityImageBuilder instance handles the works for building an image with
verity metadata for supporting Android Verified Boot. This class defines the
common interface between Verified Boot 1.0 and Verified Boot 2.0. A matching
builder will be returned based on the given build properties.
More info on the verity image generation can be found at the following link.
https://source.android.com/security/verifiedboot/dm-verity#implementation
"""
def CalculateMaxImageSize(self, partition_size):
"""Calculates the filesystem image size for the given partition size."""
raise NotImplementedError
def CalculateDynamicPartitionSize(self, image_size):
"""Calculates and sets the partition size for a dynamic partition."""
raise NotImplementedError
def PadSparseImage(self, out_file):
"""Adds padding to the generated sparse image."""
raise NotImplementedError
def Build(self, out_file):
"""Builds the verity image and writes it to the given file."""
raise NotImplementedError
class Version1VerityImageBuilder(VerityImageBuilder):
"""A VerityImageBuilder for Verified Boot 1.0."""
def __init__(self, partition_size, block_dev, fec_supported, signer_path,
signer_key, signer_args, verity_disable):
self.version = 1
self.partition_size = partition_size
self.block_device = block_dev
self.fec_supported = fec_supported
self.signer_path = signer_path
self.signer_key = signer_key
self.signer_args = signer_args
self.verity_disable = verity_disable
self.image_size = None
self.verity_size = None
def CalculateDynamicPartitionSize(self, image_size):
# This needs to be implemented. Note that returning the given image size as
# the partition size doesn't make sense, as it will fail later.
raise NotImplementedError
def CalculateMaxImageSize(self, partition_size=None):
"""Calculates the max image size by accounting for the verity metadata.
Args:
partition_size: The partition size, which defaults to self.partition_size
if unspecified.
Returns:
The size of the image adjusted for verity metadata.
"""
if partition_size is None:
partition_size = self.partition_size
assert partition_size > 0, \
"Invalid partition size: {}".format(partition_size)
hi = partition_size
if hi % BLOCK_SIZE != 0:
hi = (hi // BLOCK_SIZE) * BLOCK_SIZE
# verity tree and fec sizes depend on the partition size, which
# means this estimate is always going to be unnecessarily small
verity_size = GetVeritySize(hi, self.fec_supported)
lo = partition_size - verity_size
result = lo
# do a binary search for the optimal size
while lo < hi:
i = ((lo + hi) // (2 * BLOCK_SIZE)) * BLOCK_SIZE
v = GetVeritySize(i, self.fec_supported)
if i + v <= partition_size:
if result < i:
result = i
verity_size = v
lo = i + BLOCK_SIZE
else:
hi = i
self.image_size = result
self.verity_size = verity_size
logger.info(
"Calculated image size for verity: partition_size %d, image_size %d, "
"verity_size %d", partition_size, result, verity_size)
return result
def Build(self, out_file):
"""Creates an image that is verifiable using dm-verity.
Args:
out_file: the output image.
    Raises:
AssertionError: On invalid partition sizes.
BuildVerityImageError: On other errors.
"""
image_size = int(self.image_size)
tempdir_name = common.MakeTempDir(suffix="_verity_images")
# Get partial image paths.
verity_image_path = os.path.join(tempdir_name, "verity.img")
verity_metadata_path = os.path.join(tempdir_name, "verity_metadata.img")
# Build the verity tree and get the root hash and salt.
root_hash, salt = BuildVerityTree(out_file, verity_image_path)
# Build the metadata blocks.
BuildVerityMetadata(
image_size, verity_metadata_path, root_hash, salt, self.block_device,
self.signer_path, self.signer_key, self.signer_args,
self.verity_disable)
padding_size = self.partition_size - self.image_size - self.verity_size
assert padding_size >= 0
# Build the full verified image.
Append(
verity_image_path, verity_metadata_path,
"Failed to append verity metadata")
if self.fec_supported:
# Build FEC for the entire partition, including metadata.
verity_fec_path = os.path.join(tempdir_name, "verity_fec.img")
BuildVerityFEC(
out_file, verity_image_path, verity_fec_path, padding_size)
Append(verity_image_path, verity_fec_path, "Failed to append FEC")
Append2Simg(
out_file, verity_image_path, "Failed to append verity data")
def PadSparseImage(self, out_file):
sparse_image_size = GetSimgSize(out_file)
if sparse_image_size > self.image_size:
raise BuildVerityImageError(
"Error: image size of {} is larger than partition size of "
"{}".format(sparse_image_size, self.image_size))
ZeroPadSimg(out_file, self.image_size - sparse_image_size)
class VerifiedBootVersion2VerityImageBuilder(VerityImageBuilder):
"""A VerityImageBuilder for Verified Boot 2.0."""
AVB_HASH_FOOTER = 1
AVB_HASHTREE_FOOTER = 2
def __init__(self, partition_name, partition_size, footer_type, avbtool,
key_path, algorithm, salt, signing_args):
self.version = 2
self.partition_name = partition_name
self.partition_size = partition_size
self.footer_type = footer_type
self.avbtool = avbtool
self.algorithm = algorithm
self.key_path = key_path
self.salt = salt
self.signing_args = signing_args
self.image_size = None
def CalculateMinPartitionSize(self, image_size, size_calculator=None):
"""Calculates min partition size for a given image size.
This is used when determining the partition size for a dynamic partition,
    which should cover the given image size (for filesystem files) as well as
the verity metadata size.
Args:
image_size: The size of the image in question.
size_calculator: The function to calculate max image size
for a given partition size.
Returns:
The minimum partition size required to accommodate the image size.
"""
if size_calculator is None:
size_calculator = self.CalculateMaxImageSize
# Use image size as partition size to approximate final partition size.
image_ratio = size_calculator(image_size) / float(image_size)
# Prepare a binary search for the optimal partition size.
lo = int(image_size / image_ratio) // BLOCK_SIZE * BLOCK_SIZE - BLOCK_SIZE
# Ensure lo is small enough: max_image_size should <= image_size.
delta = BLOCK_SIZE
max_image_size = size_calculator(lo)
while max_image_size > image_size:
image_ratio = max_image_size / float(lo)
lo = int(image_size / image_ratio) // BLOCK_SIZE * BLOCK_SIZE - delta
delta *= 2
max_image_size = size_calculator(lo)
hi = lo + BLOCK_SIZE
# Ensure hi is large enough: max_image_size should >= image_size.
delta = BLOCK_SIZE
max_image_size = size_calculator(hi)
while max_image_size < image_size:
image_ratio = max_image_size / float(hi)
hi = int(image_size / image_ratio) // BLOCK_SIZE * BLOCK_SIZE + delta
delta *= 2
max_image_size = size_calculator(hi)
partition_size = hi
# Start to binary search.
while lo < hi:
mid = ((lo + hi) // (2 * BLOCK_SIZE)) * BLOCK_SIZE
max_image_size = size_calculator(mid)
if max_image_size >= image_size: # if mid can accommodate image_size
if mid < partition_size: # if a smaller partition size is found
partition_size = mid
hi = mid
else:
lo = mid + BLOCK_SIZE
logger.info(
"CalculateMinPartitionSize(%d): partition_size %d.", image_size,
partition_size)
return partition_size
def CalculateDynamicPartitionSize(self, image_size):
self.partition_size = self.CalculateMinPartitionSize(image_size)
return self.partition_size
def CalculateMaxImageSize(self, partition_size=None):
"""Calculates max image size for a given partition size.
Args:
partition_size: The partition size, which defaults to self.partition_size
if unspecified.
Returns:
The maximum image size.
Raises:
BuildVerityImageError: On error or getting invalid image size.
"""
if partition_size is None:
partition_size = self.partition_size
assert partition_size > 0, \
"Invalid partition size: {}".format(partition_size)
add_footer = ("add_hash_footer" if self.footer_type == self.AVB_HASH_FOOTER
else "add_hashtree_footer")
cmd = [self.avbtool, add_footer, "--partition_size",
str(partition_size), "--calc_max_image_size"]
cmd.extend(shlex.split(self.signing_args))
proc = common.Run(cmd)
output, _ = proc.communicate()
if proc.returncode != 0:
raise BuildVerityImageError(
"Failed to calculate max image size:\n{}".format(output))
image_size = int(output)
if image_size <= 0:
raise BuildVerityImageError(
"Invalid max image size: {}".format(output))
self.image_size = image_size
return image_size
def PadSparseImage(self, out_file):
# No-op as the padding is taken care of by avbtool.
pass
def Build(self, out_file):
"""Adds dm-verity hashtree and AVB metadata to an image.
Args:
out_file: Path to image to modify.
"""
add_footer = ("add_hash_footer" if self.footer_type == self.AVB_HASH_FOOTER
else "add_hashtree_footer")
cmd = [self.avbtool, add_footer,
"--partition_size", str(self.partition_size),
"--partition_name", self.partition_name,
"--image", out_file]
if self.key_path and self.algorithm:
cmd.extend(["--key", self.key_path, "--algorithm", self.algorithm])
if self.salt:
cmd.extend(["--salt", self.salt])
cmd.extend(shlex.split(self.signing_args))
proc = common.Run(cmd)
output, _ = proc.communicate()
if proc.returncode != 0:
raise BuildVerityImageError("Failed to add AVB footer: {}".format(output))
class HashtreeInfoGenerationError(Exception):
"""An Exception raised during hashtree info generation."""
def __init__(self, message):
Exception.__init__(self, message)
class HashtreeInfo(object):
def __init__(self):
self.hashtree_range = None
self.filesystem_range = None
self.hash_algorithm = None
self.salt = None
self.root_hash = None
def CreateHashtreeInfoGenerator(partition_name, block_size, info_dict):
generator = None
if (info_dict.get("verity") == "true" and
info_dict.get("{}_verity_block_device".format(partition_name))):
partition_size = info_dict["{}_size".format(partition_name)]
fec_supported = info_dict.get("verity_fec") == "true"
generator = VerifiedBootVersion1HashtreeInfoGenerator(
partition_size, block_size, fec_supported)
return generator
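# --- Editor's illustrative sketch (not part of the original tool) ---
# A hedged example of parsing hashtree info out of a verity-enabled sparse
# image via the factory above. The info_dict keys mirror what
# CreateHashtreeInfoGenerator() reads; the block-device path is hypothetical,
# and ValidateHashtree() additionally needs the `build_verity_tree` host tool.
def _example_generate_hashtree_info(sparse_image_path, partition_size):
  info_dict = {
      "verity": "true",
      "verity_fec": "true",
      "system_verity_block_device": "/dev/block/by-name/system",
      "system_size": partition_size,
  }
  generator = CreateHashtreeInfoGenerator("system", BLOCK_SIZE, info_dict)
  image = sparse_img.SparseImage(sparse_image_path)
  return generator.Generate(image)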
class HashtreeInfoGenerator(object):
def Generate(self, image):
raise NotImplementedError
def DecomposeSparseImage(self, image):
raise NotImplementedError
def ValidateHashtree(self):
raise NotImplementedError
class VerifiedBootVersion1HashtreeInfoGenerator(HashtreeInfoGenerator):
"""A class that parses the metadata of hashtree for a given partition."""
def __init__(self, partition_size, block_size, fec_supported):
"""Initialize VerityTreeInfo with the sparse image and input property.
Arguments:
partition_size: The whole size in bytes of a partition, including the
filesystem size, padding size, and verity size.
block_size: Expected size in bytes of each block for the sparse image.
fec_supported: True if the verity section contains fec data.
"""
self.block_size = block_size
self.partition_size = partition_size
self.fec_supported = fec_supported
self.image = None
self.filesystem_size = None
self.hashtree_size = None
self.metadata_size = None
prop_dict = {
'partition_size': str(partition_size),
'verity': 'true',
'verity_fec': 'true' if fec_supported else None,
# 'verity_block_device' needs to be present to indicate a verity-enabled
# partition.
'verity_block_device': '',
# We don't need the following properties that are needed for signing the
# verity metadata.
'verity_key': '',
'verity_signer_cmd': None,
}
self.verity_image_builder = CreateVerityImageBuilder(prop_dict)
self.hashtree_info = HashtreeInfo()
def DecomposeSparseImage(self, image):
"""Calculate the verity size based on the size of the input image.
Since we already know the structure of a verity enabled image to be:
[filesystem, verity_hashtree, verity_metadata, fec_data]. We can then
calculate the size and offset of each section.
"""
self.image = image
assert self.block_size == image.blocksize
assert self.partition_size == image.total_blocks * self.block_size, \
"partition size {} doesn't match with the calculated image size." \
" total_blocks: {}".format(self.partition_size, image.total_blocks)
adjusted_size = self.verity_image_builder.CalculateMaxImageSize()
assert adjusted_size % self.block_size == 0
verity_tree_size = GetVerityTreeSize(adjusted_size)
assert verity_tree_size % self.block_size == 0
metadata_size = GetVerityMetadataSize(adjusted_size)
assert metadata_size % self.block_size == 0
self.filesystem_size = adjusted_size
self.hashtree_size = verity_tree_size
self.metadata_size = metadata_size
self.hashtree_info.filesystem_range = RangeSet(
data=[0, adjusted_size // self.block_size])
self.hashtree_info.hashtree_range = RangeSet(
data=[adjusted_size // self.block_size,
(adjusted_size + verity_tree_size) // self.block_size])
def _ParseHashtreeMetadata(self):
"""Parses the hash_algorithm, root_hash, salt from the metadata block."""
metadata_start = self.filesystem_size + self.hashtree_size
metadata_range = RangeSet(
data=[metadata_start // self.block_size,
(metadata_start + self.metadata_size) // self.block_size])
meta_data = b''.join(self.image.ReadRangeSet(metadata_range))
# More info about the metadata structure available in:
# system/extras/verity/build_verity_metadata.py
META_HEADER_SIZE = 268
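    # (Editor's note) 268 == struct.calcsize("II256sI"):
    # 4 (magic) + 4 (version) + 256 (signature) + 4 (table_len) bytes.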
header_bin = meta_data[0:META_HEADER_SIZE]
header = struct.unpack("II256sI", header_bin)
# header: magic_number, version, signature, table_len
assert header[0] == 0xb001b001, header[0]
table_len = header[3]
verity_table = meta_data[META_HEADER_SIZE: META_HEADER_SIZE + table_len]
table_entries = verity_table.rstrip().split()
# Expected verity table format: "1 block_device block_device block_size
# block_size data_blocks data_blocks hash_algorithm root_hash salt"
assert len(table_entries) == 10, "Unexpected verity table size {}".format(
len(table_entries))
assert (int(table_entries[3]) == self.block_size and
int(table_entries[4]) == self.block_size)
assert (int(table_entries[5]) * self.block_size == self.filesystem_size and
int(table_entries[6]) * self.block_size == self.filesystem_size)
self.hashtree_info.hash_algorithm = table_entries[7].decode()
self.hashtree_info.root_hash = table_entries[8].decode()
self.hashtree_info.salt = table_entries[9].decode()
def ValidateHashtree(self):
"""Checks that we can reconstruct the verity hash tree."""
# Writes the filesystem section to a temp file; and calls the executable
# build_verity_tree to construct the hash tree.
adjusted_partition = common.MakeTempFile(prefix="adjusted_partition")
with open(adjusted_partition, "wb") as fd:
self.image.WriteRangeDataToFd(self.hashtree_info.filesystem_range, fd)
generated_verity_tree = common.MakeTempFile(prefix="verity")
root_hash, salt = BuildVerityTree(adjusted_partition, generated_verity_tree)
# The salt should be always identical, as we use fixed value.
assert salt == self.hashtree_info.salt, \
"Calculated salt {} doesn't match the one in metadata {}".format(
salt, self.hashtree_info.salt)
if root_hash != self.hashtree_info.root_hash:
logger.warning(
"Calculated root hash %s doesn't match the one in metadata %s",
root_hash, self.hashtree_info.root_hash)
return False
# Reads the generated hash tree and checks if it has the exact same bytes
# as the one in the sparse image.
with open(generated_verity_tree, 'rb') as fd:
return fd.read() == b''.join(self.image.ReadRangeSet(
self.hashtree_info.hashtree_range))
def Generate(self, image):
"""Parses and validates the hashtree info in a sparse image.
Returns:
hashtree_info: The information needed to reconstruct the hashtree.
Raises:
HashtreeInfoGenerationError: If we fail to generate the exact bytes of
the hashtree.
"""
self.DecomposeSparseImage(image)
self._ParseHashtreeMetadata()
if not self.ValidateHashtree():
raise HashtreeInfoGenerationError("Failed to reconstruct the verity tree")
return self.hashtree_info
def CreateCustomImageBuilder(info_dict, partition_name, partition_size,
key_path, algorithm, signing_args):
builder = None
if info_dict.get("avb_enable") == "true":
builder = VerifiedBootVersion2VerityImageBuilder(
partition_name,
partition_size,
VerifiedBootVersion2VerityImageBuilder.AVB_HASHTREE_FOOTER,
info_dict.get("avb_avbtool"),
key_path,
algorithm,
# Salt is None because custom images have no fingerprint property to be
# used as the salt.
None,
signing_args)
return builder
|
[
"logging.getLogger",
"common.Run",
"common.RunAndCheckOutput",
"rangelib.RangeSet",
"shlex.split",
"common.MakeTempDir",
"struct.unpack",
"sparse_img.SparseImage",
"common.MakeTempFile"
] |
[((794, 821), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (811, 821), False, 'import logging\n'), ((1215, 1259), 'common.RunAndCheckOutput', 'common.RunAndCheckOutput', (['cmd'], {'verbose': '(False)'}), '(cmd, verbose=False)\n', (1239, 1259), False, 'import common\n'), ((1382, 1426), 'common.RunAndCheckOutput', 'common.RunAndCheckOutput', (['cmd'], {'verbose': '(False)'}), '(cmd, verbose=False)\n', (1406, 1426), False, 'import common\n'), ((1559, 1603), 'common.RunAndCheckOutput', 'common.RunAndCheckOutput', (['cmd'], {'verbose': '(False)'}), '(cmd, verbose=False)\n', (1583, 1603), False, 'import common\n'), ((2012, 2063), 'sparse_img.SparseImage', 'sparse_img.SparseImage', (['image_file'], {'build_map': '(False)'}), '(image_file, build_map=False)\n', (2034, 2063), False, 'import sparse_img\n'), ((2256, 2319), 'sparse_img.SparseImage', 'sparse_img.SparseImage', (['image_file'], {'mode': '"""r+b"""', 'build_map': '(False)'}), "(image_file, mode='r+b', build_map=False)\n", (2278, 2319), False, 'import sparse_img\n'), ((2565, 2594), 'common.RunAndCheckOutput', 'common.RunAndCheckOutput', (['cmd'], {}), '(cmd)\n', (2589, 2594), False, 'import common\n'), ((2762, 2791), 'common.RunAndCheckOutput', 'common.RunAndCheckOutput', (['cmd'], {}), '(cmd)\n', (2786, 2791), False, 'import common\n'), ((3311, 3340), 'common.RunAndCheckOutput', 'common.RunAndCheckOutput', (['cmd'], {}), '(cmd)\n', (3335, 3340), False, 'import common\n'), ((3724, 3753), 'common.RunAndCheckOutput', 'common.RunAndCheckOutput', (['cmd'], {}), '(cmd)\n', (3748, 3753), False, 'import common\n'), ((10282, 10325), 'common.MakeTempDir', 'common.MakeTempDir', ([], {'suffix': '"""_verity_images"""'}), "(suffix='_verity_images')\n", (10300, 10325), False, 'import common\n'), ((15728, 15743), 'common.Run', 'common.Run', (['cmd'], {}), '(cmd)\n', (15738, 15743), False, 'import common\n'), ((16900, 16915), 'common.Run', 'common.Run', (['cmd'], {}), '(cmd)\n', (16910, 16915), False, 'import common\n'), ((20633, 20685), 'rangelib.RangeSet', 'RangeSet', ([], {'data': '[0, adjusted_size // self.block_size]'}), '(data=[0, adjusted_size // self.block_size])\n', (20641, 20685), False, 'from rangelib import RangeSet\n'), ((20735, 20843), 'rangelib.RangeSet', 'RangeSet', ([], {'data': '[adjusted_size // self.block_size, (adjusted_size + verity_tree_size) //\n self.block_size]'}), '(data=[adjusted_size // self.block_size, (adjusted_size +\n verity_tree_size) // self.block_size])\n', (20743, 20843), False, 'from rangelib import RangeSet\n'), ((21063, 21176), 'rangelib.RangeSet', 'RangeSet', ([], {'data': '[metadata_start // self.block_size, (metadata_start + self.metadata_size) //\n self.block_size]'}), '(data=[metadata_start // self.block_size, (metadata_start + self.\n metadata_size) // self.block_size])\n', (21071, 21176), False, 'from rangelib import RangeSet\n'), ((21460, 21496), 'struct.unpack', 'struct.unpack', (['"""II256sI"""', 'header_bin'], {}), "('II256sI', header_bin)\n", (21473, 21496), False, 'import struct\n'), ((22712, 22760), 'common.MakeTempFile', 'common.MakeTempFile', ([], {'prefix': '"""adjusted_partition"""'}), "(prefix='adjusted_partition')\n", (22731, 22760), False, 'import common\n'), ((22914, 22950), 'common.MakeTempFile', 'common.MakeTempFile', ([], {'prefix': '"""verity"""'}), "(prefix='verity')\n", (22933, 22950), False, 'import common\n'), ((15684, 15714), 'shlex.split', 'shlex.split', (['self.signing_args'], {}), '(self.signing_args)\n', (15695, 15714), False, 'import shlex\n'), 
((16856, 16886), 'shlex.split', 'shlex.split', (['self.signing_args'], {}), '(self.signing_args)\n', (16867, 16886), False, 'import shlex\n')]
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import tvm
@tvm.tag_scope(tag="conv")
def compute_conv(data, weight):
N, IC, H, W = data.shape
OC, IC, KH, KW = weight.shape
OH = H - KH + 1
OW = W - KW + 1
ic = tvm.reduce_axis((0, IC), name='ic')
dh = tvm.reduce_axis((0, KH), name='dh')
dw = tvm.reduce_axis((0, KW), name='dw')
return tvm.compute((N, OC, OH, OW), lambda i, oc, h, w: \
tvm.sum(data[i, ic, h+dh, w+dw] * weight[oc, ic, dh, dw],
axis=[ic, dh, dw]))
def test_with():
n = tvm.size_var('n')
m = tvm.size_var('m')
l = tvm.size_var('l')
A = tvm.placeholder((n, l), name='A')
B = tvm.placeholder((m, l), name='B')
with tvm.tag_scope(tag="gemm"):
k = tvm.reduce_axis((0, l), name='k')
C = tvm.compute((n, m), lambda i, j: tvm.sum(A[i, k] * B[j, k], axis=k),
attrs={"hello" : 1, "arr": [10, 12]})
assert C.op.tag == 'gemm'
assert "hello" in C.op.attrs
assert "xx" not in C.op.attrs
assert C.op.attrs["hello"].value == 1
CC = tvm.load_json(tvm.save_json(C))
assert CC.op.attrs["hello"].value == 1
assert CC.op.attrs["arr"][0].value == 10
# str format happened to be json compatible
assert json.loads(str(CC.op.attrs))["arr"][1] == 12
def test_decorator():
n = tvm.size_var('n')
c = tvm.size_var('c')
h = tvm.size_var('h')
w = tvm.size_var('w')
kh = tvm.size_var('kh')
kw = tvm.size_var('kw')
A = tvm.placeholder((n, c, h, w), name='A')
B = tvm.placeholder((c, c, kh, kw), name='B')
C = compute_conv(A, B)
assert C.op.tag == 'conv'
assert len(C.op.attrs) == 0
def test_nested():
n = tvm.size_var('n')
c = tvm.size_var('c')
h = tvm.size_var('h')
w = tvm.size_var('w')
kh = tvm.size_var('kh')
kw = tvm.size_var('kw')
A = tvm.placeholder((n, c, h, w), name='A')
B = tvm.placeholder((c, c, kh, kw), name='B')
try:
with tvm.tag_scope(tag='conv'):
C = compute_conv(A, B)
assert False
except ValueError:
pass
if __name__ == "__main__":
test_with()
test_decorator()
test_nested()
|
[
"tvm.sum",
"tvm.size_var",
"tvm.reduce_axis",
"tvm.tag_scope",
"tvm.placeholder",
"tvm.save_json"
] |
[((810, 835), 'tvm.tag_scope', 'tvm.tag_scope', ([], {'tag': '"""conv"""'}), "(tag='conv')\n", (823, 835), False, 'import tvm\n'), ((981, 1016), 'tvm.reduce_axis', 'tvm.reduce_axis', (['(0, IC)'], {'name': '"""ic"""'}), "((0, IC), name='ic')\n", (996, 1016), False, 'import tvm\n'), ((1026, 1061), 'tvm.reduce_axis', 'tvm.reduce_axis', (['(0, KH)'], {'name': '"""dh"""'}), "((0, KH), name='dh')\n", (1041, 1061), False, 'import tvm\n'), ((1071, 1106), 'tvm.reduce_axis', 'tvm.reduce_axis', (['(0, KW)'], {'name': '"""dw"""'}), "((0, KW), name='dw')\n", (1086, 1106), False, 'import tvm\n'), ((1298, 1315), 'tvm.size_var', 'tvm.size_var', (['"""n"""'], {}), "('n')\n", (1310, 1315), False, 'import tvm\n'), ((1324, 1341), 'tvm.size_var', 'tvm.size_var', (['"""m"""'], {}), "('m')\n", (1336, 1341), False, 'import tvm\n'), ((1350, 1367), 'tvm.size_var', 'tvm.size_var', (['"""l"""'], {}), "('l')\n", (1362, 1367), False, 'import tvm\n'), ((1377, 1410), 'tvm.placeholder', 'tvm.placeholder', (['(n, l)'], {'name': '"""A"""'}), "((n, l), name='A')\n", (1392, 1410), False, 'import tvm\n'), ((1419, 1452), 'tvm.placeholder', 'tvm.placeholder', (['(m, l)'], {'name': '"""B"""'}), "((m, l), name='B')\n", (1434, 1452), False, 'import tvm\n'), ((2083, 2100), 'tvm.size_var', 'tvm.size_var', (['"""n"""'], {}), "('n')\n", (2095, 2100), False, 'import tvm\n'), ((2109, 2126), 'tvm.size_var', 'tvm.size_var', (['"""c"""'], {}), "('c')\n", (2121, 2126), False, 'import tvm\n'), ((2135, 2152), 'tvm.size_var', 'tvm.size_var', (['"""h"""'], {}), "('h')\n", (2147, 2152), False, 'import tvm\n'), ((2161, 2178), 'tvm.size_var', 'tvm.size_var', (['"""w"""'], {}), "('w')\n", (2173, 2178), False, 'import tvm\n'), ((2188, 2206), 'tvm.size_var', 'tvm.size_var', (['"""kh"""'], {}), "('kh')\n", (2200, 2206), False, 'import tvm\n'), ((2216, 2234), 'tvm.size_var', 'tvm.size_var', (['"""kw"""'], {}), "('kw')\n", (2228, 2234), False, 'import tvm\n'), ((2244, 2283), 'tvm.placeholder', 'tvm.placeholder', (['(n, c, h, w)'], {'name': '"""A"""'}), "((n, c, h, w), name='A')\n", (2259, 2283), False, 'import tvm\n'), ((2292, 2333), 'tvm.placeholder', 'tvm.placeholder', (['(c, c, kh, kw)'], {'name': '"""B"""'}), "((c, c, kh, kw), name='B')\n", (2307, 2333), False, 'import tvm\n'), ((2451, 2468), 'tvm.size_var', 'tvm.size_var', (['"""n"""'], {}), "('n')\n", (2463, 2468), False, 'import tvm\n'), ((2477, 2494), 'tvm.size_var', 'tvm.size_var', (['"""c"""'], {}), "('c')\n", (2489, 2494), False, 'import tvm\n'), ((2503, 2520), 'tvm.size_var', 'tvm.size_var', (['"""h"""'], {}), "('h')\n", (2515, 2520), False, 'import tvm\n'), ((2529, 2546), 'tvm.size_var', 'tvm.size_var', (['"""w"""'], {}), "('w')\n", (2541, 2546), False, 'import tvm\n'), ((2556, 2574), 'tvm.size_var', 'tvm.size_var', (['"""kh"""'], {}), "('kh')\n", (2568, 2574), False, 'import tvm\n'), ((2584, 2602), 'tvm.size_var', 'tvm.size_var', (['"""kw"""'], {}), "('kw')\n", (2596, 2602), False, 'import tvm\n'), ((2612, 2651), 'tvm.placeholder', 'tvm.placeholder', (['(n, c, h, w)'], {'name': '"""A"""'}), "((n, c, h, w), name='A')\n", (2627, 2651), False, 'import tvm\n'), ((2660, 2701), 'tvm.placeholder', 'tvm.placeholder', (['(c, c, kh, kw)'], {'name': '"""B"""'}), "((c, c, kh, kw), name='B')\n", (2675, 2701), False, 'import tvm\n'), ((1462, 1487), 'tvm.tag_scope', 'tvm.tag_scope', ([], {'tag': '"""gemm"""'}), "(tag='gemm')\n", (1475, 1487), False, 'import tvm\n'), ((1501, 1534), 'tvm.reduce_axis', 'tvm.reduce_axis', (['(0, l)'], {'name': '"""k"""'}), "((0, l), name='k')\n", (1516, 1534), False, 'import 
tvm\n'), ((1841, 1857), 'tvm.save_json', 'tvm.save_json', (['C'], {}), '(C)\n', (1854, 1857), False, 'import tvm\n'), ((1178, 1263), 'tvm.sum', 'tvm.sum', (['(data[i, ic, h + dh, w + dw] * weight[oc, ic, dh, dw])'], {'axis': '[ic, dh, dw]'}), '(data[i, ic, h + dh, w + dw] * weight[oc, ic, dh, dw], axis=[ic, dh, dw]\n )\n', (1185, 1263), False, 'import tvm\n'), ((2724, 2749), 'tvm.tag_scope', 'tvm.tag_scope', ([], {'tag': '"""conv"""'}), "(tag='conv')\n", (2737, 2749), False, 'import tvm\n'), ((1580, 1614), 'tvm.sum', 'tvm.sum', (['(A[i, k] * B[j, k])'], {'axis': 'k'}), '(A[i, k] * B[j, k], axis=k)\n', (1587, 1614), False, 'import tvm\n')]
|
#!/usr/bin/env python
"""
The pozyx ranging demo (c) Pozyx Labs
please check out https://www.pozyx.io/Documentation/Tutorials/getting_started/Python
This demo requires one (or two) pozyx shields. It demonstrates the 3D orientation and the functionality
to remotely read register data from a pozyx device. Connect one of the Pozyx devices with USB and run this script.
This demo reads the following sensor data:
- pressure
- acceleration
- magnetic field strength
- angular velocity
- the heading, roll and pitch
- the quaternion rotation describing the 3D orientation of the device. This can be used to transform from the body coordinate system to the world coordinate system.
- the linear acceleration (the acceleration excluding gravity)
- the gravitational vector
The data can be viewed in the Processing sketch orientation_3D.pde
"""
from time import time
from time import sleep
from pypozyx import *
from pypozyx.definitions.bitmasks import POZYX_INT_MASK_IMU
from pythonosc.osc_message_builder import OscMessageBuilder
from pythonosc.udp_client import SimpleUDPClient
from modules.user_input_config_functions import UserInputConfigFunctions as UserInput
from modules.file_writing import SensorAndPositionFileWriting as FileWriting
from modules.console_logging_functions import ConsoleLoggingFunctions as ConsoleLogging
import time as t
class Orientation3D(object):
"""Reads out all sensor data from either a local or remote Pozyx"""
def __init__(self, pozyx, osc_udp_client, anchors, algorithm=POZYX_POS_ALG_UWB_ONLY,
dimension=POZYX_3D, height=1000, remote_id=None):
self.pozyx = pozyx
self.osc_udp_client = osc_udp_client
self.anchors = anchors
self.algorithm = algorithm
self.dimension = dimension
self.height = height
self.remote_id = remote_id
def setup(self):
"""There is no specific setup functionality"""
self.current_time = time()
"""Sets up the Pozyx for positioning by calibrating its anchor list."""
print("------------POZYX POSITIONING V1.0 -------------")
print("NOTES: ")
print("- No parameters required.")
print()
print("- System will auto start configuration")
print()
print("- System will auto start positioning")
print("------------POZYX POSITIONING V1.0 --------------")
print()
print("START Ranging: ")
self.pozyx.clearDevices(self.remote_id)
self.setAnchorsManual()
self.printPublishConfigurationResult()
def loop(self):
"""Gets new IMU sensor data"""
# check sensor data status
sensor_data = SensorData()
calibration_status = SingleRegister()
if self.remote_id is not None or self.pozyx.checkForFlag(POZYX_INT_MASK_IMU, 0.01) == POZYX_SUCCESS:
status = self.pozyx.getAllSensorData(sensor_data, self.remote_id)
status &= self.pozyx.getCalibrationStatus(calibration_status, self.remote_id)
if status == POZYX_SUCCESS:
# check position status
position = Coordinates()
status = self.pozyx.doPositioning(
position, self.dimension, self.height, self.algorithm, remote_id=self.remote_id)
if status == POZYX_SUCCESS:
# self.print_publish_position(position)
self.publishSensorData(sensor_data, calibration_status)
return sensor_data, position
else:
pass
# self.print_publish_error_code("positioning")
return "Error, no data to print for this line"
def publishSensorData(self, sensor_data, calibration_status):
"""Makes the OSC sensor data package and publishes it"""
self.msg_builder = OscMessageBuilder("/sensordata")
self.msg_builder.add_arg(int(1000 * (time() - self.current_time)))
current_time = time()
self.addSensorData(sensor_data)
self.addCalibrationStatus(calibration_status)
self.osc_udp_client.send(self.msg_builder.build())
def addSensorData(self, sensor_data):
"""Adds the sensor data to the OSC message"""
self.msg_builder.add_arg(sensor_data.pressure)
self.addComponentsOSC(sensor_data.acceleration)
self.addComponentsOSC(sensor_data.magnetic)
self.addComponentsOSC(sensor_data.angular_vel)
self.addComponentsOSC(sensor_data.euler_angles)
self.addComponentsOSC(sensor_data.quaternion)
self.addComponentsOSC(sensor_data.linear_acceleration)
self.addComponentsOSC(sensor_data.gravity_vector)
def addComponentsOSC(self, component):
"""Adds a sensor data component to the OSC message"""
for data in component.data:
self.msg_builder.add_arg(float(data))
def addCalibrationStatus(self, calibration_status):
"""Adds the calibration status data to the OSC message"""
self.msg_builder.add_arg(calibration_status[0] & 0x03)
self.msg_builder.add_arg((calibration_status[0] & 0x0C) >> 2)
self.msg_builder.add_arg((calibration_status[0] & 0x30) >> 4)
self.msg_builder.add_arg((calibration_status[0] & 0xC0) >> 6)
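        # (Editor's note) worked example: a fully calibrated status byte of
        # 0xFF unpacks to 3 for each of the four 2-bit fields, since
        # 0xFF & 0x03 == 3, (0xFF & 0x0C) >> 2 == 3,
        # (0xFF & 0x30) >> 4 == 3 and (0xFF & 0xC0) >> 6 == 3.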
def setAnchorsManual(self):
"""Adds the manually measured anchors to the Pozyx's device list one for one."""
status = self.pozyx.clearDevices(self.remote_id)
for anchor in self.anchors:
status &= self.pozyx.addDevice(anchor, self.remote_id)
        if len(self.anchors) > 4:
            status &= self.pozyx.setSelectionOfAnchors(POZYX_ANCHOR_SEL_AUTO, len(self.anchors))
return status
def printPublishConfigurationResult(self):
"""Prints and potentially publishes the anchor configuration result in a human-readable way."""
list_size = SingleRegister()
status = self.pozyx.getDeviceListSize(list_size, self.remote_id)
print("List size: {0}".format(list_size[0]))
if list_size[0] != len(self.anchors):
self.printPublishErrorCode("configuration")
return
device_list = DeviceList(list_size=list_size[0])
status = self.pozyx.getDeviceIds(device_list, self.remote_id)
print("Calibration result:")
print("Anchors found: {0}".format(list_size[0]))
print("Anchor IDs: ", device_list)
for i in range(list_size[0]):
anchor_coordinates = Coordinates()
status = self.pozyx.getDeviceCoordinates(
device_list[i], anchor_coordinates, self.remote_id)
print("ANCHOR,0x%0.4x, %s" % (device_list[i], str(anchor_coordinates)))
if self.osc_udp_client is not None:
self.osc_udp_client.send_message(
"/anchor", [device_list[i], int(anchor_coordinates.x), int(anchor_coordinates.y), int(anchor_coordinates.z)])
sleep(0.025)
def printPublishErrorCode(self, operation):
"""Prints the Pozyx's error and possibly sends it as a OSC packet"""
error_code = SingleRegister()
network_id = self.remote_id
if network_id is None:
self.pozyx.getErrorCode(error_code)
print("ERROR %s, local error code %s" % (operation, str(error_code)))
if self.osc_udp_client is not None:
self.osc_udp_client.send_message("/error", [operation, 0, error_code[0]])
return
status = self.pozyx.getErrorCode(error_code, self.remote_id)
if status == POZYX_SUCCESS:
print("ERROR %s on ID %s, error code %s" %
(operation, "0x%0.4x" % network_id, str(error_code)))
if self.osc_udp_client is not None:
self.osc_udp_client.send_message(
"/error", [operation, network_id, error_code[0]])
else:
self.pozyx.getErrorCode(error_code)
print("ERROR %s, couldn't retrieve remote error code, local error code %s" %
(operation, str(error_code)))
if self.osc_udp_client is not None:
self.osc_udp_client.send_message("/error", [operation, 0, -1])
# should only happen when not being able to communicate with a remote Pozyx.
if __name__ == '__main__':
# shortcut to not have to find out the port yourself
serial_port = get_serial_ports()[0].device
remote_id = 0x6110 # remote device network ID
remote = True # whether to use a remote device
# if not remote:
# remote_id = None
index = 0
previous_cycle_time = 0
current_cycle_time = 0
attributes_to_log = ["acceleration"]
to_use_file = False
filename = None
"""User input configuration section, comment out to use above settings"""
remote = UserInput.use_remote()
remote_id = UserInput.get_remote_id(remote)
to_use_file = UserInput.use_file()
filename = UserInput.get_filename(to_use_file)
attributes_to_log = UserInput.get_multiple_attributes_to_log()
use_processing = True
ip = "127.0.0.1"
network_port = 8888
anchors = [DeviceCoordinates(0x6863, 1, Coordinates(0, 0, 2000)),
DeviceCoordinates(0x615a, 1, Coordinates(0, 18288, 1000)),
DeviceCoordinates(0x607c, 1, Coordinates(18288, 0, 1000)),
DeviceCoordinates(0x6134, 1, Coordinates(18288, 18288, 2000))]
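    # anchor coordinates (and the device height below) are given in millimetres, the unit the Pozyx API works in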
# algorithm = POZYX_POS_ALG_UWB_ONLY # positioning algorithm to use
algorithm = POZYX_POS_ALG_TRACKING # tracking positioning algorithm
dimension = POZYX_3D # positioning dimension
height = 1000 # height of device, required in 2.5D positioning
pozyx = PozyxSerial(serial_port)
osc_udp_client = SimpleUDPClient(ip, network_port)
o = Orientation3D(pozyx, osc_udp_client, anchors, algorithm, dimension, height, remote_id)
o.setup()
logfile = None
if to_use_file:
logfile = open(filename, 'a')
FileWriting.write_sensor_and_position_header_to_file(logfile)
start = ConsoleLogging.get_time()
try:
while True:
# updates elapsed time and time difference
elapsed = ConsoleLogging.get_elapsed_time(ConsoleLogging, start)
previous_cycle_time = current_cycle_time
current_cycle_time = elapsed
time_difference = current_cycle_time - previous_cycle_time
            # store the loop() result, which is either a data tuple or an error message
loop_results = o.loop()
if type(loop_results) == tuple:
one_cycle_sensor_data, one_cycle_position = loop_results
formatted_data_dictionary = ConsoleLogging.format_sensor_data(
one_cycle_sensor_data, attributes_to_log)
if type(formatted_data_dictionary) == dict:
formatted_data_dictionary["Position"] = [
"x:", one_cycle_position.x, "y:", one_cycle_position.y, "z:", one_cycle_position.z]
ConsoleLogging.log_sensor_data_to_console(index, elapsed, formatted_data_dictionary)
if to_use_file:
FileWriting.write_sensor_and_position_data_to_file(
index, elapsed, time_difference,
logfile, one_cycle_sensor_data, one_cycle_position)
            # if loop() didn't return a tuple, it returned an error string
else:
error_string = loop_results
ConsoleLogging.print_data_error_message(index, elapsed, error_string)
index += 1 # increment data index
    # this allows Windows users to exit the while loop by pressing ctrl+c
except KeyboardInterrupt:
pass
if to_use_file:
logfile.close()
|
[
"pythonosc.udp_client.SimpleUDPClient",
"modules.file_writing.SensorAndPositionFileWriting.write_sensor_and_position_header_to_file",
"modules.console_logging_functions.ConsoleLoggingFunctions.get_time",
"modules.console_logging_functions.ConsoleLoggingFunctions.print_data_error_message",
"modules.user_input_config_functions.UserInputConfigFunctions.get_multiple_attributes_to_log",
"modules.user_input_config_functions.UserInputConfigFunctions.use_file",
"modules.file_writing.SensorAndPositionFileWriting.write_sensor_and_position_data_to_file",
"time.sleep",
"modules.console_logging_functions.ConsoleLoggingFunctions.log_sensor_data_to_console",
"modules.console_logging_functions.ConsoleLoggingFunctions.get_elapsed_time",
"modules.user_input_config_functions.UserInputConfigFunctions.get_remote_id",
"modules.console_logging_functions.ConsoleLoggingFunctions.format_sensor_data",
"modules.user_input_config_functions.UserInputConfigFunctions.get_filename",
"time.time",
"modules.user_input_config_functions.UserInputConfigFunctions.use_remote",
"pythonosc.osc_message_builder.OscMessageBuilder"
] |
[((8855, 8877), 'modules.user_input_config_functions.UserInputConfigFunctions.use_remote', 'UserInput.use_remote', ([], {}), '()\n', (8875, 8877), True, 'from modules.user_input_config_functions import UserInputConfigFunctions as UserInput\n'), ((8894, 8925), 'modules.user_input_config_functions.UserInputConfigFunctions.get_remote_id', 'UserInput.get_remote_id', (['remote'], {}), '(remote)\n', (8917, 8925), True, 'from modules.user_input_config_functions import UserInputConfigFunctions as UserInput\n'), ((8944, 8964), 'modules.user_input_config_functions.UserInputConfigFunctions.use_file', 'UserInput.use_file', ([], {}), '()\n', (8962, 8964), True, 'from modules.user_input_config_functions import UserInputConfigFunctions as UserInput\n'), ((8980, 9015), 'modules.user_input_config_functions.UserInputConfigFunctions.get_filename', 'UserInput.get_filename', (['to_use_file'], {}), '(to_use_file)\n', (9002, 9015), True, 'from modules.user_input_config_functions import UserInputConfigFunctions as UserInput\n'), ((9040, 9082), 'modules.user_input_config_functions.UserInputConfigFunctions.get_multiple_attributes_to_log', 'UserInput.get_multiple_attributes_to_log', ([], {}), '()\n', (9080, 9082), True, 'from modules.user_input_config_functions import UserInputConfigFunctions as UserInput\n'), ((9776, 9809), 'pythonosc.udp_client.SimpleUDPClient', 'SimpleUDPClient', (['ip', 'network_port'], {}), '(ip, network_port)\n', (9791, 9809), False, 'from pythonosc.udp_client import SimpleUDPClient\n'), ((10080, 10105), 'modules.console_logging_functions.ConsoleLoggingFunctions.get_time', 'ConsoleLogging.get_time', ([], {}), '()\n', (10103, 10105), True, 'from modules.console_logging_functions import ConsoleLoggingFunctions as ConsoleLogging\n'), ((1949, 1955), 'time.time', 'time', ([], {}), '()\n', (1953, 1955), False, 'from time import time\n'), ((3839, 3871), 'pythonosc.osc_message_builder.OscMessageBuilder', 'OscMessageBuilder', (['"""/sensordata"""'], {}), "('/sensordata')\n", (3856, 3871), False, 'from pythonosc.osc_message_builder import OscMessageBuilder\n'), ((3970, 3976), 'time.time', 'time', ([], {}), '()\n', (3974, 3976), False, 'from time import time\n'), ((10005, 10066), 'modules.file_writing.SensorAndPositionFileWriting.write_sensor_and_position_header_to_file', 'FileWriting.write_sensor_and_position_header_to_file', (['logfile'], {}), '(logfile)\n', (10057, 10066), True, 'from modules.file_writing import SensorAndPositionFileWriting as FileWriting\n'), ((10212, 10266), 'modules.console_logging_functions.ConsoleLoggingFunctions.get_elapsed_time', 'ConsoleLogging.get_elapsed_time', (['ConsoleLogging', 'start'], {}), '(ConsoleLogging, start)\n', (10243, 10266), True, 'from modules.console_logging_functions import ConsoleLoggingFunctions as ConsoleLogging\n'), ((6926, 6938), 'time.sleep', 'sleep', (['(0.025)'], {}), '(0.025)\n', (6931, 6938), False, 'from time import sleep\n'), ((10704, 10779), 'modules.console_logging_functions.ConsoleLoggingFunctions.format_sensor_data', 'ConsoleLogging.format_sensor_data', (['one_cycle_sensor_data', 'attributes_to_log'], {}), '(one_cycle_sensor_data, attributes_to_log)\n', (10737, 10779), True, 'from modules.console_logging_functions import ConsoleLoggingFunctions as ConsoleLogging\n'), ((11047, 11135), 'modules.console_logging_functions.ConsoleLoggingFunctions.log_sensor_data_to_console', 'ConsoleLogging.log_sensor_data_to_console', (['index', 'elapsed', 'formatted_data_dictionary'], {}), '(index, elapsed,\n formatted_data_dictionary)\n', (11088, 11135), True, 
'from modules.console_logging_functions import ConsoleLoggingFunctions as ConsoleLogging\n'), ((11532, 11601), 'modules.console_logging_functions.ConsoleLoggingFunctions.print_data_error_message', 'ConsoleLogging.print_data_error_message', (['index', 'elapsed', 'error_string'], {}), '(index, elapsed, error_string)\n', (11571, 11601), True, 'from modules.console_logging_functions import ConsoleLoggingFunctions as ConsoleLogging\n'), ((11184, 11323), 'modules.file_writing.SensorAndPositionFileWriting.write_sensor_and_position_data_to_file', 'FileWriting.write_sensor_and_position_data_to_file', (['index', 'elapsed', 'time_difference', 'logfile', 'one_cycle_sensor_data', 'one_cycle_position'], {}), '(index, elapsed,\n time_difference, logfile, one_cycle_sensor_data, one_cycle_position)\n', (11234, 11323), True, 'from modules.file_writing import SensorAndPositionFileWriting as FileWriting\n'), ((3917, 3923), 'time.time', 'time', ([], {}), '()\n', (3921, 3923), False, 'from time import time\n')]
|
# Copyright 2017 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from mock import patch
from fixture import DjangoFixture
from fixture.style import NamedDataStyle
from fixture.django_testcase import FixtureTestCase
from dashboard.managers.inventory import InventoryManager
from dashboard.models import Product
from dashboard.tests.testdata.db_fixtures import (
LanguageData, LanguageSetData, PlatformData, ProductData, ReleaseData
)
db_fixture = DjangoFixture(style=NamedDataStyle())
class InventoryManagerTest(FixtureTestCase):
inventory_manager = InventoryManager()
fixture = db_fixture
datasets = [LanguageData, LanguageSetData, PlatformData, ProductData, ReleaseData]
def test_get_locales(self):
"""
Test get_locales
"""
japanese_locale = self.inventory_manager.get_locales(pick_locales=['ja_JP'])
self.assertEqual(len(japanese_locale), 1)
self.assertEqual(japanese_locale[0].lang_name, 'Japanese')
self.assertEqual(japanese_locale[0].locale_alias, 'ja')
self.assertEqual(japanese_locale[0].locale_script, 'Hani')
def test_get_active_locales_count(self):
"""
Test get_active_locales_count
"""
active_locales = self.inventory_manager.get_active_locales_count()
self.assertEqual(active_locales, 3)
def test_get_locale_alias(self):
"""
Test get_locale_alias
"""
locale_alias = self.inventory_manager.get_locale_alias('fr_FR')
self.assertEqual(locale_alias, 'fr')
locale_alias = self.inventory_manager.get_locale_alias('de_DE')
self.assertEqual(locale_alias, 'de_DE')
def test_get_alias_locale(self):
"""
Test get_alias_locale
"""
alias_locale = self.inventory_manager.get_alias_locale('fr')
self.assertEqual(alias_locale, 'fr_FR')
alias_locale = self.inventory_manager.get_alias_locale('de_DE')
self.assertEqual(alias_locale, 'de_DE')
def test_get_locales_set(self):
"""
Test get_locales_set
"""
active_locales, inactive_locales, aliases = \
self.inventory_manager.get_locales_set()
self.assertEqual(len(active_locales), 3)
self.assertEqual(len(inactive_locales), 1)
self.assertEqual(len(aliases), 4)
def test_get_locale_lang_tuple(self):
"""
Test get_locale_lang_tuple
"""
ru_tuple = ('ru_RU', 'Russian')
fr_tuple = ('fr_FR', 'French')
locale_lang_tuple = self.inventory_manager.get_locale_lang_tuple()
self.assertEqual(len(locale_lang_tuple), 3)
locale_lang_tuple = self.inventory_manager.get_locale_lang_tuple(locales=['fr_FR', 'ru_RU'])
self.assertEqual(len(locale_lang_tuple), 2)
self.assertTupleEqual(locale_lang_tuple[0], ru_tuple)
self.assertTupleEqual(locale_lang_tuple[1], fr_tuple)
def test_get_langset(self):
"""
        Test get_langset
"""
lang_set = self.inventory_manager.get_langset(langset_slug='custom-set')
self.assertEqual(lang_set.lang_set_name, 'Custom Set')
self.assertEqual(lang_set.lang_set_color, 'Peru')
def test_get_langsets(self):
"""
Test get_langsets
"""
lang_sets = self.inventory_manager.get_langsets(
fields=['lang_set_name', 'locale_ids']
)
self.assertEqual(len(lang_sets), 2)
self.assertNotIn('lang_set_color', vars(lang_sets[0]))
self.assertListEqual(lang_sets[0].locale_ids, ['fr_FR', 'ja_JP'])
def test_get_locale_groups(self):
"""
Test get_locale_groups
"""
locale_groups = self.inventory_manager.get_locale_groups('ja_JP')
self.assertDictEqual(locale_groups, {'ja_JP': ['custom-set', 'f27-set']})
def test_get_all_locales_groups(self):
"""
Test get_all_locales_groups
"""
groups_of_all_locales = self.inventory_manager.get_all_locales_groups()
self.assertDictEqual(groups_of_all_locales,
{'ja_JP': ['custom-set', 'f27-set'], 'fr_FR': ['custom-set', 'f27-set'],
'ru_RU': ['f27-set'], 'ko_KR': []})
def test_get_translation_platforms(self):
"""
Test get_translation_platforms
"""
transplatforms = self.inventory_manager.get_translation_platforms(engine='zanata')
self.assertEqual(transplatforms[1].api_url, 'https://translate.zanata.org')
self.assertEqual(transplatforms[1].platform_slug, 'ZNTAPUB')
def test_get_ci_platforms(self):
"""
        Test get_ci_platforms
"""
ciplatforms = self.inventory_manager.get_translation_platforms(ci=True)
self.assertEqual(ciplatforms[0].api_url, 'https://cloud.memsource.com/web')
self.assertEqual(ciplatforms[0].platform_slug, 'MSRCPUB')
def test_get_transplatforms_set(self):
"""
Test get_transplatforms_set
"""
active_platforms, inactive_platforms = self.inventory_manager.get_transplatforms_set()
self.assertEqual(len(active_platforms), 3)
self.assertEqual(len(inactive_platforms), 0)
def test_get_engine_from_slug(self):
"""
Test get_engine_from_slug
"""
platform_engine = self.inventory_manager.get_engine_from_slug(
PlatformData.platform_zanata_fedora.platform_slug
)
self.assertEqual(platform_engine, 'zanata')
platform_engine = self.inventory_manager.get_engine_from_slug(
PlatformData.platform_memsource_cloud.platform_slug
)
self.assertEqual(platform_engine, 'memsource')
def test_get_transplatform_slug_url(self):
"""
test get_transplatform_slug_url
"""
slug_url_tuple = self.inventory_manager.get_transplatform_slug_url()
self.assertTupleEqual(slug_url_tuple, (('MSRCPUB', 'https://cloud.memsource.com/web'),
('ZNTAFED', 'https://fedora.zanata.org'),
('ZNTAPUB', 'https://translate.zanata.org')))
def test_get_relbranch_locales(self):
"""
Test get_relbranch_locales
"""
relbranch_locales = self.inventory_manager.get_relbranch_locales("nonexisting-relbranch")
self.assertFalse(relbranch_locales)
relbranch_locales = self.inventory_manager.get_relbranch_locales('fedora-27')
self.assertListEqual(relbranch_locales, ['ja_JP', 'fr_FR', 'ru_RU'])
def test_get_release_streams(self):
"""
Test get_release_streams
"""
relstream_fedora = Product.objects.get(product_name='Fedora')
relstream_rhel = Product.objects.get(product_name='RHEL')
release_streams = self.inventory_manager.get_release_streams()
self.assertEqual(len(release_streams), 2)
self.assertIn(relstream_fedora, release_streams)
self.assertIn(relstream_rhel, release_streams)
release_streams = self.inventory_manager.get_release_streams(stream_slug='RHEL')
self.assertEqual(len(release_streams), 1)
self.assertIn(relstream_rhel, release_streams)
release_streams = self.inventory_manager.get_release_streams(only_active=True)
self.assertEqual(len(release_streams), 1)
self.assertIn(relstream_fedora, release_streams)
def test_get_relstream_slug_name(self):
"""
Test get_relstream_slug_name
"""
relstream_slug_name_tuple = self.inventory_manager.get_relstream_slug_name()
self.assertEqual(len(relstream_slug_name_tuple), 1)
self.assertTupleEqual(relstream_slug_name_tuple[0], ('fedora', 'Fedora'))
def test_get_relstream_build_tags(self):
"""
Test get_relstream_build_tags
"""
tags = self.inventory_manager.get_relstream_build_tags(stream_slug='fedora')
self.assertIsInstance(tags, dict)
self.assertDictEqual(tags, {'fedora': ['f28', 'f29', 'rawhide']})
|
[
"fixture.style.NamedDataStyle",
"dashboard.models.Product.objects.get",
"dashboard.managers.inventory.InventoryManager"
] |
[((1094, 1112), 'dashboard.managers.inventory.InventoryManager', 'InventoryManager', ([], {}), '()\n', (1110, 1112), False, 'from dashboard.managers.inventory import InventoryManager\n'), ((1004, 1020), 'fixture.style.NamedDataStyle', 'NamedDataStyle', ([], {}), '()\n', (1018, 1020), False, 'from fixture.style import NamedDataStyle\n'), ((7259, 7301), 'dashboard.models.Product.objects.get', 'Product.objects.get', ([], {'product_name': '"""Fedora"""'}), "(product_name='Fedora')\n", (7278, 7301), False, 'from dashboard.models import Product\n'), ((7327, 7367), 'dashboard.models.Product.objects.get', 'Product.objects.get', ([], {'product_name': '"""RHEL"""'}), "(product_name='RHEL')\n", (7346, 7367), False, 'from dashboard.models import Product\n')]
|
# Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
# pylint: disable=broad-except, cyclic-import
import logging
import threading
from concurrent import futures
import grpc
from fedlearner_webconsole.proto import (
service_pb2, service_pb2_grpc,
common_pb2
)
from fedlearner_webconsole.db import db
from fedlearner_webconsole.project.models import Project
from fedlearner_webconsole.workflow.models import (
Workflow, WorkflowState, TransactionState
)
from fedlearner_webconsole.exceptions import (
UnauthorizedException
)
class RPCServerServicer(service_pb2_grpc.WebConsoleV2ServiceServicer):
def __init__(self, server):
self._server = server
def CheckConnection(self, request, context):
try:
return self._server.check_connection(request)
except UnauthorizedException as e:
return service_pb2.CheckConnectionResponse(
status=common_pb2.Status(
code=common_pb2.STATUS_UNAUTHORIZED,
msg=repr(e)))
except Exception as e:
logging.error('CheckConnection rpc server error: %s', repr(e))
return service_pb2.CheckConnectionResponse(
status=common_pb2.Status(
code=common_pb2.STATUS_UNKNOWN_ERROR,
msg=repr(e)))
def UpdateWorkflowState(self, request, context):
try:
return self._server.update_workflow_state(request)
except UnauthorizedException as e:
return service_pb2.UpdateWorkflowStateResponse(
status=common_pb2.Status(
code=common_pb2.STATUS_UNAUTHORIZED,
msg=repr(e)))
except Exception as e:
logging.error('UpdateWorkflowState rpc server error: %s', repr(e))
return service_pb2.UpdateWorkflowStateResponse(
status=common_pb2.Status(
code=common_pb2.STATUS_UNKNOWN_ERROR,
msg=repr(e)))
def GetWorkflow(self, request, context):
try:
return self._server.get_workflow(request)
except UnauthorizedException as e:
return service_pb2.GetWorkflowResponse(
status=common_pb2.Status(
code=common_pb2.STATUS_UNAUTHORIZED,
msg=repr(e)))
except Exception as e:
logging.error('GetWorkflow rpc server error: %s', repr(e))
return service_pb2.GetWorkflowResponse(
status=common_pb2.Status(
code=common_pb2.STATUS_UNKNOWN_ERROR,
msg=repr(e)))
class RpcServer(object):
def __init__(self):
self._lock = threading.Lock()
self._started = False
self._server = None
self._app = None
def start(self, app):
assert not self._started, "Already started"
self._app = app
listen_port = app.config.get('GRPC_LISTEN_PORT', 1999)
with self._lock:
self._server = grpc.server(
futures.ThreadPoolExecutor(max_workers=10))
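            # the thread pool above bounds concurrent RPC handlers at 10; the port is served without TLS (insecure)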
service_pb2_grpc.add_WebConsoleV2ServiceServicer_to_server(
RPCServerServicer(self), self._server)
self._server.add_insecure_port('[::]:%d' % listen_port)
self._server.start()
self._started = True
def stop(self):
if not self._started:
return
with self._lock:
self._server.stop(None).wait()
del self._server
self._started = False
def check_auth_info(self, auth_info):
logging.debug('auth_info: %s', auth_info)
project = Project.query.filter_by(
name=auth_info.project_name).first()
if project is None:
raise UnauthorizedException('Invalid project')
project_config = project.get_config()
# TODO: fix token verification
# if project_config.token != auth_info.auth_token:
# raise UnauthorizedException('Invalid token')
if project_config.domain_name != auth_info.target_domain:
raise UnauthorizedException('Invalid domain')
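        # the caller must be one of the participants configured for this project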
source_party = None
for party in project_config.participants:
if party.domain_name == auth_info.source_domain:
source_party = party
if source_party is None:
raise UnauthorizedException('Invalid domain')
return project, source_party
def check_connection(self, request):
with self._app.app_context():
_, party = self.check_auth_info(request.auth_info)
logging.debug(
'received check_connection from %s', party.domain_name)
return service_pb2.CheckConnectionResponse(
status=common_pb2.Status(
code=common_pb2.STATUS_SUCCESS))
def update_workflow_state(self, request):
with self._app.app_context():
project, party = self.check_auth_info(request.auth_info)
logging.debug(
'received update_workflow_state from %s: %s',
party.domain_name, request)
name = request.workflow_name
state = WorkflowState(request.state)
target_state = WorkflowState(request.target_state)
transaction_state = TransactionState(request.transaction_state)
workflow = Workflow.query.filter_by(
name=request.workflow_name,
project_id=project.id).first()
if workflow is None:
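                # first notification from the peer: the workflow must arrive as NEW -> READY, so create it locally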
assert state == WorkflowState.NEW
assert target_state == WorkflowState.READY
workflow = Workflow(
name=name,
project_id=project.id,
state=state, target_state=target_state,
transaction_state=transaction_state)
db.session.add(workflow)
db.session.commit()
db.session.refresh(workflow)
workflow.update_state(
state, target_state, transaction_state)
db.session.commit()
return service_pb2.UpdateWorkflowStateResponse(
status=common_pb2.Status(
code=common_pb2.STATUS_SUCCESS),
transaction_state=workflow.transaction_state.value)
def _filter_workflow(self, workflow, modes):
# filter peer-readable and peer-writable variables
if workflow is None:
return
var_list = [
i for i in workflow.variables if i.access_mode in modes]
workflow.ClearField('variables')
for i in var_list:
workflow.variables.append(i)
for job_def in workflow.job_definitions:
var_list = [
i for i in job_def.variables if i.access_mode in modes]
job_def.ClearField('variables')
for i in var_list:
job_def.variables.append(i)
def get_workflow(self, request):
with self._app.app_context():
project, party = self.check_auth_info(request.auth_info)
workflow = Workflow.query.filter_by(
name=request.workflow_name,
project_id=project.id).first()
assert workflow is not None
config = workflow.get_config()
self._filter_workflow(
config,
[
common_pb2.Variable.PEER_READABLE,
common_pb2.Variable.PEER_WRITABLE
])
# job details
jobs = [service_pb2.JobDetail(
name=job.name, state=job.get_state_for_front())
for job in workflow.get_jobs()]
# fork info
forked_from = ''
if workflow.forked_from:
forked_from = Workflow.query.get(workflow.forked_from).name
return service_pb2.GetWorkflowResponse(
name=request.workflow_name,
status=common_pb2.Status(
code=common_pb2.STATUS_SUCCESS),
config=config,
jobs=jobs,
state=workflow.state.value,
target_state=workflow.target_state.value,
transaction_state=workflow.transaction_state.value,
forkable=workflow.forkable,
forked_from=forked_from,
reuse_job_names=workflow.get_reuse_job_names(),
peer_reuse_job_names=workflow.get_peer_reuse_job_names(),
fork_proposal_config=workflow.get_fork_proposal_config()
)
rpc_server = RpcServer()
|
[
"fedlearner_webconsole.workflow.models.TransactionState",
"fedlearner_webconsole.project.models.Project.query.filter_by",
"logging.debug",
"fedlearner_webconsole.db.db.session.add",
"threading.Lock",
"concurrent.futures.ThreadPoolExecutor",
"fedlearner_webconsole.workflow.models.Workflow.query.get",
"fedlearner_webconsole.proto.common_pb2.Status",
"fedlearner_webconsole.workflow.models.Workflow",
"fedlearner_webconsole.exceptions.UnauthorizedException",
"fedlearner_webconsole.workflow.models.Workflow.query.filter_by",
"fedlearner_webconsole.db.db.session.refresh",
"fedlearner_webconsole.db.db.session.commit",
"fedlearner_webconsole.workflow.models.WorkflowState"
] |
[((3268, 3284), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (3282, 3284), False, 'import threading\n'), ((4173, 4214), 'logging.debug', 'logging.debug', (['"""auth_info: %s"""', 'auth_info'], {}), "('auth_info: %s', auth_info)\n", (4186, 4214), False, 'import logging\n'), ((4353, 4393), 'fedlearner_webconsole.exceptions.UnauthorizedException', 'UnauthorizedException', (['"""Invalid project"""'], {}), "('Invalid project')\n", (4374, 4393), False, 'from fedlearner_webconsole.exceptions import UnauthorizedException\n'), ((4681, 4720), 'fedlearner_webconsole.exceptions.UnauthorizedException', 'UnauthorizedException', (['"""Invalid domain"""'], {}), "('Invalid domain')\n", (4702, 4720), False, 'from fedlearner_webconsole.exceptions import UnauthorizedException\n'), ((4948, 4987), 'fedlearner_webconsole.exceptions.UnauthorizedException', 'UnauthorizedException', (['"""Invalid domain"""'], {}), "('Invalid domain')\n", (4969, 4987), False, 'from fedlearner_webconsole.exceptions import UnauthorizedException\n'), ((5180, 5249), 'logging.debug', 'logging.debug', (['"""received check_connection from %s"""', 'party.domain_name'], {}), "('received check_connection from %s', party.domain_name)\n", (5193, 5249), False, 'import logging\n'), ((5583, 5675), 'logging.debug', 'logging.debug', (['"""received update_workflow_state from %s: %s"""', 'party.domain_name', 'request'], {}), "('received update_workflow_state from %s: %s', party.\n domain_name, request)\n", (5596, 5675), False, 'import logging\n'), ((5765, 5793), 'fedlearner_webconsole.workflow.models.WorkflowState', 'WorkflowState', (['request.state'], {}), '(request.state)\n', (5778, 5793), False, 'from fedlearner_webconsole.workflow.models import Workflow, WorkflowState, TransactionState\n'), ((5821, 5856), 'fedlearner_webconsole.workflow.models.WorkflowState', 'WorkflowState', (['request.target_state'], {}), '(request.target_state)\n', (5834, 5856), False, 'from fedlearner_webconsole.workflow.models import Workflow, WorkflowState, TransactionState\n'), ((5889, 5932), 'fedlearner_webconsole.workflow.models.TransactionState', 'TransactionState', (['request.transaction_state'], {}), '(request.transaction_state)\n', (5905, 5932), False, 'from fedlearner_webconsole.workflow.models import Workflow, WorkflowState, TransactionState\n'), ((6669, 6688), 'fedlearner_webconsole.db.db.session.commit', 'db.session.commit', ([], {}), '()\n', (6686, 6688), False, 'from fedlearner_webconsole.db import db\n'), ((3615, 3657), 'concurrent.futures.ThreadPoolExecutor', 'futures.ThreadPoolExecutor', ([], {'max_workers': '(10)'}), '(max_workers=10)\n', (3641, 3657), False, 'from concurrent import futures\n'), ((4233, 4285), 'fedlearner_webconsole.project.models.Project.query.filter_by', 'Project.query.filter_by', ([], {'name': 'auth_info.project_name'}), '(name=auth_info.project_name)\n', (4256, 4285), False, 'from fedlearner_webconsole.project.models import Project\n'), ((6242, 6366), 'fedlearner_webconsole.workflow.models.Workflow', 'Workflow', ([], {'name': 'name', 'project_id': 'project.id', 'state': 'state', 'target_state': 'target_state', 'transaction_state': 'transaction_state'}), '(name=name, project_id=project.id, state=state, target_state=\n target_state, transaction_state=transaction_state)\n', (6250, 6366), False, 'from fedlearner_webconsole.workflow.models import Workflow, WorkflowState, TransactionState\n'), ((6459, 6483), 'fedlearner_webconsole.db.db.session.add', 'db.session.add', (['workflow'], {}), '(workflow)\n', (6473, 6483), False, 'from 
fedlearner_webconsole.db import db\n'), ((6500, 6519), 'fedlearner_webconsole.db.db.session.commit', 'db.session.commit', ([], {}), '()\n', (6517, 6519), False, 'from fedlearner_webconsole.db import db\n'), ((6536, 6564), 'fedlearner_webconsole.db.db.session.refresh', 'db.session.refresh', (['workflow'], {}), '(workflow)\n', (6554, 6564), False, 'from fedlearner_webconsole.db import db\n'), ((5346, 5395), 'fedlearner_webconsole.proto.common_pb2.Status', 'common_pb2.Status', ([], {'code': 'common_pb2.STATUS_SUCCESS'}), '(code=common_pb2.STATUS_SUCCESS)\n', (5363, 5395), False, 'from fedlearner_webconsole.proto import service_pb2, service_pb2_grpc, common_pb2\n'), ((5956, 6031), 'fedlearner_webconsole.workflow.models.Workflow.query.filter_by', 'Workflow.query.filter_by', ([], {'name': 'request.workflow_name', 'project_id': 'project.id'}), '(name=request.workflow_name, project_id=project.id)\n', (5980, 6031), False, 'from fedlearner_webconsole.workflow.models import Workflow, WorkflowState, TransactionState\n'), ((6776, 6825), 'fedlearner_webconsole.proto.common_pb2.Status', 'common_pb2.Status', ([], {'code': 'common_pb2.STATUS_SUCCESS'}), '(code=common_pb2.STATUS_SUCCESS)\n', (6793, 6825), False, 'from fedlearner_webconsole.proto import service_pb2, service_pb2_grpc, common_pb2\n'), ((7713, 7788), 'fedlearner_webconsole.workflow.models.Workflow.query.filter_by', 'Workflow.query.filter_by', ([], {'name': 'request.workflow_name', 'project_id': 'project.id'}), '(name=request.workflow_name, project_id=project.id)\n', (7737, 7788), False, 'from fedlearner_webconsole.workflow.models import Workflow, WorkflowState, TransactionState\n'), ((8419, 8459), 'fedlearner_webconsole.workflow.models.Workflow.query.get', 'Workflow.query.get', (['workflow.forked_from'], {}), '(workflow.forked_from)\n', (8437, 8459), False, 'from fedlearner_webconsole.workflow.models import Workflow, WorkflowState, TransactionState\n'), ((8584, 8633), 'fedlearner_webconsole.proto.common_pb2.Status', 'common_pb2.Status', ([], {'code': 'common_pb2.STATUS_SUCCESS'}), '(code=common_pb2.STATUS_SUCCESS)\n', (8601, 8633), False, 'from fedlearner_webconsole.proto import service_pb2, service_pb2_grpc, common_pb2\n')]
|
#!/usr/bin/env python
from __future__ import print_function
import logging
import os
import signal
from time import sleep
from subprocess import Popen, PIPE
import socket
from core.common_functions import *
from core.run import Runner
class NginxPerf(Runner):
"""
Runs Nginx
"""
name = "nginx"
exp_name = "nginx"
bench_suite = False
benchmarks = {"nginx": ""}
test_benchmarks = {"nginx": ""}
client_numbers = [1, 5, 9, 13, 17, 21, 25, 29]
ab = "ab"
duration = 20 # in seconds
requests_num = 1000000 # some huge number so we always take 20 seconds
def __init__(self, *args, **kwargs):
super(NginxPerf, self).__init__(*args, **kwargs)
if self.config.input_type == "test":
self.client_numbers = (1,)
def per_benchmark_action(self, type_, benchmark, args):
self.log_build(type_, benchmark)
build_path = "/".join([self.dirs["build"], type_])
self.current_exe = build_path + '/sbin/' + benchmark
build_benchmark(
b=benchmark,
t=type_,
makefile=self.dirs['bench_src'],
build_path=build_path
)
# generate an input file
with open(build_path + "/html/index.html", "w") as f:
f.write("<html><body><h1>It works!</h1>")
random_text = my_check_output("lorem -p 10")
f.write(random_text)
f.write("</body></html>")
# config Nginx
replace_in_file(build_path + "/conf/nginx.conf", "listen 80;", "listen 8080;", ignoreifcontains=True)
replace_in_file(build_path + "/conf/nginx.conf", "worker_processes 1;", "worker_processes auto;", ignoreifcontains=True)
def per_thread_action(self, type_, benchmark, args, thread_num):
servercmd = "{action} {exe} -g \"daemon off;\"".format(
action=self.action,
exe=self.current_exe,
)
logging.debug("Server command: %s" % servercmd)
# by default start client on local machine
if env.get("CLIENT_MACHINE"):
ssh = "ssh %s" % env["CLIENT_MACHINE"]
logging.debug("Using remote client: %s" % env["CLIENT_MACHINE"])
else:
ssh = ""
logging.debug("Using local client (use CLIENT_MACHINE env var to specify remote client)")
myip = [l for l in ([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith("127.")][:1], [[(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]]) if l][0][0]
with open(self.dirs["log_file"], "a") as f:
for client_number in self.client_numbers:
# start server
my_check_output("pkill -9 nginx > /dev/null || true") # for sanity
sleep(1)
server = Popen(servercmd, shell=True, stdout=PIPE, stderr=PIPE, preexec_fn=os.setsid)
sleep(1)
# start client (possibly on another machine)
msg = self.run_message.format(input=client_number, **locals())
self.log_run(msg)
f.write("[run] " + msg + "\n")
out = my_check_output("{ssh} {ab} -k -t {duration} -n {requests_num} -c {client_number} http://{myip}:8080/".format(
ab=self.ab,
duration=self.duration,
requests_num=self.requests_num,
**locals()
))
f.write("===== client =====\n")
f.write(out)
# log and stop server
f.write("===== return code is %s =====\n" % str(server.poll()))
try:
os.killpg(server.pid, signal.SIGINT)
except:
pass
f.write("===== stdout =====\n")
for line in server.stdout:
f.write(line.decode('utf-8'))
f.write("===== stderr =====\n")
for line in server.stderr:
f.write(line.decode('utf-8'))
sleep(1)
def set_logging(self):
self.num_benchmarks = len(self.benchmarks) * len(self.types) * self.num_runs * len(self.client_numbers)
logging.info("Total runs: %d" % self.num_benchmarks)
def main(benchmark_name=None):
runner = NginxPerf()
runner.main()
|
[
"logging.debug",
"socket.socket",
"subprocess.Popen",
"time.sleep",
"os.killpg",
"socket.gethostname",
"logging.info"
] |
[((1943, 1990), 'logging.debug', 'logging.debug', (["('Server command: %s' % servercmd)"], {}), "('Server command: %s' % servercmd)\n", (1956, 1990), False, 'import logging\n'), ((4304, 4356), 'logging.info', 'logging.info', (["('Total runs: %d' % self.num_benchmarks)"], {}), "('Total runs: %d' % self.num_benchmarks)\n", (4316, 4356), False, 'import logging\n'), ((2144, 2208), 'logging.debug', 'logging.debug', (["('Using remote client: %s' % env['CLIENT_MACHINE'])"], {}), "('Using remote client: %s' % env['CLIENT_MACHINE'])\n", (2157, 2208), False, 'import logging\n'), ((2256, 2356), 'logging.debug', 'logging.debug', (['"""Using local client (use CLIENT_MACHINE env var to specify remote client)"""'], {}), "(\n 'Using local client (use CLIENT_MACHINE env var to specify remote client)'\n )\n", (2269, 2356), False, 'import logging\n'), ((2855, 2863), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (2860, 2863), False, 'from time import sleep\n'), ((2889, 2965), 'subprocess.Popen', 'Popen', (['servercmd'], {'shell': '(True)', 'stdout': 'PIPE', 'stderr': 'PIPE', 'preexec_fn': 'os.setsid'}), '(servercmd, shell=True, stdout=PIPE, stderr=PIPE, preexec_fn=os.setsid)\n', (2894, 2965), False, 'from subprocess import Popen, PIPE\n'), ((2982, 2990), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (2987, 2990), False, 'from time import sleep\n'), ((4147, 4155), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (4152, 4155), False, 'from time import sleep\n'), ((3763, 3799), 'os.killpg', 'os.killpg', (['server.pid', 'signal.SIGINT'], {}), '(server.pid, signal.SIGINT)\n', (3772, 3799), False, 'import os\n'), ((2414, 2434), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (2432, 2434), False, 'import socket\n'), ((2546, 2594), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (2559, 2594), False, 'import socket\n')]
|
"""Test file sequence discovery on disk."""
# "Future" Libraries
from __future__ import print_function
# Standard Libraries
import os
import unittest
# Third Party Libraries
import mock
from builtins import range
from future.utils import lrange
from . import (DirEntry, generate_entries, initialise_mock_scandir_data,
mock_scandir_deep)
from .. import (__version__, get_parser, get_sequence, get_version, invert,
validate_frame_sequence)
from ..sequences import FileSequence, FrameChunk, FrameSequence
###############################################################################
# class: TestSeqparseModule
class TestSeqparseModule(unittest.TestCase):
"""Test file discovery on the seqparse module."""
_test_ext = "exr"
_test_file_name = "TEST_DIR"
_test_root = "test_dir"
_singletons = ["singleton0.jpg", "singleton1.jpg"]
def setUp(self):
"""Set up the test case."""
pass
@mock.patch("seqparse.seqparse.scandir")
def test_singletons(self, mock_api_call):
"""Seqparse: Test file singleton discovery from disk location."""
# Expected outputs ...
output = [os.path.join(self._test_root, x) for x in self._singletons]
entries = list()
for file_name in output:
entries.append(DirEntry(file_name))
mock_api_call.return_value = iter(entries)
parser = get_parser()
parser.scan_path(self._test_root)
file_names = parser.singletons
self.assertIn(self._test_root, file_names)
self.assertEqual(self._test_root, file_names[self._test_root].path)
self.assertEqual(len(file_names), 1)
self.assertEqual(
len(file_names[self._test_root]), len(self._singletons))
self.assertEqual(
sorted(self._singletons), sorted(file_names[self._test_root]))
# Check parser output ...
self.assertEqual(sorted(map(str, parser.output())), output)
# Test seqs_only option ...
self.assertEqual(sorted(parser.output(seqs_only=True)), [])
@mock.patch("seqparse.seqparse.scandir")
def test_single_padded_file(self, mock_api_call):
"""Seqparse: Test single padded file sequence discovery."""
frames = {4: [1]}
# Expected outputs ...
frame_seq_output = "0001"
file_seq_output = ".".join(
(self._test_file_name, frame_seq_output, self._test_ext))
final_output = os.path.join(self._test_root, file_seq_output)
input_entries = generate_entries(
ext=self._test_ext,
frames=frames,
name=self._test_file_name,
root=self._test_root)
mock_api_call.return_value = iter(input_entries)
parser = get_parser()
parser.scan_path(self._test_root)
data = parser.sequences
test_output = list(parser.output())
self.assertEqual(len(test_output), 1)
self.assertEqual(str(test_output[0]), final_output)
# Check the structure of the sequences property.
self.assertIn(self._test_root, data)
self.assertEqual(len(data), 1)
self.assertIn(self._test_file_name, data[self._test_root])
self.assertEqual(len(data[self._test_root]), 1)
# Now check the file sequence itself.
file_seq = data[self._test_root][self._test_file_name]
test_output = list(file_seq.output())
self.assertEqual(len(test_output), 1)
self.assertEqual(str(test_output[0]), final_output)
self.assertIn(self._test_ext, file_seq)
self.assertEqual(len(file_seq), 1)
self.assertTrue(4 in file_seq[self._test_ext])
self.assertEqual(len(file_seq[self._test_ext]), 1)
# And finally, the file sequence.
file_seq = file_seq[self._test_ext][4]
self.assertEqual(len(file_seq), len(frames[4]))
self.assertEqual(str(file_seq), final_output)
@mock.patch("seqparse.seqparse.scandir")
def test_simple_sequence(self, mock_api_call):
"""Seqparse: Test simple file sequence discovery."""
frames = {4: [0, 1, 2, 3, 4]}
# Expected outputs ...
frame_seq_output = "0000-0004"
file_seq_output = ".".join(
(self._test_file_name, frame_seq_output, self._test_ext))
final_output = os.path.join(self._test_root, file_seq_output)
input_entries = generate_entries(
ext=self._test_ext,
frames=frames,
name=self._test_file_name,
root=self._test_root)
mock_api_call.return_value = iter(input_entries)
parser = get_parser()
parser.scan_path(self._test_root)
data = parser.sequences
test_output = list(parser.output())
self.assertEqual(len(test_output), 1)
self.assertEqual(str(test_output[0]), final_output)
# Check the structure of the sequences property.
self.assertIn(self._test_root, data)
self.assertEqual(len(data), 1)
self.assertIn(self._test_file_name, data[self._test_root])
self.assertEqual(len(data[self._test_root]), 1)
# Now check the file sequence itself.
file_seq = data[self._test_root][self._test_file_name]
test_output = list(file_seq.output())
self.assertEqual(len(test_output), 1)
self.assertEqual(str(test_output[0]), final_output)
self.assertIn(self._test_ext, file_seq)
self.assertEqual(len(file_seq), 1)
self.assertTrue(4 in file_seq[self._test_ext])
self.assertEqual(len(file_seq[self._test_ext]), 1)
# And finally, the file sequence.
file_seq = file_seq[self._test_ext][4]
self.assertEqual(len(file_seq), len(frames[4]))
self.assertEqual(str(file_seq), final_output)
@mock.patch("seqparse.seqparse.scandir")
def test_complex_sequence(self, mock_api_call):
"""Seqparse: Test complex file sequence discovery."""
frames = {
1: [5, 6, 7, 8, 114, 199, 2000],
3: [8, 9, 10, 12],
4: [0, 1, 2, 3, 4, 5, 6, 8, 10, 12, 101]
}
input_entries = generate_entries(
ext=self._test_ext,
frames=frames,
name=self._test_file_name,
root=self._test_root)
# Expected output frame sequences. Note how frames 114, 199 move to the
# "pad 3" group and 2000 moves to the "pad 4" group!
output_seqs = {
1: "5-8",
3: "008-010,012,114,199",
4: "0000-0006,0008-0012x2,0101,2000"
}
# Expected final output (where "/" is os.sep):
# test_dir/TEST_DIR.5-8.exr
# test_dir/TEST_DIR.008-010,012,114,199.exr
# test_dir/TEST_DIR.0000-0006,0008-0012x2,0101,2000.exr
mock_api_call.return_value = iter(input_entries)
parser = get_parser()
parser.scan_path(self._test_root)
final_output = list()
for pad, seq_frames in sorted(output_seqs.items()):
bits = (self._test_file_name, seq_frames, self._test_ext)
output_seqs[pad] = os.path.join(self._test_root, ".".join(bits))
final_output.append(output_seqs[pad])
data = parser.sequences
# Check the structure of the sequences property.
self.assertIn(self._test_root, data)
self.assertEqual(len(data), 1)
self.assertIn(self._test_file_name, data[self._test_root])
self.assertEqual(len(data[self._test_root]), 1)
# Now check the file sequence itself.
file_seq = data[self._test_root][self._test_file_name]
test_output = list(file_seq.output())
self.assertEqual(len(test_output), 3)
self.assertEqual(list(map(str, test_output)), final_output)
self.assertIn(self._test_ext, file_seq)
self.assertEqual(len(file_seq), 1)
self.assertEqual(set(file_seq[self._test_ext]), set(output_seqs))
# And finally, the file sequences.
for pad in sorted(output_seqs):
self.assertEqual(output_seqs[pad],
str(file_seq[self._test_ext][pad]))
@mock.patch("seqparse.seqparse.scandir")
def test_nested_sequences(self, mock_api_call):
"""Seqparse: Test file sequence discovery in nested directories."""
mock_api_call.side_effect = mock_scandir_deep
print("\n\n SEQUENCES\n ---------")
initialise_mock_scandir_data(self._test_root)
parser = get_parser()
parser.scan_path(self._test_root)
for seq in parser.output():
print(" ", seq)
print("\n MAX LEVELS\n ----------")
for max_levels in range(-1, 4):
initialise_mock_scandir_data(self._test_root)
parser = get_parser()
parser.scan_path(self._test_root, max_levels=max_levels)
expected_seqs = max_levels + 2
if max_levels == -1:
expected_seqs = 5
seqs = list(parser.output())
blurb = " o max_levels == {:d}: {:d} ({:d} expected) entries"
print(blurb.format(max_levels, len(seqs), expected_seqs))
for seq in seqs:
print(" -", seq)
self.assertEqual(len(seqs), expected_seqs)
print("\n MIN LEVELS\n ----------")
for min_levels in range(-1, 4):
initialise_mock_scandir_data(self._test_root)
parser = get_parser()
parser.scan_path(self._test_root, min_levels=min_levels)
expected_seqs = 3 - min_levels
if min_levels == -1:
expected_seqs = 5
seqs = list(parser.output())
blurb = " o min_levels == {:d}: {:d} ({:d} expected) entries"
print(blurb.format(min_levels, len(seqs), expected_seqs))
for seq in seqs:
print(" -", seq)
self.assertEqual(len(seqs), expected_seqs)
print("")
def test_valid_frame_sequences(self):
"""Seqparse: Test validity of simple frame ranges."""
good_frame_seqs = [
"0001", ",0001", "0001,", "0001-0001", "0001-0001x0",
"0001-0003x3", "0001,0003", "0001,,0003", "0001-0010",
"0001-0010x0", "0001-0011x2", "0001-0012x2", "0001-0005,0007-0010",
"0001-0005x2,0007-0010", "0001-0005,0007-0011x2",
"0001-0005,0006,0008-0012x2", "0001,0003-0007,0009-0015x2",
"3,1,5,7", "01-05,03-07"
]
bad_frame_seqs = [
"-0001", "0001-", "0001x2", "x2", "0001,0003x2", "0001-0005x",
"0010-0001", "x", ",", ",,", ""
]
print("\n\n GOOD SEQUENCES\n --------------")
for frame_seq in good_frame_seqs:
output = validate_frame_sequence(frame_seq)
print(' o {!r} --> {!r}'.format(frame_seq, output))
self.assertTrue(output)
print("\n BAD SEQUENCES\n -------------")
for frame_seq in bad_frame_seqs:
print(' o {!r}'.format(frame_seq))
self.assertFalse(validate_frame_sequence(frame_seq))
print("")
def test_add_file_sequence(self):
"""Seqparse: Test file sequence addition via seqparse.add_file."""
input_file = ".".join((self._test_file_name, "0005", self._test_ext))
input_file = os.path.join(self._test_root, input_file)
# Expected outputs ...
input_frame_seq = "0000-0004"
output_frame_seq = "0000-0005"
input_file_seq = ".".join(
(self._test_file_name, input_frame_seq, self._test_ext))
input_file_seq = os.path.join(self._test_root, input_file_seq)
output_file_seq = ".".join(
(self._test_file_name, output_frame_seq, self._test_ext))
output_file_seq = os.path.join(self._test_root, output_file_seq)
print("\n\n INPUT FILES\n -----------")
print(" o", input_file_seq)
print(" o", input_file)
parser = get_parser()
parser.add_file(input_file_seq)
parser.add_file(input_file)
output = list(parser.output())
print("\n OUTPUT FILES\n ------------")
for line in output:
print(" o", line)
print("\n EXPECTED OUTPUT\n ---------------")
print(" o", output_file_seq)
print("")
self.assertEqual(len(output), 1)
self.assertEqual(str(output[0]), output_file_seq)
input_frame_seq = "0000-0002,,0003-0005"
input_file_seq = ".".join(
(self._test_file_name, input_frame_seq, self._test_ext))
input_file_seq = os.path.join(self._test_root, input_file_seq)
print("\n INPUT FILES\n -----------")
print(" o", input_file_seq)
print(" o", input_file)
parser = get_parser()
parser.add_file(input_file_seq)
parser.add_file(input_file)
output = list(parser.output())
print("\n OUTPUT FILES\n ------------")
for line in output:
print(" o", line)
print("\n EXPECTED OUTPUT\n ---------------")
print(" o", output_file_seq)
print("")
self.assertEqual(len(output), 1)
self.assertEqual(str(output[0]), output_file_seq)
@mock.patch("seqparse.seqparse.scandir")
def test_inversion(self, mock_api_call):
"""Seqparse: Test usage of the "missing" option in Seqparse.output."""
file_path = os.path.join(self._test_root, self._test_file_name)
chunk_in = FrameChunk(first=1, last=11, step=2, pad=4)
fseq = FileSequence(
name=file_path, ext=self._test_ext, frames=chunk_in)
input_entries = [DirEntry(x) for x in fseq]
mock_api_call.return_value = input_entries
chunk_out = FrameChunk(first=2, last=10, step=2, pad=4)
expected = FileSequence(
name=file_path, ext=self._test_ext, frames=chunk_out)
parser = get_parser()
parser.scan_path(self._test_root)
inverted = list(parser.output(missing=True))
self.assertEqual(len(inverted), 1)
print("\n\n SEQUENCE\n --------")
print(" input files: ", fseq)
print(" expected files:", expected)
print(" inverted files:", inverted[0])
self.assertEqual(str(inverted[0]), str(expected))
fseq = FileSequence(
name=file_path, ext=self._test_ext, frames=[1, 2, 3, 4, 6], pad=4)
input_entries = [DirEntry(x) for x in fseq]
mock_api_call.return_value = input_entries
expected = FileSequence(
name=file_path, ext=self._test_ext, frames=[5], pad=4)
parser = get_parser()
parser.scan_path(self._test_root)
inverted = list(parser.output(missing=True))
self.assertEqual(len(inverted), 1)
print("\n\n SEQUENCE\n --------")
print(" input files: ", fseq)
print(" expected files:", expected)
print(" inverted files:", inverted[0])
self.assertEqual(str(inverted[0]), str(expected))
@mock.patch("seqparse.seqparse.scandir")
def test_scan_options(self, mock_api_call):
"""Seqparse: Make sure scan_options works as expected."""
frames = {4: (1, 2, 3, 4, 6)}
input_entries = generate_entries(
name="test", ext="py", frames=frames, root=self._test_root)
input_entries.extend(
generate_entries(
name=".test", ext="py", frames=frames, root=self._test_root))
input_entries.append(
DirEntry(os.path.join(self._test_root, "pony.py")))
mock_api_call.return_value = input_entries
parser = get_parser()
parser.scan_options["stat"] = True
parser.scan_path(self._test_root)
output = list(parser.output())
expected = [
os.path.join(self._test_root, "test.0001-0004,0006.py"),
os.path.join(self._test_root, "pony.py")
]
self.assertEqual(len(output), 2)
self.assertEqual(list(map(str, output)), expected)
self.assertEqual(output[0].ctime, 1490908340)
self.assertEqual(output[0].mtime, 1490908305)
self.assertEqual(output[0].size, 36520)
parser = get_parser()
parser.scan_options["all"] = True
parser.scan_path(self._test_root)
output = list(parser.output())
expected = [
os.path.join(self._test_root, ".test.0001-0004,0006.py"),
os.path.join(self._test_root, "test.0001-0004,0006.py"),
os.path.join(self._test_root, "pony.py")
]
self.assertEqual(len(output), 3)
self.assertEqual(list(map(str, output)), expected)
def test_api_calls(self):
"""Seqparse: Test API calls at root of module."""
chunk = FrameChunk(first=1, last=7, step=2, pad=4)
seq = get_sequence(lrange(1, 8, 2), pad=4)
self.assertTrue(isinstance(seq, FrameSequence))
self.assertEqual(str(seq), "0001-0007x2")
expected = FrameChunk(first=2, last=6, step=2, pad=4)
inverted = invert(chunk)
self.assertEqual(str(inverted), str(expected))
inverted = invert(seq)
self.assertEqual(str(inverted), str(expected))
with self.assertRaises(TypeError):
invert(get_parser())
self.assertEqual(get_version(), __version__)
|
[
"future.utils.lrange",
"builtins.range",
"mock.patch",
"os.path.join"
] |
[((963, 1002), 'mock.patch', 'mock.patch', (['"""seqparse.seqparse.scandir"""'], {}), "('seqparse.seqparse.scandir')\n", (973, 1002), False, 'import mock\n'), ((2087, 2126), 'mock.patch', 'mock.patch', (['"""seqparse.seqparse.scandir"""'], {}), "('seqparse.seqparse.scandir')\n", (2097, 2126), False, 'import mock\n'), ((3949, 3988), 'mock.patch', 'mock.patch', (['"""seqparse.seqparse.scandir"""'], {}), "('seqparse.seqparse.scandir')\n", (3959, 3988), False, 'import mock\n'), ((5818, 5857), 'mock.patch', 'mock.patch', (['"""seqparse.seqparse.scandir"""'], {}), "('seqparse.seqparse.scandir')\n", (5828, 5857), False, 'import mock\n'), ((8154, 8193), 'mock.patch', 'mock.patch', (['"""seqparse.seqparse.scandir"""'], {}), "('seqparse.seqparse.scandir')\n", (8164, 8193), False, 'import mock\n'), ((13260, 13299), 'mock.patch', 'mock.patch', (['"""seqparse.seqparse.scandir"""'], {}), "('seqparse.seqparse.scandir')\n", (13270, 13299), False, 'import mock\n'), ((15059, 15098), 'mock.patch', 'mock.patch', (['"""seqparse.seqparse.scandir"""'], {}), "('seqparse.seqparse.scandir')\n", (15069, 15098), False, 'import mock\n'), ((2470, 2516), 'os.path.join', 'os.path.join', (['self._test_root', 'file_seq_output'], {}), '(self._test_root, file_seq_output)\n', (2482, 2516), False, 'import os\n'), ((4339, 4385), 'os.path.join', 'os.path.join', (['self._test_root', 'file_seq_output'], {}), '(self._test_root, file_seq_output)\n', (4351, 4385), False, 'import os\n'), ((8686, 8698), 'builtins.range', 'range', (['(-1)', '(4)'], {}), '(-1, 4)\n', (8691, 8698), False, 'from builtins import range\n'), ((9353, 9365), 'builtins.range', 'range', (['(-1)', '(4)'], {}), '(-1, 4)\n', (9358, 9365), False, 'from builtins import range\n'), ((11344, 11385), 'os.path.join', 'os.path.join', (['self._test_root', 'input_file'], {}), '(self._test_root, input_file)\n', (11356, 11385), False, 'import os\n'), ((11624, 11669), 'os.path.join', 'os.path.join', (['self._test_root', 'input_file_seq'], {}), '(self._test_root, input_file_seq)\n', (11636, 11669), False, 'import os\n'), ((11802, 11848), 'os.path.join', 'os.path.join', (['self._test_root', 'output_file_seq'], {}), '(self._test_root, output_file_seq)\n', (11814, 11848), False, 'import os\n'), ((12619, 12664), 'os.path.join', 'os.path.join', (['self._test_root', 'input_file_seq'], {}), '(self._test_root, input_file_seq)\n', (12631, 12664), False, 'import os\n'), ((13444, 13495), 'os.path.join', 'os.path.join', (['self._test_root', 'self._test_file_name'], {}), '(self._test_root, self._test_file_name)\n', (13456, 13495), False, 'import os\n'), ((1172, 1204), 'os.path.join', 'os.path.join', (['self._test_root', 'x'], {}), '(self._test_root, x)\n', (1184, 1204), False, 'import os\n'), ((15839, 15894), 'os.path.join', 'os.path.join', (['self._test_root', '"""test.0001-0004,0006.py"""'], {}), "(self._test_root, 'test.0001-0004,0006.py')\n", (15851, 15894), False, 'import os\n'), ((15908, 15948), 'os.path.join', 'os.path.join', (['self._test_root', '"""pony.py"""'], {}), "(self._test_root, 'pony.py')\n", (15920, 15948), False, 'import os\n'), ((16403, 16459), 'os.path.join', 'os.path.join', (['self._test_root', '""".test.0001-0004,0006.py"""'], {}), "(self._test_root, '.test.0001-0004,0006.py')\n", (16415, 16459), False, 'import os\n'), ((16473, 16528), 'os.path.join', 'os.path.join', (['self._test_root', '"""test.0001-0004,0006.py"""'], {}), "(self._test_root, 'test.0001-0004,0006.py')\n", (16485, 16528), False, 'import os\n'), ((16542, 16582), 'os.path.join', 'os.path.join', 
(['self._test_root', '"""pony.py"""'], {}), "(self._test_root, 'pony.py')\n", (16554, 16582), False, 'import os\n'), ((16869, 16884), 'future.utils.lrange', 'lrange', (['(1)', '(8)', '(2)'], {}), '(1, 8, 2)\n', (16875, 16884), False, 'from future.utils import lrange\n'), ((15556, 15596), 'os.path.join', 'os.path.join', (['self._test_root', '"""pony.py"""'], {}), "(self._test_root, 'pony.py')\n", (15568, 15596), False, 'import os\n')]
|
from django.contrib import admin
from wouso.core.security.models import Report
admin.site.register(Report)
|
[
"django.contrib.admin.site.register"
] |
[((80, 107), 'django.contrib.admin.site.register', 'admin.site.register', (['Report'], {}), '(Report)\n', (99, 107), False, 'from django.contrib import admin\n')]
|
from __future__ import annotations
from typing import TypeVar, Generic, Callable, Optional, Any, cast, Tuple
import rx
from returns import pipeline
from returns.functions import identity
from returns.maybe import Maybe, Nothing
from rx import Observable
from rx.subject import BehaviorSubject
from . import ReactiveValue, ReactiveView
from .value import Modifier
T = TypeVar("T")
class ReactiveProperty(Generic[T], ReactiveValue[T]):
def __init__(
self,
init_value: Maybe[T] = Nothing,
read_only=False,
modifier: Callable[[Any], Modifier] = lambda _: identity,
validator: Callable[[Any, T], T] = lambda _, v: v) -> None:
super().__init__(read_only)
self._init_value = init_value
self._modifier = modifier
self._validator = validator
@property
def init_value(self) -> Maybe[T]:
return self._init_value
@property
def validator(self) -> Callable[[T, Any], T]:
return self._validator
@property
def modifier(self) -> Callable[[Any], Modifier]:
return self._modifier
def as_view(self) -> ReactiveView[T]:
return ReactiveView(self.context, self.read_only)
def pipe(self, modifiers: Callable[[Any], Tuple[Modifier, ...]]) -> ReactiveProperty:
def stack(obj: Any):
            # FIXME: Not sure why both PyCharm and Mypy fail to resolve pipeline.pipe(). Should investigate later.
# noinspection PyUnresolvedReferences
return pipeline.pipe(*([self.modifier(obj)] + list(modifiers(obj)))) # type:ignore
return ReactiveProperty(self.init_value, self.read_only, stack, self.validator)
def validate(self, validator: Callable[[Any, T], T]) -> ReactiveProperty[T]:
if validator is None:
raise ValueError("Argument 'modifier' is required.")
def validate(obj: Any, v: T) -> T:
return validator(obj, self.validator(obj, v))
return ReactiveProperty(self.init_value, self.read_only, self.modifier, validate)
class PropertyData(ReactiveValue.Data[T]):
def __init__(
self,
name: str,
init_value: Maybe[T],
modifier: Modifier,
validator: Callable[[T], T]):
assert name is not None
assert init_value is not None
assert modifier is not None
assert validator is not None
self._validator = validator
self._property: Optional[BehaviorSubject] = None
obs: Observable
if init_value != Nothing:
self._property = BehaviorSubject(init_value.map(validator).unwrap())
obs = self._property
else:
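                # no initial value yet: expose an empty observable until the first assignment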
obs = rx.empty()
super().__init__(name, obs, modifier)
# Must override to appease Mypy... I hate Python.
@property
def value(self) -> T:
return super().value
@value.setter
def value(self, value: T):
self._check_disposed()
if self.initialized:
assert self._property is not None
self._property.on_next(self.validator(value))
else:
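                # first assignment: lazily create the backing subject and publish it as the observable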
self._property = BehaviorSubject(self.validator(value))
self.observable = self._property
@property
def validator(self) -> Callable[[T], T]:
return self._validator
def dispose(self) -> None:
assert self._property is not None
self._check_disposed()
self._property.on_completed()
super().dispose()
def _create_data(self, obj: Any) -> PropertyData:
assert obj is not None
assert self.name is not None
def validate(v: T) -> T:
return self.validator(obj, v)
return self.PropertyData(self.name, self.init_value, self.modifier(obj), validate)
def _get_data(self, obj: Any) -> PropertyData:
assert obj is not None
return cast(ReactiveProperty.PropertyData, super()._get_data(obj))
def _set_value(self, obj: Any, data: ReactiveValue.Data, value: Any) -> None:
assert obj is not None
assert isinstance(data, ReactiveProperty.PropertyData)
data.value = value
|
[
"rx.empty",
"typing.TypeVar"
] |
[((371, 383), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (378, 383), False, 'from typing import TypeVar, Generic, Callable, Optional, Any, cast, Tuple\n'), ((2792, 2802), 'rx.empty', 'rx.empty', ([], {}), '()\n', (2800, 2802), False, 'import rx\n')]
|
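PropertyData in the row above builds on an rx BehaviorSubject, which replays its latest value to new subscribers. A minimal standalone sketch of that underlying mechanism, using only the rx API rather than ReactiveProperty itself:

from rx.subject import BehaviorSubject

subject = BehaviorSubject(1)                               # holds the latest value
subject.subscribe(on_next=lambda v: print("value:", v))  # immediately receives 1
subject.on_next(2)                                        # subscribers receive 2
subject.on_completed()                                    # mirrors PropertyData.dispose()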
#!/usr/bin/env python
#
# Copyright (C) 2018 Intel Corporation
#
# SPDX-License-Identifier: MIT
from __future__ import absolute_import, division, print_function
import argparse
import os
import glog as log
import numpy as np
import cv2
from lxml import etree
from tqdm import tqdm
def parse_args():
"""Parse arguments of command line"""
parser = argparse.ArgumentParser(
fromfile_prefix_chars='@',
description='Convert CVAT XML annotations to masks'
)
parser.add_argument(
'--cvat-xml', metavar='FILE', required=True,
help='input file with CVAT annotation in xml format'
)
parser.add_argument(
'--background-color', metavar='COLOR_BGR', default="0,0,0",
help='specify background color (by default: 0,0,0)'
)
parser.add_argument(
'--label-color', metavar='LABEL:COLOR_BGR', action='append',
default=[],
help="specify a label's color (e.g. 255 or 255,0,0). The color will " +
"be interpreted in accordance with the mask format."
)
parser.add_argument(
'--mask-bitness', type=int, choices=[8, 24], default=8,
help='choose bitness for masks'
)
parser.add_argument(
'--output-dir', metavar='DIRECTORY', required=True,
help='directory for output masks'
)
return parser.parse_args()
def parse_anno_file(cvat_xml):
root = etree.parse(cvat_xml).getroot()
anno = []
for image_tag in root.iter('image'):
image = {}
for key, value in image_tag.items():
image[key] = value
image['shapes'] = []
for poly_tag in image_tag.iter('polygon'):
polygon = {'type': 'polygon'}
for key, value in poly_tag.items():
polygon[key] = value
image['shapes'].append(polygon)
for box_tag in image_tag.iter('box'):
box = {'type': 'box'}
for key, value in box_tag.items():
box[key] = value
box['points'] = "{0},{1};{2},{1};{2},{3};{0},{3}".format(
box['xtl'], box['ytl'], box['xbr'], box['ybr'])
image['shapes'].append(box)
image['shapes'].sort(key=lambda x: int(x.get('z_order', 0)))
anno.append(image)
return anno
def create_mask_file(mask_path, width, height, bitness, color_map, background, shapes):
mask = np.zeros((height, width, bitness // 8), dtype=np.uint8)
for shape in shapes:
color = color_map.get(shape['label'], background)
points = [tuple(map(float, p.split(','))) for p in shape['points'].split(';')]
points = np.array([(int(p[0]), int(p[1])) for p in points])
mask = cv2.fillPoly(mask, [points], color=color)
cv2.imwrite(mask_path, mask)
def to_scalar(str, dim):
scalar = list(map(int, str.split(',')))
if len(scalar) < dim:
scalar.extend([scalar[-1]] * dim)
return tuple(scalar[0:dim])
def main():
args = parse_args()
anno = parse_anno_file(args.cvat_xml)
color_map = {}
dim = args.mask_bitness // 8
for item in args.label_color:
label, color = item.split(':')
color_map[label] = to_scalar(color, dim)
background = to_scalar(args.background_color, dim)
for image in tqdm(anno, desc='Generate masks'):
mask_path = os.path.join(args.output_dir, os.path.splitext(image['name'])[0] + '.png')
mask_dir = os.path.dirname(mask_path)
if mask_dir:
os.makedirs(mask_dir, exist_ok=True)
create_mask_file(mask_path, int(image['width']), int(image['height']),
args.mask_bitness, color_map, background, image['shapes'])
if __name__ == "__main__":
main()
|
[
"cv2.imwrite",
"cv2.fillPoly",
"argparse.ArgumentParser",
"os.makedirs",
"tqdm.tqdm",
"lxml.etree.parse",
"os.path.splitext",
"os.path.dirname",
"numpy.zeros"
] |
[((358, 466), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'fromfile_prefix_chars': '"""@"""', 'description': '"""Convert CVAT XML annotations to masks"""'}), "(fromfile_prefix_chars='@', description=\n 'Convert CVAT XML annotations to masks')\n", (381, 466), False, 'import argparse\n'), ((2382, 2437), 'numpy.zeros', 'np.zeros', (['(height, width, bitness // 8)'], {'dtype': 'np.uint8'}), '((height, width, bitness // 8), dtype=np.uint8)\n', (2390, 2437), True, 'import numpy as np\n'), ((2738, 2766), 'cv2.imwrite', 'cv2.imwrite', (['mask_path', 'mask'], {}), '(mask_path, mask)\n', (2749, 2766), False, 'import cv2\n'), ((3264, 3297), 'tqdm.tqdm', 'tqdm', (['anno'], {'desc': '"""Generate masks"""'}), "(anno, desc='Generate masks')\n", (3268, 3297), False, 'from tqdm import tqdm\n'), ((2692, 2733), 'cv2.fillPoly', 'cv2.fillPoly', (['mask', '[points]'], {'color': 'color'}), '(mask, [points], color=color)\n', (2704, 2733), False, 'import cv2\n'), ((3413, 3439), 'os.path.dirname', 'os.path.dirname', (['mask_path'], {}), '(mask_path)\n', (3428, 3439), False, 'import os\n'), ((1401, 1422), 'lxml.etree.parse', 'etree.parse', (['cvat_xml'], {}), '(cvat_xml)\n', (1412, 1422), False, 'from lxml import etree\n'), ((3473, 3509), 'os.makedirs', 'os.makedirs', (['mask_dir'], {'exist_ok': '(True)'}), '(mask_dir, exist_ok=True)\n', (3484, 3509), False, 'import os\n'), ((3349, 3380), 'os.path.splitext', 'os.path.splitext', (["image['name']"], {}), "(image['name'])\n", (3365, 3380), False, 'import os\n')]
|
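The heart of create_mask_file above is rasterizing each polygon onto a blank mask with cv2.fillPoly. A minimal standalone sketch of that step; the mask size, triangle coordinates and output filename are invented for illustration:

import numpy as np
import cv2

mask = np.zeros((100, 100, 1), dtype=np.uint8)               # 8-bit single-channel mask
points = np.array([(10, 10), (90, 10), (50, 80)], np.int32)  # one triangle, pixel coords
mask = cv2.fillPoly(mask, [points], color=255)
cv2.imwrite("example_mask.png", mask)                        # hypothetical output path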
import sys
from flask import Blueprint, request, jsonify
from flaskApp import db
from flaskApp.assignment.utils import *
from flaskApp.error.error_handlers import *
import json
from flaskApp.helpers import getAssignmentData
assignment = Blueprint('assignment', __name__)
@assignment.route('/restoreAssignment/<calID>/<courseID>', methods=['POST'])
def restore_assignment(calID, courseID):
try:
DbAssignmentUtils.restore_all_original_assignment(calID, courseID)
return jsonify({"restore" : "success"}), 201
except (NotFound) as e:
return jsonify(e.body), e.status_code
@assignment.route('/getAssignment/<calID>/<courseID>/<assignment>', methods=['GET'])
def get_assignment_details(calID, courseID, assignment):
try:
res = DbAssignmentUtils.get_assignment_slot_details(calID, courseID, assignment)
return jsonify(res), 200
except (NotFound) as e:
return jsonify(e.body), e.status_code
@assignment.route('/deleteAssignment/<calID>/<courseID>', methods=['DELETE'])
def delete_assignment(calID, courseID):
try:
request_body = json.loads(request.get_data())
DbAssignmentUtils.delete_assignment_slot(calID, courseID, request_body)
return jsonify({}), 204
except (NotFound, BadRequest) as e:
return jsonify(e.body), e.status_code
@assignment.route('/addAssignment/<calID>/<courseID>', methods=['POST'])
def add_assignment(calID, courseID):
try:
request_body = json.loads(request.get_data())
res = DbAssignmentUtils.add_Assignment_slot(calID, courseID, request_body)
return jsonify(res), 201
except (NotFound, BadRequest, ValidationFailed) as e:
return jsonify(e.body), e.status_code
'''Test method, keep just in case. Will probably be moved to a separate API designed to
interact with just the MySQL database that the data pipeline will drop data into'''
@assignment.route('/getAssignmentTest/<courseID>', methods=['GET'])
def get_session_assignment(courseID):
try:
result = getAssignmentData(courseID)
return jsonify(result)
except (NotFound) as e:
return jsonify(e.body), e.status_code
|
[
"flask.request.get_data",
"flask.Blueprint",
"flaskApp.helpers.getAssignmentData",
"flask.jsonify"
] |
[((238, 271), 'flask.Blueprint', 'Blueprint', (['"""assignment"""', '__name__'], {}), "('assignment', __name__)\n", (247, 271), False, 'from flask import Blueprint, request, jsonify\n'), ((2024, 2051), 'flaskApp.helpers.getAssignmentData', 'getAssignmentData', (['courseID'], {}), '(courseID)\n', (2041, 2051), False, 'from flaskApp.helpers import getAssignmentData\n'), ((2067, 2082), 'flask.jsonify', 'jsonify', (['result'], {}), '(result)\n', (2074, 2082), False, 'from flask import Blueprint, request, jsonify\n'), ((490, 521), 'flask.jsonify', 'jsonify', (["{'restore': 'success'}"], {}), "({'restore': 'success'})\n", (497, 521), False, 'from flask import Blueprint, request, jsonify\n'), ((858, 870), 'flask.jsonify', 'jsonify', (['res'], {}), '(res)\n', (865, 870), False, 'from flask import Blueprint, request, jsonify\n'), ((1112, 1130), 'flask.request.get_data', 'request.get_data', ([], {}), '()\n', (1128, 1130), False, 'from flask import Blueprint, request, jsonify\n'), ((1227, 1238), 'flask.jsonify', 'jsonify', (['{}'], {}), '({})\n', (1234, 1238), False, 'from flask import Blueprint, request, jsonify\n'), ((1484, 1502), 'flask.request.get_data', 'request.get_data', ([], {}), '()\n', (1500, 1502), False, 'from flask import Blueprint, request, jsonify\n'), ((1602, 1614), 'flask.jsonify', 'jsonify', (['res'], {}), '(res)\n', (1609, 1614), False, 'from flask import Blueprint, request, jsonify\n'), ((571, 586), 'flask.jsonify', 'jsonify', (['e.body'], {}), '(e.body)\n', (578, 586), False, 'from flask import Blueprint, request, jsonify\n'), ((919, 934), 'flask.jsonify', 'jsonify', (['e.body'], {}), '(e.body)\n', (926, 934), False, 'from flask import Blueprint, request, jsonify\n'), ((1299, 1314), 'flask.jsonify', 'jsonify', (['e.body'], {}), '(e.body)\n', (1306, 1314), False, 'from flask import Blueprint, request, jsonify\n'), ((1693, 1708), 'flask.jsonify', 'jsonify', (['e.body'], {}), '(e.body)\n', (1700, 1708), False, 'from flask import Blueprint, request, jsonify\n'), ((2126, 2141), 'flask.jsonify', 'jsonify', (['e.body'], {}), '(e.body)\n', (2133, 2141), False, 'from flask import Blueprint, request, jsonify\n')]
|
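The blueprint above returns (jsonify(...), status) tuples and is registered on an app elsewhere in the flaskApp package, which is not shown here. A self-contained sketch of the same pattern with a toy blueprint:

from flask import Flask, Blueprint, jsonify

toy = Blueprint('toy', __name__)

@toy.route('/ping/<name>', methods=['GET'])
def ping(name):
    return jsonify({"pong": name}), 200   # same (body, status) tuple style as above

app = Flask(__name__)
app.register_blueprint(toy)

with app.test_client() as client:
    print(client.get('/ping/demo').get_json())   # {'pong': 'demo'}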
import os
import shutil
SOURCE_DIR = '../deploy/runtime'
TARGET_DIR = 'SAM.app/Contents/runtime'
if os.path.exists(TARGET_DIR):
shutil.rmtree(TARGET_DIR)
shutil.copytree(SOURCE_DIR, TARGET_DIR, ignore=shutil.ignore_patterns('.git'))
SOURCE_DIR = '../deploy/solar_resource'
TARGET_DIR = 'SAM.app/Contents/solar_resource'
if os.path.exists(TARGET_DIR):
shutil.rmtree(TARGET_DIR)
shutil.copytree(SOURCE_DIR, TARGET_DIR, ignore=shutil.ignore_patterns('.git'))
SOURCE_DIR = '../deploy/wind_resource'
TARGET_DIR = 'SAM.app/Contents/wind_resource'
if os.path.exists(TARGET_DIR):
shutil.rmtree(TARGET_DIR)
shutil.copytree(SOURCE_DIR, TARGET_DIR, ignore=shutil.ignore_patterns('.git'))
SOURCE_DIR = '../deploy/libraries'
TARGET_DIR = 'SAM.app/Contents/libraries'
if os.path.exists(TARGET_DIR):
shutil.rmtree(TARGET_DIR)
shutil.copytree(SOURCE_DIR, TARGET_DIR, ignore=shutil.ignore_patterns('.git'))
|
[
"os.path.exists",
"shutil.ignore_patterns",
"shutil.rmtree"
] |
[((102, 128), 'os.path.exists', 'os.path.exists', (['TARGET_DIR'], {}), '(TARGET_DIR)\n', (116, 128), False, 'import os\n'), ((332, 358), 'os.path.exists', 'os.path.exists', (['TARGET_DIR'], {}), '(TARGET_DIR)\n', (346, 358), False, 'import os\n'), ((560, 586), 'os.path.exists', 'os.path.exists', (['TARGET_DIR'], {}), '(TARGET_DIR)\n', (574, 586), False, 'import os\n'), ((780, 806), 'os.path.exists', 'os.path.exists', (['TARGET_DIR'], {}), '(TARGET_DIR)\n', (794, 806), False, 'import os\n'), ((134, 159), 'shutil.rmtree', 'shutil.rmtree', (['TARGET_DIR'], {}), '(TARGET_DIR)\n', (147, 159), False, 'import shutil\n'), ((364, 389), 'shutil.rmtree', 'shutil.rmtree', (['TARGET_DIR'], {}), '(TARGET_DIR)\n', (377, 389), False, 'import shutil\n'), ((592, 617), 'shutil.rmtree', 'shutil.rmtree', (['TARGET_DIR'], {}), '(TARGET_DIR)\n', (605, 617), False, 'import shutil\n'), ((812, 837), 'shutil.rmtree', 'shutil.rmtree', (['TARGET_DIR'], {}), '(TARGET_DIR)\n', (825, 837), False, 'import shutil\n'), ((208, 238), 'shutil.ignore_patterns', 'shutil.ignore_patterns', (['""".git"""'], {}), "('.git')\n", (230, 238), False, 'import shutil\n'), ((438, 468), 'shutil.ignore_patterns', 'shutil.ignore_patterns', (['""".git"""'], {}), "('.git')\n", (460, 468), False, 'import shutil\n'), ((666, 696), 'shutil.ignore_patterns', 'shutil.ignore_patterns', (['""".git"""'], {}), "('.git')\n", (688, 696), False, 'import shutil\n'), ((886, 916), 'shutil.ignore_patterns', 'shutil.ignore_patterns', (['""".git"""'], {}), "('.git')\n", (908, 916), False, 'import shutil\n')]
|
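The script above repeats the same remove-then-copy block four times. A possible refactoring sketch that is intended to keep the same behaviour and directory names:

import os
import shutil

def replace_dir(source_dir, target_dir):
    """Remove target_dir if it exists, then copy source_dir there, skipping .git."""
    if os.path.exists(target_dir):
        shutil.rmtree(target_dir)
    shutil.copytree(source_dir, target_dir, ignore=shutil.ignore_patterns('.git'))

for name in ('runtime', 'solar_resource', 'wind_resource', 'libraries'):
    replace_dir('../deploy/' + name, 'SAM.app/Contents/' + name)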
# -*- coding: utf-8 -*-
"""Tests of GLSAR and diagnostics against Gretl
Created on Thu Feb 02 21:15:47 2012
Author: <NAME>
License: BSD-3
"""
import os
import numpy as np
from numpy.testing import (assert_almost_equal, assert_equal,
assert_allclose, assert_array_less)
from statsmodels.regression.linear_model import OLS, GLSAR
from statsmodels.tools.tools import add_constant
from statsmodels.datasets import macrodata
import statsmodels.stats.sandwich_covariance as sw
import statsmodels.stats.diagnostic as smsdia
import statsmodels.stats.outliers_influence as oi
def compare_ftest(contrast_res, other, decimal=(5,4)):
assert_almost_equal(contrast_res.fvalue, other[0], decimal=decimal[0])
assert_almost_equal(contrast_res.pvalue, other[1], decimal=decimal[1])
assert_equal(contrast_res.df_num, other[2])
assert_equal(contrast_res.df_denom, other[3])
assert_equal("f", other[4])
class TestGLSARGretl:
def test_all(self):
d = macrodata.load_pandas().data
#import datasetswsm.greene as g
#d = g.load('5-1')
#growth rates
gs_l_realinv = 400 * np.diff(np.log(d['realinv'].values))
gs_l_realgdp = 400 * np.diff(np.log(d['realgdp'].values))
#simple diff, not growthrate, I want heteroscedasticity later for testing
endogd = np.diff(d['realinv'])
exogd = add_constant(np.c_[np.diff(d['realgdp'].values), d['realint'][:-1].values])
endogg = gs_l_realinv
exogg = add_constant(np.c_[gs_l_realgdp, d['realint'][:-1].values])
res_ols = OLS(endogg, exogg).fit()
#print res_ols.params
mod_g1 = GLSAR(endogg, exogg, rho=-0.108136)
res_g1 = mod_g1.fit()
#print res_g1.params
mod_g2 = GLSAR(endogg, exogg, rho=-0.108136) #-0.1335859) from R
res_g2 = mod_g2.iterative_fit(maxiter=5)
#print res_g2.params
rho = -0.108136
# coefficient std. error t-ratio p-value 95% CONFIDENCE INTERVAL
partable = np.array([
[-9.50990, 0.990456, -9.602, 3.65e-018, -11.4631, -7.55670], # ***
[ 4.37040, 0.208146, 21.00, 2.93e-052, 3.95993, 4.78086], # ***
[-0.579253, 0.268009, -2.161, 0.0319, -1.10777, -0.0507346]]) # **
#Statistics based on the rho-differenced data:
result_gretl_g1 = dict(
endog_mean = ("Mean dependent var", 3.113973),
endog_std = ("S.D. dependent var", 18.67447),
ssr = ("Sum squared resid", 22530.90),
mse_resid_sqrt = ("S.E. of regression", 10.66735),
rsquared = ("R-squared", 0.676973),
rsquared_adj = ("Adjusted R-squared", 0.673710),
fvalue = ("F(2, 198)", 221.0475),
f_pvalue = ("P-value(F)", 3.56e-51),
resid_acf1 = ("rho", -0.003481),
dw = ("Durbin-Watson", 1.993858))
#fstatistic, p-value, df1, df2
reset_2_3 = [5.219019, 0.00619, 2, 197, "f"]
reset_2 = [7.268492, 0.00762, 1, 198, "f"]
reset_3 = [5.248951, 0.023, 1, 198, "f"]
#LM-statistic, p-value, df
arch_4 = [7.30776, 0.120491, 4, "chi2"]
#multicollinearity
vif = [1.002, 1.002]
cond_1norm = 6862.0664
determinant = 1.0296049e+009
reciprocal_condition_number = 0.013819244
#Chi-square(2): test-statistic, pvalue, df
normality = [20.2792, 3.94837e-005, 2]
#tests
res = res_g1 #with rho from Gretl
#basic
assert_almost_equal(res.params, partable[:,0], 4)
assert_almost_equal(res.bse, partable[:,1], 6)
assert_almost_equal(res.tvalues, partable[:,2], 2)
assert_almost_equal(res.ssr, result_gretl_g1['ssr'][1], decimal=2)
#assert_almost_equal(res.llf, result_gretl_g1['llf'][1], decimal=7) #not in gretl
#assert_almost_equal(res.rsquared, result_gretl_g1['rsquared'][1], decimal=7) #FAIL
#assert_almost_equal(res.rsquared_adj, result_gretl_g1['rsquared_adj'][1], decimal=7) #FAIL
assert_almost_equal(np.sqrt(res.mse_resid), result_gretl_g1['mse_resid_sqrt'][1], decimal=5)
assert_almost_equal(res.fvalue, result_gretl_g1['fvalue'][1], decimal=4)
assert_allclose(res.f_pvalue,
result_gretl_g1['f_pvalue'][1],
rtol=1e-2)
#assert_almost_equal(res.durbin_watson, result_gretl_g1['dw'][1], decimal=7) #TODO
#arch
#sm_arch = smsdia.acorr_lm(res.wresid**2, maxlag=4, autolag=None)
sm_arch = smsdia.het_arch(res.wresid, nlags=4)
assert_almost_equal(sm_arch[0], arch_4[0], decimal=4)
assert_almost_equal(sm_arch[1], arch_4[1], decimal=6)
#tests
res = res_g2 #with estimated rho
#estimated lag coefficient
assert_almost_equal(res.model.rho, rho, decimal=3)
#basic
assert_almost_equal(res.params, partable[:,0], 4)
assert_almost_equal(res.bse, partable[:,1], 3)
assert_almost_equal(res.tvalues, partable[:,2], 2)
assert_almost_equal(res.ssr, result_gretl_g1['ssr'][1], decimal=2)
#assert_almost_equal(res.llf, result_gretl_g1['llf'][1], decimal=7) #not in gretl
#assert_almost_equal(res.rsquared, result_gretl_g1['rsquared'][1], decimal=7) #FAIL
#assert_almost_equal(res.rsquared_adj, result_gretl_g1['rsquared_adj'][1], decimal=7) #FAIL
assert_almost_equal(np.sqrt(res.mse_resid), result_gretl_g1['mse_resid_sqrt'][1], decimal=5)
assert_almost_equal(res.fvalue, result_gretl_g1['fvalue'][1], decimal=0)
assert_almost_equal(res.f_pvalue, result_gretl_g1['f_pvalue'][1], decimal=6)
#assert_almost_equal(res.durbin_watson, result_gretl_g1['dw'][1], decimal=7) #TODO
c = oi.reset_ramsey(res, degree=2)
compare_ftest(c, reset_2, decimal=(2,4))
c = oi.reset_ramsey(res, degree=3)
compare_ftest(c, reset_2_3, decimal=(2,4))
#arch
#sm_arch = smsdia.acorr_lm(res.wresid**2, maxlag=4, autolag=None)
sm_arch = smsdia.het_arch(res.wresid, nlags=4)
assert_almost_equal(sm_arch[0], arch_4[0], decimal=1)
assert_almost_equal(sm_arch[1], arch_4[1], decimal=2)
'''
Performing iterative calculation of rho...
ITER RHO ESS
1 -0.10734 22530.9
2 -0.10814 22530.9
Model 4: Cochrane-Orcutt, using observations 1959:3-2009:3 (T = 201)
Dependent variable: ds_l_realinv
rho = -0.108136
coefficient std. error t-ratio p-value
-------------------------------------------------------------
const -9.50990 0.990456 -9.602 3.65e-018 ***
ds_l_realgdp 4.37040 0.208146 21.00 2.93e-052 ***
realint_1 -0.579253 0.268009 -2.161 0.0319 **
Statistics based on the rho-differenced data:
Mean dependent var 3.113973 S.D. dependent var 18.67447
Sum squared resid 22530.90 S.E. of regression 10.66735
R-squared 0.676973 Adjusted R-squared 0.673710
F(2, 198) 221.0475 P-value(F) 3.56e-51
rho -0.003481 Durbin-Watson 1.993858
'''
'''
RESET test for specification (squares and cubes)
Test statistic: F = 5.219019,
with p-value = P(F(2,197) > 5.21902) = 0.00619
RESET test for specification (squares only)
Test statistic: F = 7.268492,
with p-value = P(F(1,198) > 7.26849) = 0.00762
RESET test for specification (cubes only)
Test statistic: F = 5.248951,
with p-value = P(F(1,198) > 5.24895) = 0.023:
'''
'''
Test for ARCH of order 4
coefficient std. error t-ratio p-value
--------------------------------------------------------
alpha(0) 97.0386 20.3234 4.775 3.56e-06 ***
alpha(1) 0.176114 0.0714698 2.464 0.0146 **
alpha(2) -0.0488339 0.0724981 -0.6736 0.5014
alpha(3) -0.0705413 0.0737058 -0.9571 0.3397
alpha(4) 0.0384531 0.0725763 0.5298 0.5968
Null hypothesis: no ARCH effect is present
Test statistic: LM = 7.30776
with p-value = P(Chi-square(4) > 7.30776) = 0.120491:
'''
'''
Variance Inflation Factors
Minimum possible value = 1.0
Values > 10.0 may indicate a collinearity problem
ds_l_realgdp 1.002
realint_1 1.002
VIF(j) = 1/(1 - R(j)^2), where R(j) is the multiple correlation coefficient
between variable j and the other independent variables
Properties of matrix X'X:
1-norm = 6862.0664
Determinant = 1.0296049e+009
Reciprocal condition number = 0.013819244
'''
'''
Test for ARCH of order 4 -
Null hypothesis: no ARCH effect is present
Test statistic: LM = 7.30776
with p-value = P(Chi-square(4) > 7.30776) = 0.120491
Test of common factor restriction -
Null hypothesis: restriction is acceptable
Test statistic: F(2, 195) = 0.426391
with p-value = P(F(2, 195) > 0.426391) = 0.653468
Test for normality of residual -
Null hypothesis: error is normally distributed
Test statistic: Chi-square(2) = 20.2792
with p-value = 3.94837e-005:
'''
#no idea what this is
'''
Augmented regression for common factor test
OLS, using observations 1959:3-2009:3 (T = 201)
Dependent variable: ds_l_realinv
coefficient std. error t-ratio p-value
---------------------------------------------------------------
const -10.9481 1.35807 -8.062 7.44e-014 ***
ds_l_realgdp 4.28893 0.229459 18.69 2.40e-045 ***
realint_1 -0.662644 0.334872 -1.979 0.0492 **
ds_l_realinv_1 -0.108892 0.0715042 -1.523 0.1294
ds_l_realgdp_1 0.660443 0.390372 1.692 0.0923 *
realint_2 0.0769695 0.341527 0.2254 0.8219
Sum of squared residuals = 22432.8
Test of common factor restriction
Test statistic: F(2, 195) = 0.426391, with p-value = 0.653468
'''
################ with OLS, HAC errors
#Model 5: OLS, using observations 1959:2-2009:3 (T = 202)
#Dependent variable: ds_l_realinv
#HAC standard errors, bandwidth 4 (Bartlett kernel)
#coefficient std. error t-ratio p-value 95% CONFIDENCE INTERVAL
#for confidence interval t(199, 0.025) = 1.972
partable = np.array([
[-9.48167, 1.17709, -8.055, 7.17e-014, -11.8029, -7.16049], # ***
[4.37422, 0.328787, 13.30, 2.62e-029, 3.72587, 5.02258], #***
[-0.613997, 0.293619, -2.091, 0.0378, -1.19300, -0.0349939]]) # **
result_gretl_g1 = dict(
endog_mean = ("Mean dependent var", 3.257395),
endog_std = ("S.D. dependent var", 18.73915),
ssr = ("Sum squared resid", 22799.68),
mse_resid_sqrt = ("S.E. of regression", 10.70380),
rsquared = ("R-squared", 0.676978),
rsquared_adj = ("Adjusted R-squared", 0.673731),
fvalue = ("F(2, 199)", 90.79971),
f_pvalue = ("P-value(F)", 9.53e-29),
llf = ("Log-likelihood", -763.9752),
aic = ("Akaike criterion", 1533.950),
bic = ("Schwarz criterion", 1543.875),
hqic = ("Hannan-Quinn", 1537.966),
resid_acf1 = ("rho", -0.107341),
dw = ("Durbin-Watson", 2.213805))
linear_logs = [1.68351, 0.430953, 2, "chi2"]
#for logs: dropping 70 nan or incomplete observations, T=133
#(res_ols.model.exog <=0).any(1).sum() = 69 ?not 70
linear_squares = [7.52477, 0.0232283, 2, "chi2"]
#Autocorrelation, Breusch-Godfrey test for autocorrelation up to order 4
lm_acorr4 = [1.17928, 0.321197, 4, 195, "F"]
lm2_acorr4 = [4.771043, 0.312, 4, "chi2"]
acorr_ljungbox4 = [5.23587, 0.264, 4, "chi2"]
#break
cusum_Harvey_Collier = [0.494432, 0.621549, 198, "t"] #stats.t.sf(0.494432, 198)*2
#see cusum results in files
break_qlr = [3.01985, 0.1, 3, 196, "maxF"] #TODO check this, max at 2001:4
break_chow = [13.1897, 0.00424384, 3, "chi2"] # break at 1984:1
arch_4 = [3.43473, 0.487871, 4, "chi2"]
normality = [23.962, 0.00001, 2, "chi2"]
het_white = [33.503723, 0.000003, 5, "chi2"]
het_breusch_pagan = [1.302014, 0.521520, 2, "chi2"] #TODO: not available
het_breusch_pagan_konker = [0.709924, 0.701200, 2, "chi2"]
reset_2_3 = [5.219019, 0.00619, 2, 197, "f"]
reset_2 = [7.268492, 0.00762, 1, 198, "f"]
reset_3 = [5.248951, 0.023, 1, 198, "f"] #not available
cond_1norm = 5984.0525
determinant = 7.1087467e+008
reciprocal_condition_number = 0.013826504
vif = [1.001, 1.001]
names = 'date residual leverage influence DFFITS'.split()
cur_dir = os.path.abspath(os.path.dirname(__file__))
fpath = os.path.join(cur_dir, 'results/leverage_influence_ols_nostars.txt')
lev = np.genfromtxt(fpath, skip_header=3, skip_footer=1,
converters={0:lambda s: s})
#either numpy 1.6 or python 3.2 changed behavior
if np.isnan(lev[-1]['f1']):
lev = np.genfromtxt(fpath, skip_header=3, skip_footer=2,
converters={0:lambda s: s})
lev.dtype.names = names
res = res_ols #for easier copying
cov_hac = sw.cov_hac_simple(res, nlags=4, use_correction=False)
bse_hac = sw.se_cov(cov_hac)
assert_almost_equal(res.params, partable[:,0], 5)
assert_almost_equal(bse_hac, partable[:,1], 5)
#TODO
assert_almost_equal(res.ssr, result_gretl_g1['ssr'][1], decimal=2)
assert_almost_equal(res.llf, result_gretl_g1['llf'][1], decimal=4) #not in gretl
assert_almost_equal(res.rsquared, result_gretl_g1['rsquared'][1], decimal=6) #FAIL
assert_almost_equal(res.rsquared_adj, result_gretl_g1['rsquared_adj'][1], decimal=6) #FAIL
assert_almost_equal(np.sqrt(res.mse_resid), result_gretl_g1['mse_resid_sqrt'][1], decimal=5)
#f-value is based on cov_hac I guess
#res2 = res.get_robustcov_results(cov_type='HC1')
# TODO: fvalue differs from Gretl, trying any of the HCx
#assert_almost_equal(res2.fvalue, result_gretl_g1['fvalue'][1], decimal=0) #FAIL
#assert_approx_equal(res.f_pvalue, result_gretl_g1['f_pvalue'][1], significant=1) #FAIL
#assert_almost_equal(res.durbin_watson, result_gretl_g1['dw'][1], decimal=7) #TODO
c = oi.reset_ramsey(res, degree=2)
compare_ftest(c, reset_2, decimal=(6,5))
c = oi.reset_ramsey(res, degree=3)
compare_ftest(c, reset_2_3, decimal=(6,5))
linear_sq = smsdia.linear_lm(res.resid, res.model.exog)
assert_almost_equal(linear_sq[0], linear_squares[0], decimal=6)
assert_almost_equal(linear_sq[1], linear_squares[1], decimal=7)
hbpk = smsdia.het_breuschpagan(res.resid, res.model.exog)
assert_almost_equal(hbpk[0], het_breusch_pagan_konker[0], decimal=6)
assert_almost_equal(hbpk[1], het_breusch_pagan_konker[1], decimal=6)
hw = smsdia.het_white(res.resid, res.model.exog)
assert_almost_equal(hw[:2], het_white[:2], 6)
#arch
#sm_arch = smsdia.acorr_lm(res.resid**2, maxlag=4, autolag=None)
sm_arch = smsdia.het_arch(res.resid, nlags=4)
assert_almost_equal(sm_arch[0], arch_4[0], decimal=5)
assert_almost_equal(sm_arch[1], arch_4[1], decimal=6)
vif2 = [oi.variance_inflation_factor(res.model.exog, k) for k in [1,2]]
infl = oi.OLSInfluence(res_ols)
#print np.max(np.abs(lev['DFFITS'] - infl.dffits[0]))
#print np.max(np.abs(lev['leverage'] - infl.hat_matrix_diag))
#print np.max(np.abs(lev['influence'] - infl.influence)) #just added this based on Gretl
#just rough test, low decimal in Gretl output,
assert_almost_equal(lev['residual'], res.resid, decimal=3)
assert_almost_equal(lev['DFFITS'], infl.dffits[0], decimal=3)
assert_almost_equal(lev['leverage'], infl.hat_matrix_diag, decimal=3)
assert_almost_equal(lev['influence'], infl.influence, decimal=4)
def test_GLSARlag():
    #test that results for lag>1 are close to lag=1, and that ssr is smaller
from statsmodels.datasets import macrodata
d2 = macrodata.load_pandas().data
g_gdp = 400*np.diff(np.log(d2['realgdp'].values))
g_inv = 400*np.diff(np.log(d2['realinv'].values))
exogg = add_constant(np.c_[g_gdp, d2['realint'][:-1].values], prepend=False)
mod1 = GLSAR(g_inv, exogg, 1)
res1 = mod1.iterative_fit(5)
mod4 = GLSAR(g_inv, exogg, 4)
res4 = mod4.iterative_fit(10)
assert_array_less(np.abs(res1.params / res4.params - 1), 0.03)
assert_array_less(res4.ssr, res1.ssr)
assert_array_less(np.abs(res4.bse / res1.bse) - 1, 0.015)
assert_array_less(np.abs((res4.fittedvalues / res1.fittedvalues - 1).mean()),
0.015)
assert_equal(len(mod4.rho), 4)
if __name__ == '__main__':
t = TestGLSARGretl()
t.test_all()
'''
Model 5: OLS, using observations 1959:2-2009:3 (T = 202)
Dependent variable: ds_l_realinv
HAC standard errors, bandwidth 4 (Bartlett kernel)
coefficient std. error t-ratio p-value
-------------------------------------------------------------
const -9.48167 1.17709 -8.055 7.17e-014 ***
ds_l_realgdp 4.37422 0.328787 13.30 2.62e-029 ***
realint_1 -0.613997 0.293619 -2.091 0.0378 **
Mean dependent var 3.257395 S.D. dependent var 18.73915
Sum squared resid 22799.68 S.E. of regression 10.70380
R-squared 0.676978 Adjusted R-squared 0.673731
F(2, 199) 90.79971 P-value(F) 9.53e-29
Log-likelihood -763.9752 Akaike criterion 1533.950
Schwarz criterion 1543.875 Hannan-Quinn 1537.966
rho -0.107341 Durbin-Watson 2.213805
QLR test for structural break -
Null hypothesis: no structural break
Test statistic: max F(3, 196) = 3.01985 at observation 2001:4
(10 percent critical value = 4.09)
Non-linearity test (logs) -
Null hypothesis: relationship is linear
Test statistic: LM = 1.68351
with p-value = P(Chi-square(2) > 1.68351) = 0.430953
Non-linearity test (squares) -
Null hypothesis: relationship is linear
Test statistic: LM = 7.52477
with p-value = P(Chi-square(2) > 7.52477) = 0.0232283
LM test for autocorrelation up to order 4 -
Null hypothesis: no autocorrelation
Test statistic: LMF = 1.17928
with p-value = P(F(4,195) > 1.17928) = 0.321197
CUSUM test for parameter stability -
Null hypothesis: no change in parameters
Test statistic: Harvey-Collier t(198) = 0.494432
with p-value = P(t(198) > 0.494432) = 0.621549
Chow test for structural break at observation 1984:1 -
Null hypothesis: no structural break
Asymptotic test statistic: Chi-square(3) = 13.1897
with p-value = 0.00424384
Test for ARCH of order 4 -
Null hypothesis: no ARCH effect is present
Test statistic: LM = 3.43473
with p-value = P(Chi-square(4) > 3.43473) = 0.487871:
#ANOVA
Analysis of Variance:
Sum of squares df Mean square
Regression 47782.7 2 23891.3
Residual 22799.7 199 114.571
Total 70582.3 201 351.156
R^2 = 47782.7 / 70582.3 = 0.676978
F(2, 199) = 23891.3 / 114.571 = 208.528 [p-value 1.47e-049]
#LM-test autocorrelation
Breusch-Godfrey test for autocorrelation up to order 4
OLS, using observations 1959:2-2009:3 (T = 202)
Dependent variable: uhat
coefficient std. error t-ratio p-value
------------------------------------------------------------
const 0.0640964 1.06719 0.06006 0.9522
ds_l_realgdp -0.0456010 0.217377 -0.2098 0.8341
realint_1 0.0511769 0.293136 0.1746 0.8616
uhat_1 -0.104707 0.0719948 -1.454 0.1475
uhat_2 -0.00898483 0.0742817 -0.1210 0.9039
uhat_3 0.0837332 0.0735015 1.139 0.2560
uhat_4 -0.0636242 0.0737363 -0.8629 0.3893
Unadjusted R-squared = 0.023619
Test statistic: LMF = 1.179281,
with p-value = P(F(4,195) > 1.17928) = 0.321
Alternative statistic: TR^2 = 4.771043,
with p-value = P(Chi-square(4) > 4.77104) = 0.312
Ljung-Box Q' = 5.23587,
with p-value = P(Chi-square(4) > 5.23587) = 0.264:
RESET test for specification (squares and cubes)
Test statistic: F = 5.219019,
with p-value = P(F(2,197) > 5.21902) = 0.00619
RESET test for specification (squares only)
Test statistic: F = 7.268492,
with p-value = P(F(1,198) > 7.26849) = 0.00762
RESET test for specification (cubes only)
Test statistic: F = 5.248951,
with p-value = P(F(1,198) > 5.24895) = 0.023
#heteroscedasticity White
White's test for heteroskedasticity
OLS, using observations 1959:2-2009:3 (T = 202)
Dependent variable: uhat^2
coefficient std. error t-ratio p-value
-------------------------------------------------------------
const 104.920 21.5848 4.861 2.39e-06 ***
ds_l_realgdp -29.7040 6.24983 -4.753 3.88e-06 ***
realint_1 -6.93102 6.95607 -0.9964 0.3203
sq_ds_l_realg 4.12054 0.684920 6.016 8.62e-09 ***
X2_X3 2.89685 1.38571 2.091 0.0379 **
sq_realint_1 0.662135 1.10919 0.5970 0.5512
Unadjusted R-squared = 0.165860
Test statistic: TR^2 = 33.503723,
with p-value = P(Chi-square(5) > 33.503723) = 0.000003:
#heteroscedasticity Breusch-Pagan (original)
Breusch-Pagan test for heteroskedasticity
OLS, using observations 1959:2-2009:3 (T = 202)
Dependent variable: scaled uhat^2
coefficient std. error t-ratio p-value
-------------------------------------------------------------
const 1.09468 0.192281 5.693 4.43e-08 ***
ds_l_realgdp -0.0323119 0.0386353 -0.8363 0.4040
realint_1 0.00410778 0.0512274 0.08019 0.9362
Explained sum of squares = 2.60403
Test statistic: LM = 1.302014,
with p-value = P(Chi-square(2) > 1.302014) = 0.521520
#heteroscedasticity Breusch-Pagan Koenker
Breusch-Pagan test for heteroskedasticity
OLS, using observations 1959:2-2009:3 (T = 202)
Dependent variable: scaled uhat^2 (Koenker robust variant)
coefficient std. error t-ratio p-value
------------------------------------------------------------
const 10.6870 21.7027 0.4924 0.6230
ds_l_realgdp -3.64704 4.36075 -0.8363 0.4040
realint_1 0.463643 5.78202 0.08019 0.9362
Explained sum of squares = 33174.2
Test statistic: LM = 0.709924,
with p-value = P(Chi-square(2) > 0.709924) = 0.701200
########## forecast
#forecast mean y
For 95% confidence intervals, t(199, 0.025) = 1.972
Obs ds_l_realinv prediction std. error 95% interval
2008:3 -7.134492 -17.177905 2.946312 -22.987904 - -11.367905
2008:4 -27.665860 -36.294434 3.036851 -42.282972 - -30.305896
2009:1 -70.239280 -44.018178 4.007017 -51.919841 - -36.116516
2009:2 -27.024588 -12.284842 1.427414 -15.099640 - -9.470044
2009:3 8.078897 4.483669 1.315876 1.888819 - 7.078520
Forecast evaluation statistics
Mean Error -3.7387
Mean Squared Error 218.61
Root Mean Squared Error 14.785
Mean Absolute Error 12.646
Mean Percentage Error -7.1173
Mean Absolute Percentage Error -43.867
Theil's U 0.4365
Bias proportion, UM 0.06394
Regression proportion, UR 0.13557
Disturbance proportion, UD 0.80049
#forecast actual y
For 95% confidence intervals, t(199, 0.025) = 1.972
Obs ds_l_realinv prediction std. error 95% interval
2008:3 -7.134492 -17.177905 11.101892 -39.070353 - 4.714544
2008:4 -27.665860 -36.294434 11.126262 -58.234939 - -14.353928
2009:1 -70.239280 -44.018178 11.429236 -66.556135 - -21.480222
2009:2 -27.024588 -12.284842 10.798554 -33.579120 - 9.009436
2009:3 8.078897 4.483669 10.784377 -16.782652 - 25.749991
Forecast evaluation statistics
Mean Error -3.7387
Mean Squared Error 218.61
Root Mean Squared Error 14.785
Mean Absolute Error 12.646
Mean Percentage Error -7.1173
Mean Absolute Percentage Error -43.867
Theil's U 0.4365
Bias proportion, UM 0.06394
Regression proportion, UR 0.13557
Disturbance proportion, UD 0.80049
'''
|
[
"numpy.sqrt",
"numpy.testing.assert_equal",
"numpy.log",
"statsmodels.stats.diagnostic.het_white",
"numpy.array",
"numpy.genfromtxt",
"statsmodels.stats.diagnostic.het_breuschpagan",
"numpy.testing.assert_array_less",
"statsmodels.stats.outliers_influence.OLSInfluence",
"numpy.testing.assert_allclose",
"statsmodels.stats.outliers_influence.reset_ramsey",
"numpy.diff",
"numpy.testing.assert_almost_equal",
"statsmodels.regression.linear_model.OLS",
"statsmodels.regression.linear_model.GLSAR",
"statsmodels.tools.tools.add_constant",
"numpy.abs",
"statsmodels.stats.sandwich_covariance.cov_hac_simple",
"os.path.dirname",
"statsmodels.stats.outliers_influence.variance_inflation_factor",
"numpy.isnan",
"statsmodels.datasets.macrodata.load_pandas",
"statsmodels.stats.diagnostic.linear_lm",
"statsmodels.stats.diagnostic.het_arch",
"os.path.join",
"statsmodels.stats.sandwich_covariance.se_cov"
] |
[((662, 732), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['contrast_res.fvalue', 'other[0]'], {'decimal': 'decimal[0]'}), '(contrast_res.fvalue, other[0], decimal=decimal[0])\n', (681, 732), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((737, 807), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['contrast_res.pvalue', 'other[1]'], {'decimal': 'decimal[1]'}), '(contrast_res.pvalue, other[1], decimal=decimal[1])\n', (756, 807), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((812, 855), 'numpy.testing.assert_equal', 'assert_equal', (['contrast_res.df_num', 'other[2]'], {}), '(contrast_res.df_num, other[2])\n', (824, 855), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((860, 905), 'numpy.testing.assert_equal', 'assert_equal', (['contrast_res.df_denom', 'other[3]'], {}), '(contrast_res.df_denom, other[3])\n', (872, 905), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((910, 937), 'numpy.testing.assert_equal', 'assert_equal', (['"""f"""', 'other[4]'], {}), "('f', other[4])\n", (922, 937), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((17494, 17562), 'statsmodels.tools.tools.add_constant', 'add_constant', (["np.c_[g_gdp, d2['realint'][:-1].values]"], {'prepend': '(False)'}), "(np.c_[g_gdp, d2['realint'][:-1].values], prepend=False)\n", (17506, 17562), False, 'from statsmodels.tools.tools import add_constant\n'), ((17575, 17597), 'statsmodels.regression.linear_model.GLSAR', 'GLSAR', (['g_inv', 'exogg', '(1)'], {}), '(g_inv, exogg, 1)\n', (17580, 17597), False, 'from statsmodels.regression.linear_model import OLS, GLSAR\n'), ((17643, 17665), 'statsmodels.regression.linear_model.GLSAR', 'GLSAR', (['g_inv', 'exogg', '(4)'], {}), '(g_inv, exogg, 4)\n', (17648, 17665), False, 'from statsmodels.regression.linear_model import OLS, GLSAR\n'), ((17772, 17809), 'numpy.testing.assert_array_less', 'assert_array_less', (['res4.ssr', 'res1.ssr'], {}), '(res4.ssr, res1.ssr)\n', (17789, 17809), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((1351, 1372), 'numpy.diff', 'np.diff', (["d['realinv']"], {}), "(d['realinv'])\n", (1358, 1372), True, 'import numpy as np\n'), ((1512, 1571), 'statsmodels.tools.tools.add_constant', 'add_constant', (["np.c_[gs_l_realgdp, d['realint'][:-1].values]"], {}), "(np.c_[gs_l_realgdp, d['realint'][:-1].values])\n", (1524, 1571), False, 'from statsmodels.tools.tools import add_constant\n'), ((1664, 1699), 'statsmodels.regression.linear_model.GLSAR', 'GLSAR', (['endogg', 'exogg'], {'rho': '(-0.108136)'}), '(endogg, exogg, rho=-0.108136)\n', (1669, 1699), False, 'from statsmodels.regression.linear_model import OLS, GLSAR\n'), ((1777, 1812), 'statsmodels.regression.linear_model.GLSAR', 'GLSAR', (['endogg', 'exogg'], {'rho': '(-0.108136)'}), '(endogg, exogg, rho=-0.108136)\n', (1782, 1812), False, 'from statsmodels.regression.linear_model import OLS, GLSAR\n'), ((2055, 2247), 'numpy.array', 'np.array', (['[[-9.5099, 0.990456, -9.602, 3.65e-18, -11.4631, -7.5567], [4.3704, \n 0.208146, 21.0, 2.93e-52, 3.95993, 4.78086], [-0.579253, 0.268009, -\n 2.161, 0.0319, -1.10777, -0.0507346]]'], {}), '([[-9.5099, 0.990456, -9.602, 3.65e-18, -11.4631, -7.5567], [4.3704,\n 0.208146, 21.0, 2.93e-52, 
3.95993, 4.78086], [-0.579253, 0.268009, -\n 2.161, 0.0319, -1.10777, -0.0507346]])\n', (2063, 2247), True, 'import numpy as np\n'), ((3619, 3669), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['res.params', 'partable[:, 0]', '(4)'], {}), '(res.params, partable[:, 0], 4)\n', (3638, 3669), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((3677, 3724), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['res.bse', 'partable[:, 1]', '(6)'], {}), '(res.bse, partable[:, 1], 6)\n', (3696, 3724), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((3732, 3783), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['res.tvalues', 'partable[:, 2]', '(2)'], {}), '(res.tvalues, partable[:, 2], 2)\n', (3751, 3783), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((3792, 3858), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['res.ssr', "result_gretl_g1['ssr'][1]"], {'decimal': '(2)'}), "(res.ssr, result_gretl_g1['ssr'][1], decimal=2)\n", (3811, 3858), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((4250, 4322), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['res.fvalue', "result_gretl_g1['fvalue'][1]"], {'decimal': '(4)'}), "(res.fvalue, result_gretl_g1['fvalue'][1], decimal=4)\n", (4269, 4322), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((4331, 4403), 'numpy.testing.assert_allclose', 'assert_allclose', (['res.f_pvalue', "result_gretl_g1['f_pvalue'][1]"], {'rtol': '(0.01)'}), "(res.f_pvalue, result_gretl_g1['f_pvalue'][1], rtol=0.01)\n", (4346, 4403), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((4650, 4686), 'statsmodels.stats.diagnostic.het_arch', 'smsdia.het_arch', (['res.wresid'], {'nlags': '(4)'}), '(res.wresid, nlags=4)\n', (4665, 4686), True, 'import statsmodels.stats.diagnostic as smsdia\n'), ((4695, 4748), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['sm_arch[0]', 'arch_4[0]'], {'decimal': '(4)'}), '(sm_arch[0], arch_4[0], decimal=4)\n', (4714, 4748), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((4757, 4810), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['sm_arch[1]', 'arch_4[1]'], {'decimal': '(6)'}), '(sm_arch[1], arch_4[1], decimal=6)\n', (4776, 4810), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((4912, 4962), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['res.model.rho', 'rho'], {'decimal': '(3)'}), '(res.model.rho, rho, decimal=3)\n', (4931, 4962), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((4987, 5037), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['res.params', 'partable[:, 0]', '(4)'], {}), '(res.params, partable[:, 0], 4)\n', (5006, 5037), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((5045, 5092), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['res.bse', 'partable[:, 1]', '(3)'], {}), '(res.bse, partable[:, 1], 3)\n', (5064, 5092), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, 
assert_array_less\n'), ((5100, 5151), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['res.tvalues', 'partable[:, 2]', '(2)'], {}), '(res.tvalues, partable[:, 2], 2)\n', (5119, 5151), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((5160, 5226), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['res.ssr', "result_gretl_g1['ssr'][1]"], {'decimal': '(2)'}), "(res.ssr, result_gretl_g1['ssr'][1], decimal=2)\n", (5179, 5226), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((5618, 5690), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['res.fvalue', "result_gretl_g1['fvalue'][1]"], {'decimal': '(0)'}), "(res.fvalue, result_gretl_g1['fvalue'][1], decimal=0)\n", (5637, 5690), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((5699, 5775), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['res.f_pvalue', "result_gretl_g1['f_pvalue'][1]"], {'decimal': '(6)'}), "(res.f_pvalue, result_gretl_g1['f_pvalue'][1], decimal=6)\n", (5718, 5775), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((5882, 5912), 'statsmodels.stats.outliers_influence.reset_ramsey', 'oi.reset_ramsey', (['res'], {'degree': '(2)'}), '(res, degree=2)\n', (5897, 5912), True, 'import statsmodels.stats.outliers_influence as oi\n'), ((5974, 6004), 'statsmodels.stats.outliers_influence.reset_ramsey', 'oi.reset_ramsey', (['res'], {'degree': '(3)'}), '(res, degree=3)\n', (5989, 6004), True, 'import statsmodels.stats.outliers_influence as oi\n'), ((6163, 6199), 'statsmodels.stats.diagnostic.het_arch', 'smsdia.het_arch', (['res.wresid'], {'nlags': '(4)'}), '(res.wresid, nlags=4)\n', (6178, 6199), True, 'import statsmodels.stats.diagnostic as smsdia\n'), ((6208, 6261), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['sm_arch[0]', 'arch_4[0]'], {'decimal': '(1)'}), '(sm_arch[0], arch_4[0], decimal=1)\n', (6227, 6261), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((6270, 6323), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['sm_arch[1]', 'arch_4[1]'], {'decimal': '(2)'}), '(sm_arch[1], arch_4[1], decimal=2)\n', (6289, 6323), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((11095, 11288), 'numpy.array', 'np.array', (['[[-9.48167, 1.17709, -8.055, 7.17e-14, -11.8029, -7.16049], [4.37422, \n 0.328787, 13.3, 2.62e-29, 3.72587, 5.02258], [-0.613997, 0.293619, -\n 2.091, 0.0378, -1.193, -0.0349939]]'], {}), '([[-9.48167, 1.17709, -8.055, 7.17e-14, -11.8029, -7.16049], [\n 4.37422, 0.328787, 13.3, 2.62e-29, 3.72587, 5.02258], [-0.613997, \n 0.293619, -2.091, 0.0378, -1.193, -0.0349939]])\n', (11103, 11288), True, 'import numpy as np\n'), ((13880, 13947), 'os.path.join', 'os.path.join', (['cur_dir', '"""results/leverage_influence_ols_nostars.txt"""'], {}), "(cur_dir, 'results/leverage_influence_ols_nostars.txt')\n", (13892, 13947), False, 'import os\n'), ((13962, 14047), 'numpy.genfromtxt', 'np.genfromtxt', (['fpath'], {'skip_header': '(3)', 'skip_footer': '(1)', 'converters': '{(0): lambda s: s}'}), '(fpath, skip_header=3, skip_footer=1, converters={(0): lambda\n s: s})\n', (13975, 14047), True, 'import numpy as np\n'), ((14137, 14160), 'numpy.isnan', 'np.isnan', (["lev[-1]['f1']"], {}), "(lev[-1]['f1'])\n", 
(14145, 14160), True, 'import numpy as np\n'), ((14386, 14439), 'statsmodels.stats.sandwich_covariance.cov_hac_simple', 'sw.cov_hac_simple', (['res'], {'nlags': '(4)', 'use_correction': '(False)'}), '(res, nlags=4, use_correction=False)\n', (14403, 14439), True, 'import statsmodels.stats.sandwich_covariance as sw\n'), ((14459, 14477), 'statsmodels.stats.sandwich_covariance.se_cov', 'sw.se_cov', (['cov_hac'], {}), '(cov_hac)\n', (14468, 14477), True, 'import statsmodels.stats.sandwich_covariance as sw\n'), ((14487, 14537), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['res.params', 'partable[:, 0]', '(5)'], {}), '(res.params, partable[:, 0], 5)\n', (14506, 14537), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((14545, 14592), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['bse_hac', 'partable[:, 1]', '(5)'], {}), '(bse_hac, partable[:, 1], 5)\n', (14564, 14592), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((14615, 14681), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['res.ssr', "result_gretl_g1['ssr'][1]"], {'decimal': '(2)'}), "(res.ssr, result_gretl_g1['ssr'][1], decimal=2)\n", (14634, 14681), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((14690, 14756), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['res.llf', "result_gretl_g1['llf'][1]"], {'decimal': '(4)'}), "(res.llf, result_gretl_g1['llf'][1], decimal=4)\n", (14709, 14756), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((14779, 14855), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['res.rsquared', "result_gretl_g1['rsquared'][1]"], {'decimal': '(6)'}), "(res.rsquared, result_gretl_g1['rsquared'][1], decimal=6)\n", (14798, 14855), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((14870, 14958), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['res.rsquared_adj', "result_gretl_g1['rsquared_adj'][1]"], {'decimal': '(6)'}), "(res.rsquared_adj, result_gretl_g1['rsquared_adj'][1],\n decimal=6)\n", (14889, 14958), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((15520, 15550), 'statsmodels.stats.outliers_influence.reset_ramsey', 'oi.reset_ramsey', (['res'], {'degree': '(2)'}), '(res, degree=2)\n', (15535, 15550), True, 'import statsmodels.stats.outliers_influence as oi\n'), ((15612, 15642), 'statsmodels.stats.outliers_influence.reset_ramsey', 'oi.reset_ramsey', (['res'], {'degree': '(3)'}), '(res, degree=3)\n', (15627, 15642), True, 'import statsmodels.stats.outliers_influence as oi\n'), ((15715, 15758), 'statsmodels.stats.diagnostic.linear_lm', 'smsdia.linear_lm', (['res.resid', 'res.model.exog'], {}), '(res.resid, res.model.exog)\n', (15731, 15758), True, 'import statsmodels.stats.diagnostic as smsdia\n'), ((15767, 15830), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['linear_sq[0]', 'linear_squares[0]'], {'decimal': '(6)'}), '(linear_sq[0], linear_squares[0], decimal=6)\n', (15786, 15830), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((15839, 15902), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['linear_sq[1]', 'linear_squares[1]'], {'decimal': '(7)'}), '(linear_sq[1], 
linear_squares[1], decimal=7)\n', (15858, 15902), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((15919, 15969), 'statsmodels.stats.diagnostic.het_breuschpagan', 'smsdia.het_breuschpagan', (['res.resid', 'res.model.exog'], {}), '(res.resid, res.model.exog)\n', (15942, 15969), True, 'import statsmodels.stats.diagnostic as smsdia\n'), ((15978, 16046), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['hbpk[0]', 'het_breusch_pagan_konker[0]'], {'decimal': '(6)'}), '(hbpk[0], het_breusch_pagan_konker[0], decimal=6)\n', (15997, 16046), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((16055, 16123), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['hbpk[1]', 'het_breusch_pagan_konker[1]'], {'decimal': '(6)'}), '(hbpk[1], het_breusch_pagan_konker[1], decimal=6)\n', (16074, 16123), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((16138, 16181), 'statsmodels.stats.diagnostic.het_white', 'smsdia.het_white', (['res.resid', 'res.model.exog'], {}), '(res.resid, res.model.exog)\n', (16154, 16181), True, 'import statsmodels.stats.diagnostic as smsdia\n'), ((16190, 16235), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['hw[:2]', 'het_white[:2]', '(6)'], {}), '(hw[:2], het_white[:2], 6)\n', (16209, 16235), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((16342, 16377), 'statsmodels.stats.diagnostic.het_arch', 'smsdia.het_arch', (['res.resid'], {'nlags': '(4)'}), '(res.resid, nlags=4)\n', (16357, 16377), True, 'import statsmodels.stats.diagnostic as smsdia\n'), ((16386, 16439), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['sm_arch[0]', 'arch_4[0]'], {'decimal': '(5)'}), '(sm_arch[0], arch_4[0], decimal=5)\n', (16405, 16439), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((16448, 16501), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['sm_arch[1]', 'arch_4[1]'], {'decimal': '(6)'}), '(sm_arch[1], arch_4[1], decimal=6)\n', (16467, 16501), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((16599, 16623), 'statsmodels.stats.outliers_influence.OLSInfluence', 'oi.OLSInfluence', (['res_ols'], {}), '(res_ols)\n', (16614, 16623), True, 'import statsmodels.stats.outliers_influence as oi\n'), ((16918, 16976), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (["lev['residual']", 'res.resid'], {'decimal': '(3)'}), "(lev['residual'], res.resid, decimal=3)\n", (16937, 16976), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((16985, 17046), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (["lev['DFFITS']", 'infl.dffits[0]'], {'decimal': '(3)'}), "(lev['DFFITS'], infl.dffits[0], decimal=3)\n", (17004, 17046), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((17055, 17124), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (["lev['leverage']", 'infl.hat_matrix_diag'], {'decimal': '(3)'}), "(lev['leverage'], infl.hat_matrix_diag, decimal=3)\n", (17074, 17124), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((17133, 17197), 'numpy.testing.assert_almost_equal', 
'assert_almost_equal', (["lev['influence']", 'infl.influence'], {'decimal': '(4)'}), "(lev['influence'], infl.influence, decimal=4)\n", (17152, 17197), False, 'from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_array_less\n'), ((17345, 17368), 'statsmodels.datasets.macrodata.load_pandas', 'macrodata.load_pandas', ([], {}), '()\n', (17366, 17368), False, 'from statsmodels.datasets import macrodata\n'), ((17723, 17760), 'numpy.abs', 'np.abs', (['(res1.params / res4.params - 1)'], {}), '(res1.params / res4.params - 1)\n', (17729, 17760), True, 'import numpy as np\n'), ((1000, 1023), 'statsmodels.datasets.macrodata.load_pandas', 'macrodata.load_pandas', ([], {}), '()\n', (1021, 1023), False, 'from statsmodels.datasets import macrodata\n'), ((4169, 4191), 'numpy.sqrt', 'np.sqrt', (['res.mse_resid'], {}), '(res.mse_resid)\n', (4176, 4191), True, 'import numpy as np\n'), ((5537, 5559), 'numpy.sqrt', 'np.sqrt', (['res.mse_resid'], {}), '(res.mse_resid)\n', (5544, 5559), True, 'import numpy as np\n'), ((13837, 13862), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (13852, 13862), False, 'import os\n'), ((14180, 14265), 'numpy.genfromtxt', 'np.genfromtxt', (['fpath'], {'skip_header': '(3)', 'skip_footer': '(2)', 'converters': '{(0): lambda s: s}'}), '(fpath, skip_header=3, skip_footer=2, converters={(0): lambda\n s: s})\n', (14193, 14265), True, 'import numpy as np\n'), ((14989, 15011), 'numpy.sqrt', 'np.sqrt', (['res.mse_resid'], {}), '(res.mse_resid)\n', (14996, 15011), True, 'import numpy as np\n'), ((16519, 16566), 'statsmodels.stats.outliers_influence.variance_inflation_factor', 'oi.variance_inflation_factor', (['res.model.exog', 'k'], {}), '(res.model.exog, k)\n', (16547, 16566), True, 'import statsmodels.stats.outliers_influence as oi\n'), ((17398, 17426), 'numpy.log', 'np.log', (["d2['realgdp'].values"], {}), "(d2['realgdp'].values)\n", (17404, 17426), True, 'import numpy as np\n'), ((17452, 17480), 'numpy.log', 'np.log', (["d2['realinv'].values"], {}), "(d2['realinv'].values)\n", (17458, 17480), True, 'import numpy as np\n'), ((17832, 17859), 'numpy.abs', 'np.abs', (['(res4.bse / res1.bse)'], {}), '(res4.bse / res1.bse)\n', (17838, 17859), True, 'import numpy as np\n'), ((1156, 1183), 'numpy.log', 'np.log', (["d['realinv'].values"], {}), "(d['realinv'].values)\n", (1162, 1183), True, 'import numpy as np\n'), ((1222, 1249), 'numpy.log', 'np.log', (["d['realgdp'].values"], {}), "(d['realgdp'].values)\n", (1228, 1249), True, 'import numpy as np\n'), ((1591, 1609), 'statsmodels.regression.linear_model.OLS', 'OLS', (['endogg', 'exogg'], {}), '(endogg, exogg)\n', (1594, 1609), False, 'from statsmodels.regression.linear_model import OLS, GLSAR\n'), ((1408, 1436), 'numpy.diff', 'np.diff', (["d['realgdp'].values"], {}), "(d['realgdp'].values)\n", (1415, 1436), True, 'import numpy as np\n')]
|
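The test above compares GLSAR with Gretl on the macrodata set. A minimal GLSAR sketch on synthetic data, separate from that comparison; the AR(1) coefficient of 0.5, the true coefficients and the sample size are arbitrary choices:

import numpy as np
from statsmodels.regression.linear_model import GLSAR
from statsmodels.tools.tools import add_constant

rng = np.random.default_rng(0)
exog = add_constant(rng.normal(size=200))      # columns: constant, regressor
u = np.zeros(200)
for t in range(1, 200):                         # AR(1) errors with rho = 0.5
    u[t] = 0.5 * u[t - 1] + rng.normal()
endog = exog @ np.array([1.0, 2.0]) + u

res = GLSAR(endog, exog, rho=1).iterative_fit(maxiter=10)
print(res.model.rho, res.params)               # estimated rho and coefficients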
from unittest import TestCase
from datetime import date
from keras_en_parser_and_analyzer.library.pipmp_my_cv_classify import detect_date
class DetectDate(TestCase):
def test_detect_date(self):
dates_to_test = ['10-1990', '09/12/2020', 'jan 1990', 'feb 2012', '9-12-2020']
res = detect_date(dates_to_test[0])
self.assertEqual(10, res.month)
self.assertEqual(1990, res.year)
res = detect_date(dates_to_test[1])
self.assertEqual(9, res.month)
self.assertEqual(2020, res.year)
res = detect_date(dates_to_test[2])
self.assertEqual(1, res.month)
self.assertEqual(1990, res.year)
res = detect_date(dates_to_test[3])
self.assertEqual(2, res.month)
self.assertEqual(2012, res.year)
res = detect_date(dates_to_test[4])
self.assertEqual(9, res.month)
self.assertEqual(2020, res.year)
|
[
"keras_en_parser_and_analyzer.library.pipmp_my_cv_classify.detect_date"
] |
[((301, 330), 'keras_en_parser_and_analyzer.library.pipmp_my_cv_classify.detect_date', 'detect_date', (['dates_to_test[0]'], {}), '(dates_to_test[0])\n', (312, 330), False, 'from keras_en_parser_and_analyzer.library.pipmp_my_cv_classify import detect_date\n'), ((426, 455), 'keras_en_parser_and_analyzer.library.pipmp_my_cv_classify.detect_date', 'detect_date', (['dates_to_test[1]'], {}), '(dates_to_test[1])\n', (437, 455), False, 'from keras_en_parser_and_analyzer.library.pipmp_my_cv_classify import detect_date\n'), ((550, 579), 'keras_en_parser_and_analyzer.library.pipmp_my_cv_classify.detect_date', 'detect_date', (['dates_to_test[2]'], {}), '(dates_to_test[2])\n', (561, 579), False, 'from keras_en_parser_and_analyzer.library.pipmp_my_cv_classify import detect_date\n'), ((674, 703), 'keras_en_parser_and_analyzer.library.pipmp_my_cv_classify.detect_date', 'detect_date', (['dates_to_test[3]'], {}), '(dates_to_test[3])\n', (685, 703), False, 'from keras_en_parser_and_analyzer.library.pipmp_my_cv_classify import detect_date\n'), ((798, 827), 'keras_en_parser_and_analyzer.library.pipmp_my_cv_classify.detect_date', 'detect_date', (['dates_to_test[4]'], {}), '(dates_to_test[4])\n', (809, 827), False, 'from keras_en_parser_and_analyzer.library.pipmp_my_cv_classify import detect_date\n')]
|
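The detect_date function exercised above is not shown in this row. A hypothetical sketch of an implementation that would satisfy these assertions; the library's real parser may differ:

from datetime import datetime

def detect_date_sketch(text):
    """Try a few candidate formats and return the first date that parses."""
    formats = ('%m-%Y', '%m/%d/%Y', '%b %Y', '%m-%d-%Y')
    for fmt in formats:
        try:
            return datetime.strptime(text.strip(), fmt).date()
        except ValueError:
            continue
    return None

print(detect_date_sketch('jan 1990'))   # 1990-01-01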
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Ipset iptables generator. This is a subclass of Iptables generator.
ipset is a system inside the Linux kernel, which can very efficiently store
and match IPv4 and IPv6 addresses. This can be used to dramatically increase
performance of an iptables firewall.
"""
import string
from capirca.lib import iptables
from capirca.lib import nacaddr
class Error(Exception):
"""Base error class."""
class Term(iptables.Term):
"""Single Ipset term representation."""
_PLATFORM = 'ipset'
_SET_MAX_LENGTH = 31
_POSTJUMP_FORMAT = None
_PREJUMP_FORMAT = None
_TERM_FORMAT = None
_COMMENT_FORMAT = string.Template(
'-A $filter -m comment --comment "$comment"')
_FILTER_TOP_FORMAT = string.Template('-A $filter')
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# This stores tuples of set name and set contents, keyed by direction.
# For example:
# { 'src': ('set_name', [ipaddr object, ipaddr object]),
# 'dst': ('set_name', [ipaddr object, ipaddr object]) }
self.addr_sets = {}
def _CalculateAddresses(self, src_addr_list, src_addr_exclude_list,
dst_addr_list, dst_addr_exclude_list):
"""Calculates source and destination address list for a term.
Since ipset is very efficient at matching large number of
    addresses, we never return any exclude addresses. Instead, the
    least positive match is calculated for both source and destination
addresses.
For source and destination address list, three cases are possible.
First case is when there are no addresses. In that case we return
_all_ips.
Second case is when there is strictly one address. In that case,
we optimize by not generating a set, and it's then the only
    element of the returned set.
    Third case is when there is more than one address in a set.
In that case we generate a set and also return _all_ips. Note the
difference to the first case where no set is actually generated.
Args:
src_addr_list: source address list of the term.
src_addr_exclude_list: source address exclude list of the term.
dst_addr_list: destination address list of the term.
dst_addr_exclude_list: destination address exclude list of the term.
Returns:
tuple containing source address list, source address exclude list,
destination address list, destination address exclude list in
that order.
"""
target_af = self.AF_MAP[self.af]
src_addr_list = self._CalculateAddrList(src_addr_list,
src_addr_exclude_list, target_af,
'src')
dst_addr_list = self._CalculateAddrList(dst_addr_list,
dst_addr_exclude_list, target_af,
'dst')
return (src_addr_list, [], dst_addr_list, [])
def _CalculateAddrList(self, addr_list, addr_exclude_list,
target_af, direction):
"""Calculates and stores address list for target AF and direction.
Args:
addr_list: address list.
addr_exclude_list: address exclude list of the term.
target_af: target address family.
direction: direction in which address list will be used.
Returns:
calculated address list.
"""
if not addr_list:
addr_list = [self._all_ips]
addr_list = [addr for addr in addr_list if addr.version == target_af]
if addr_exclude_list:
addr_exclude_list = [addr_exclude for addr_exclude in addr_exclude_list if
addr_exclude.version == target_af]
addr_list = nacaddr.ExcludeAddrs(addr_list, addr_exclude_list)
if len(addr_list) > 1:
set_name = self._GenerateSetName(self.term.name, direction)
self.addr_sets[direction] = (set_name, addr_list)
addr_list = [self._all_ips]
return addr_list
def _GenerateAddressStatement(self, src_addr, dst_addr):
"""Returns the address section of an individual iptables rule.
See _CalculateAddresses documentation. Three cases are possible here,
and they map directly to cases in _CalculateAddresses.
First, there can be no addresses for a direction (value is _all_ips then)
    In that case we return an empty string.
    Second, there can be strictly one address. In that case we return a single
    address match (-s or -d).
    Third case is when the value is _all_ips but the set for a particular
    direction is also present. That's when we return a set match.
Args:
src_addr: ipaddr address or network object with source
address of the rule.
dst_addr: ipaddr address or network object with destination
address of the rule.
Returns:
tuple containing source and destination address statement, in
that order.
"""
src_addr_stmt = ''
dst_addr_stmt = ''
if src_addr and dst_addr:
if src_addr == self._all_ips:
if 'src' in self.addr_sets:
src_addr_stmt = ('-m set --match-set %s src' %
self.addr_sets['src'][0])
else:
src_addr_stmt = '-s %s/%d' % (src_addr.network_address,
src_addr.prefixlen)
if dst_addr == self._all_ips:
if 'dst' in self.addr_sets:
dst_addr_stmt = ('-m set --match-set %s dst' %
self.addr_sets['dst'][0])
else:
dst_addr_stmt = '-d %s/%d' % (dst_addr.network_address,
dst_addr.prefixlen)
return (src_addr_stmt, dst_addr_stmt)
def _GenerateSetName(self, term_name, suffix):
if self.af == 'inet6':
suffix += '-v6'
if len(term_name) + len(suffix) + 1 > self._SET_MAX_LENGTH:
set_name_max_lenth = self._SET_MAX_LENGTH - len(suffix) - 1
term_name = term_name[:set_name_max_lenth]
return '%s-%s' % (term_name, suffix)
class Ipset(iptables.Iptables):
"""Ipset generator."""
_PLATFORM = 'ipset'
_SET_TYPE = 'hash:net'
SUFFIX = '.ips'
_TERM = Term
_MARKER_BEGIN = '# begin:ipset-rules'
_MARKER_END = '# end:ipset-rules'
_GOOD_OPTIONS = ['nostate', 'abbreviateterms', 'truncateterms', 'noverbose',
'exists']
  # TODO(vklimovs): some non-trivial processing is happening inside this
# __str__, replace with explicit method
def __str__(self):
# Actual rendering happens in __str__, so it has to be called
# before we do set specific part.
iptables_output = super().__str__()
output = []
output.append(self._MARKER_BEGIN)
for (_, _, _, _, terms) in self.iptables_policies:
for term in terms:
output.extend(self._GenerateSetConfig(term))
output.append(self._MARKER_END)
output.append(iptables_output)
return '\n'.join(output)
def _GenerateSetConfig(self, term):
"""Generates set configuration for supplied term.
Args:
term: input term.
Returns:
string that is configuration of supplied term.
"""
output = []
c_str = 'create'
a_str = 'add'
if 'exists' in self.filter_options:
c_str = c_str + ' -exist'
a_str = a_str + ' -exist'
for direction in sorted(term.addr_sets, reverse=True):
set_name, addr_list = term.addr_sets[direction]
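      # Size the ipset to the next power of two above the number of addresses.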
set_hashsize = 1 << len(addr_list).bit_length()
set_maxelem = set_hashsize
output.append('%s %s %s family %s hashsize %i maxelem %i' %
(c_str,
set_name,
self._SET_TYPE,
term.af,
set_hashsize,
set_maxelem))
for address in addr_list:
output.append('%s %s %s' % (a_str, set_name, address))
return output
|
[
"capirca.lib.nacaddr.ExcludeAddrs",
"string.Template"
] |
[((1205, 1266), 'string.Template', 'string.Template', (['"""-A $filter -m comment --comment "$comment\\""""'], {}), '(\'-A $filter -m comment --comment "$comment"\')\n', (1220, 1266), False, 'import string\n'), ((1297, 1326), 'string.Template', 'string.Template', (['"""-A $filter"""'], {}), "('-A $filter')\n", (1312, 1326), False, 'import string\n'), ((4251, 4301), 'capirca.lib.nacaddr.ExcludeAddrs', 'nacaddr.ExcludeAddrs', (['addr_list', 'addr_exclude_list'], {}), '(addr_list, addr_exclude_list)\n', (4271, 4301), False, 'from capirca.lib import nacaddr\n')]
|
import warnings
import numba
import numpy as np
import strax
import straxen
DEFAULT_MAX_SAMPLES = 20_000
@straxen.mini_analysis(requires=('records',),
warn_beyond_sec=10,
default_time_selection='touching')
def records_matrix(records, time_range, seconds_range, config, to_pe,
max_samples=DEFAULT_MAX_SAMPLES,
ignore_max_sample_warning=False):
"""Return (wv_matrix, times, pms)
- wv_matrix: (n_samples, n_pmt) array with per-PMT waveform intensity in PE/ns
- times: time labels in seconds (corr. to rows)
- pmts: PMT numbers (corr. to columns)
Both times and pmts have one extra element.
:param max_samples: Maximum number of time samples. If window and dt
conspire to exceed this, waveforms will be downsampled.
:param ignore_max_sample_warning: If True, suppress warning when this happens.
Example:
wvm, ts, ys = st.records_matrix(run_id, seconds_range=(1., 1.00001))
plt.pcolormesh(ts, ys, wvm.T,
norm=matplotlib.colors.LogNorm())
plt.colorbar(label='Intensity [PE / ns]')
"""
if len(records):
dt = records[0]['dt']
samples_per_record = len(records[0]['data'])
else:
# Defaults here do not matter, nothing will be plotted anyway
        dt, samples_per_record = 10, 110
record_duration = samples_per_record * dt
window = time_range[1] - time_range[0]
if window / dt > max_samples:
with np.errstate(divide='ignore', invalid='ignore'):
# Downsample. New dt must be
# a) multiple of old dt
dts = np.arange(0, record_duration + dt, dt).astype(np.int)
# b) divisor of record duration
dts = dts[record_duration / dts % 1 == 0]
# c) total samples < max_samples
dts = dts[window / dts < max_samples]
if len(dts):
# Pick lowest dt that satisfies criteria
dt = dts.min()
else:
# Records will be downsampled to single points
dt = max(record_duration, window // max_samples)
if not ignore_max_sample_warning:
warnings.warn(f"Matrix would exceed max_samples {max_samples}, "
f"downsampling to dt = {dt} ns.")
wvm = _records_to_matrix(
records,
t0=time_range[0],
n_channels=config['n_tpc_pmts'],
dt=dt,
window=window)
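    # Scale to PE/ns using the per-PMT conversion factors in to_pe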
wvm = wvm.astype(np.float32) * to_pe.reshape(1, -1) / dt
# Note + 1, so data for sample 0 will range from 0-1 in plot
ts = (np.arange(wvm.shape[0] + 1) * dt / int(1e9) + seconds_range[0])
ys = np.arange(wvm.shape[1] + 1)
return wvm, ts, ys
@straxen.mini_analysis(requires=('raw_records',),
warn_beyond_sec=3e-3,
default_time_selection='touching')
def raw_records_matrix(context, run_id, raw_records, time_range,
ignore_max_sample_warning=False,
max_samples=DEFAULT_MAX_SAMPLES,
**kwargs):
# Convert raw to records. We may not be able to baseline correctly
# at the start of the range due to missing zeroth fragments
records = strax.raw_to_records(raw_records)
strax.baseline(records, allow_sloppy_chunking=True)
strax.zero_out_of_bounds(records)
return context.records_matrix(run_id=run_id,
records=records,
time_range=time_range,
max_samples=max_samples,
ignore_max_sample_warning=ignore_max_sample_warning,
**kwargs)
@numba.njit
def _records_to_matrix(records, t0, window, n_channels, dt=10):
n_samples = (window // dt) + 1
# Use 32-bit integers, so downsampling saturated samples doesn't
# cause wraparounds
# TODO: amplitude bit shift!
y = np.zeros((n_samples, n_channels),
dtype=np.int32)
if not len(records):
return y
samples_per_record = len(records[0]['data'])
for r in records:
if r['channel'] > n_channels:
continue
if dt >= samples_per_record * r['dt']:
# Downsample to single sample -> store area
idx = (r['time'] - t0) // dt
if idx >= len(y):
print(len(y), idx)
raise IndexError('Despite n_samples = window // dt + 1, our '
'idx is too high?!')
y[idx, r['channel']] += r['area']
continue
# Assume out-of-bounds data has been zeroed, so we do not
# need to do r['data'][:r['length']] here.
# This simplifies downsampling.
w = r['data'].astype(np.int32)
if dt > r['dt']:
# Downsample
duration = samples_per_record * r['dt']
assert duration % dt == 0, "Cannot downsample fractionally"
# .astype here keeps numba happy ... ??
w = w.reshape(duration // dt, -1).sum(axis=1).astype(np.int32)
elif dt < r['dt']:
raise ValueError("Upsampling not yet implemented")
(r_start, r_end), (y_start, y_end) = strax.overlap_indices(
r['time'] // dt, len(w),
t0 // dt, n_samples)
# += is paranoid, data in individual channels should not overlap
# but... https://github.com/AxFoundation/strax/issues/119
y[y_start:y_end, r['channel']] += w[r_start:r_end]
return y
|
[
"strax.raw_to_records",
"numpy.errstate",
"numpy.zeros",
"strax.baseline",
"strax.zero_out_of_bounds",
"straxen.mini_analysis",
"warnings.warn",
"numpy.arange"
] |
[((111, 214), 'straxen.mini_analysis', 'straxen.mini_analysis', ([], {'requires': "('records',)", 'warn_beyond_sec': '(10)', 'default_time_selection': '"""touching"""'}), "(requires=('records',), warn_beyond_sec=10,\n default_time_selection='touching')\n", (132, 214), False, 'import straxen\n'), ((2754, 2864), 'straxen.mini_analysis', 'straxen.mini_analysis', ([], {'requires': "('raw_records',)", 'warn_beyond_sec': '(0.003)', 'default_time_selection': '"""touching"""'}), "(requires=('raw_records',), warn_beyond_sec=0.003,\n default_time_selection='touching')\n", (2775, 2864), False, 'import straxen\n'), ((2699, 2726), 'numpy.arange', 'np.arange', (['(wvm.shape[1] + 1)'], {}), '(wvm.shape[1] + 1)\n', (2708, 2726), True, 'import numpy as np\n'), ((3266, 3299), 'strax.raw_to_records', 'strax.raw_to_records', (['raw_records'], {}), '(raw_records)\n', (3286, 3299), False, 'import strax\n'), ((3304, 3355), 'strax.baseline', 'strax.baseline', (['records'], {'allow_sloppy_chunking': '(True)'}), '(records, allow_sloppy_chunking=True)\n', (3318, 3355), False, 'import strax\n'), ((3360, 3393), 'strax.zero_out_of_bounds', 'strax.zero_out_of_bounds', (['records'], {}), '(records)\n', (3384, 3393), False, 'import strax\n'), ((3989, 4038), 'numpy.zeros', 'np.zeros', (['(n_samples, n_channels)'], {'dtype': 'np.int32'}), '((n_samples, n_channels), dtype=np.int32)\n', (3997, 4038), True, 'import numpy as np\n'), ((1505, 1551), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (1516, 1551), True, 'import numpy as np\n'), ((2211, 2315), 'warnings.warn', 'warnings.warn', (['f"""Matrix would exceed max_samples {max_samples}, downsampling to dt = {dt} ns."""'], {}), "(\n f'Matrix would exceed max_samples {max_samples}, downsampling to dt = {dt} ns.'\n )\n", (2224, 2315), False, 'import warnings\n'), ((2626, 2653), 'numpy.arange', 'np.arange', (['(wvm.shape[0] + 1)'], {}), '(wvm.shape[0] + 1)\n', (2635, 2653), True, 'import numpy as np\n'), ((1649, 1687), 'numpy.arange', 'np.arange', (['(0)', '(record_duration + dt)', 'dt'], {}), '(0, record_duration + dt, dt)\n', (1658, 1687), True, 'import numpy as np\n')]
|
from collections import namedtuple
from cryptoconditions import crypto
CryptoKeypair = namedtuple('CryptoKeypair', ('signing_key', 'verifying_key'))
def generate_keypair():
"""Generates a cryptographic key pair.
Returns:
:class:`~bigchaindb_driver.crypto.CryptoKeypair`: A
:obj:`collections.namedtuple` with named fields
:attr:`~bigchaindb_driver.crypto.CryptoKeypair.signing_key` and
:attr:`~bigchaindb_driver.crypto.CryptoKeypair.verifying_key`.
"""
return CryptoKeypair(
*(k.decode() for k in crypto.ed25519_generate_key_pair()))
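# Illustrative usage (not part of the original module):
#   keys = generate_keypair()
#   keys.signing_key, keys.verifying_key  # decoded key strings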
|
[
"cryptoconditions.crypto.ed25519_generate_key_pair",
"collections.namedtuple"
] |
[((90, 151), 'collections.namedtuple', 'namedtuple', (['"""CryptoKeypair"""', "('signing_key', 'verifying_key')"], {}), "('CryptoKeypair', ('signing_key', 'verifying_key'))\n", (100, 151), False, 'from collections import namedtuple\n'), ((559, 593), 'cryptoconditions.crypto.ed25519_generate_key_pair', 'crypto.ed25519_generate_key_pair', ([], {}), '()\n', (591, 593), False, 'from cryptoconditions import crypto\n')]
|
# Dependencies
from aurora import Controller, View, Forms
from models import Users, Notes
from aurora.security import login_required, get_session
from flask import request
from datetime import datetime
# The controller class
class NewNote(Controller):
# POST Method
@login_required(app='users')
def post(self):
# The required models
user = Users().read(where={'username':get_session('user')}).first()
notes = Notes()
# Form data
data = request.form
form = Forms(data)
# Valid form data
if form.validate():
# Collect form inputs
title = data.get('title')
content = data.get('content')
# Required fields
if not title or not content:
return {
'error': '<i class="fas fa-exclamation-triangle mr-1"></i> Form data is invalid!',
}, 400
# Everything is fine
# Insert new note into the database
data = {
'user_id': user['id'],
'title': title,
'content': content,
# 'date': datetime.now().strftime("%m-%d-%Y")
}
notes.create(data=data)
# Return the result
return {
'success': '<i class="fas fa-check-circle mr-1"></i> The new note created successfully!',
}, 200
# Invalid form data
else:
# Return the result
return {
'error': '<i class="fas fa-exclamation-triangle mr-1"></i> Form data is invalid!',
}, 400
# GET Method
@login_required(app='users')
def get(self):
# The required models
user = Users().read(where={'username':get_session('user')}).first()
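        # Fetch the current user's notes, newest first, for the create view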
notes = Notes().read(where={'user_id':user['id']}, order_by={'id':'DESC'}).all()
form = Forms()
return View('create', user=user, form=form)
|
[
"models.Notes",
"aurora.security.login_required",
"models.Users",
"aurora.Forms",
"aurora.View",
"aurora.security.get_session"
] |
[((277, 304), 'aurora.security.login_required', 'login_required', ([], {'app': '"""users"""'}), "(app='users')\n", (291, 304), False, 'from aurora.security import login_required, get_session\n'), ((1662, 1689), 'aurora.security.login_required', 'login_required', ([], {'app': '"""users"""'}), "(app='users')\n", (1676, 1689), False, 'from aurora.security import login_required, get_session\n'), ((447, 454), 'models.Notes', 'Notes', ([], {}), '()\n', (452, 454), False, 'from models import Users, Notes\n'), ((519, 530), 'aurora.Forms', 'Forms', (['data'], {}), '(data)\n', (524, 530), False, 'from aurora import Controller, View, Forms\n'), ((1920, 1927), 'aurora.Forms', 'Forms', ([], {}), '()\n', (1925, 1927), False, 'from aurora import Controller, View, Forms\n'), ((1952, 1988), 'aurora.View', 'View', (['"""create"""'], {'user': 'user', 'form': 'form'}), "('create', user=user, form=form)\n", (1956, 1988), False, 'from aurora import Controller, View, Forms\n'), ((370, 377), 'models.Users', 'Users', ([], {}), '()\n', (375, 377), False, 'from models import Users, Notes\n'), ((1754, 1761), 'models.Users', 'Users', ([], {}), '()\n', (1759, 1761), False, 'from models import Users, Notes\n'), ((1831, 1838), 'models.Notes', 'Notes', ([], {}), '()\n', (1836, 1838), False, 'from models import Users, Notes\n'), ((401, 420), 'aurora.security.get_session', 'get_session', (['"""user"""'], {}), "('user')\n", (412, 420), False, 'from aurora.security import login_required, get_session\n'), ((1785, 1804), 'aurora.security.get_session', 'get_session', (['"""user"""'], {}), "('user')\n", (1796, 1804), False, 'from aurora.security import login_required, get_session\n')]
|
# --------------
#Importing header files
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
#Code starts here
data = pd.read_csv(path)
data['Rating'].hist()
data = data[data['Rating']<=5]
data['Rating'].hist()
#Code ends here
# --------------
# code starts here
total_null = data.isnull().sum()
percent_null = (total_null/data.isnull().count())*100
missing_data = pd.concat([total_null,percent_null],axis=1,keys=['Total','Percentage'])
print(missing_data)
data = data.dropna()
total_null_1 = data.isnull().sum()
percent_null_1 = (total_null_1/data.isnull().count())*100
missing_data_1 = pd.concat([total_null_1,percent_null_1],axis=1,keys=['Total','Percentage'])
print(missing_data_1)
# code ends here
# --------------
#Code starts here
a = sns.catplot(x='Category',y='Rating',data=data, kind="box", height = 10)
a.set_xticklabels(rotation=90)
a.set_titles('Rating vs Category [BoxPlot]')
#Code ends here
# --------------
#Importing header files
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
#Code starts here
le = LabelEncoder()
#data['Installs'] = data['Installs'].str.replace(',','').str.replace('+','')
data['Installs'] = data['Installs'].apply(lambda x : x.replace(',','')).apply(lambda x : x.replace('+',''))
data['Installs'] =data['Installs'].astype(int)
print(data['Installs'])
data['Installs'] = le.fit_transform(data['Installs'])
a = sns.regplot(x="Installs", y="Rating" , data=data)
a.set_title('Rating vs Installs [RegPlot]')
#Code ends here
# --------------
#Code starts here
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
import seaborn as sns
#Code starts here
d=data['Price'].value_counts()
print(d)
data['Price']=data['Price'].apply(lambda x : x.replace('$',''))
d=data['Price'].value_counts()
print(d)
data['Price']=data['Price'].astype(float)
#le=LabelEncoder()
#data['Installs'] = le.fit_transform(data['Installs'])
y=sns.regplot(data=data,x='Price',y='Rating')
y.set_title('Rating vs Installs [RegPlot]')
#Code ends here
# --------------
#Code starts here
data['Genres']=data['Genres'].str.split(';').str[0]
#print(data['Genres'])
df=data[['Genres','Rating']]
gr_mean=df.groupby(['Genres'],as_index=False).mean()
gr_mean=gr_mean.sort_values(by=['Rating'])
gr_mean=pd.DataFrame(gr_mean)
print(gr_mean)#,gr_mean[-1,:])
#Code ends here
# --------------
#Code starts here
import seaborn as sns
data['Last Updated'] = pd.to_datetime(data['Last Updated'])
print(data['Last Updated'].max())
max_date=data['Last Updated'].max()
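# Days elapsed since each app's last update, measured from the newest update in the data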
data['Last Updated Days']=max_date-data['Last Updated']
data['Last Updated Days']=data['Last Updated Days'].dt.days
sns.regplot(data=data,x='Last Updated Days',y='Rating').set_title('Rating vs Last Updated [RegPlot]')
#Code ends here
|
[
"sklearn.preprocessing.LabelEncoder",
"seaborn.regplot",
"pandas.read_csv",
"seaborn.catplot",
"pandas.DataFrame",
"pandas.concat",
"pandas.to_datetime"
] |
[((142, 159), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (153, 159), True, 'import pandas as pd\n'), ((392, 467), 'pandas.concat', 'pd.concat', (['[total_null, percent_null]'], {'axis': '(1)', 'keys': "['Total', 'Percentage']"}), "([total_null, percent_null], axis=1, keys=['Total', 'Percentage'])\n", (401, 467), True, 'import pandas as pd\n'), ((616, 695), 'pandas.concat', 'pd.concat', (['[total_null_1, percent_null_1]'], {'axis': '(1)', 'keys': "['Total', 'Percentage']"}), "([total_null_1, percent_null_1], axis=1, keys=['Total', 'Percentage'])\n", (625, 695), True, 'import pandas as pd\n'), ((774, 845), 'seaborn.catplot', 'sns.catplot', ([], {'x': '"""Category"""', 'y': '"""Rating"""', 'data': 'data', 'kind': '"""box"""', 'height': '(10)'}), "(x='Category', y='Rating', data=data, kind='box', height=10)\n", (785, 845), True, 'import seaborn as sns\n'), ((1067, 1081), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (1079, 1081), False, 'from sklearn.preprocessing import MinMaxScaler, LabelEncoder\n'), ((1396, 1444), 'seaborn.regplot', 'sns.regplot', ([], {'x': '"""Installs"""', 'y': '"""Rating"""', 'data': 'data'}), "(x='Installs', y='Rating', data=data)\n", (1407, 1444), True, 'import seaborn as sns\n'), ((1909, 1954), 'seaborn.regplot', 'sns.regplot', ([], {'data': 'data', 'x': '"""Price"""', 'y': '"""Rating"""'}), "(data=data, x='Price', y='Rating')\n", (1920, 1954), True, 'import seaborn as sns\n'), ((2263, 2284), 'pandas.DataFrame', 'pd.DataFrame', (['gr_mean'], {}), '(gr_mean)\n', (2275, 2284), True, 'import pandas as pd\n'), ((2417, 2453), 'pandas.to_datetime', 'pd.to_datetime', (["data['Last Updated']"], {}), "(data['Last Updated'])\n", (2431, 2453), True, 'import pandas as pd\n'), ((2641, 2698), 'seaborn.regplot', 'sns.regplot', ([], {'data': 'data', 'x': '"""Last Updated Days"""', 'y': '"""Rating"""'}), "(data=data, x='Last Updated Days', y='Rating')\n", (2652, 2698), True, 'import seaborn as sns\n')]
|
import numpy as np
from openpnm.algorithms import ReactiveTransport
from openpnm.models.physics import generic_source_term as gst
from openpnm.utils import logging
logger = logging.getLogger(__name__)
class ChargeConservation(ReactiveTransport):
r"""
A class to enforce charge conservation in ionic transport simulations.
Parameters
----------
network : OpenPNM Network object
The network on which this algorithm operates
project : OpenPNM Project object
Either a network or a project must be specified
name : string, optional
A unique name to give the object for easier identification. If not
given, one is generated.
"""
def __init__(self, settings={}, phase=None, **kwargs):
def_set = {'phase': None,
'quantity': 'pore.potential',
'conductance': 'throat.ionic_conductance',
'charge_conservation': 'electroneutrality',
'gui': {'setup': {'phase': None,
'quantity': '',
'conductance': '',
'charge_conservation': ''},
'set_rate_BC': {'pores': None,
'values': None},
'set_value_BC': {'pores': None,
'values': None},
'set_source': {'pores': None,
'propname': ''}
}
}
super().__init__(**kwargs)
self.settings.update(def_set)
self.settings.update(settings)
if phase is not None:
self.setup(phase=phase)
def setup(self, phase=None, quantity='', conductance='',
charge_conservation=None, **kwargs):
r"""
This method takes several arguments that are essential to running the
algorithm and adds them to the settings.
Parameters
----------
phase : OpenPNM Phase object
The phase on which the algorithm is to be run.
quantity : string
            (default is ``'pore.potential'``) The name of the physical
quantity to be calculated.
conductance : string
            (default is ``'throat.ionic_conductance'``) The name of the
pore-scale transport conductance values. These are typically
calculated by a model attached to a *Physics* object associated
with the given *Phase*.
charge_conservation : string
The assumption adopted to enforce charge conservation when
performing ions transport simulations (default is
"electroneutrality").
Notes
-----
Any additional arguments are added to the ``settings`` dictionary of
the object.
"""
if phase:
self.settings['phase'] = phase.name
if quantity:
self.settings['quantity'] = quantity
if conductance:
self.settings['conductance'] = conductance
if charge_conservation:
self.settings['charge_conservation'] = charge_conservation
super().setup(**kwargs)
def _charge_conservation_eq_source_term(self, e_alg):
# Source term for Poisson or charge conservation (electroneutrality) eq
phase = self.project.phases()[self.settings['phase']]
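        # Apply the source term only to pores without value or rate boundary conditions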
Ps = (self['pore.all'] * np.isnan(self['pore.bc_value']) *
np.isnan(self['pore.bc_rate']))
mod = gst.charge_conservation
phys = self.project.find_physics(phase=phase)
phys[0].add_model(propname='pore.charge_conservation', model=mod,
phase=phase, p_alg=self, e_alg=e_alg,
assumption=self.settings['charge_conservation'])
self.set_source(propname='pore.charge_conservation', pores=Ps)
|
[
"numpy.isnan",
"openpnm.utils.logging.getLogger"
] |
[((173, 200), 'openpnm.utils.logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (190, 200), False, 'from openpnm.utils import logging\n'), ((3600, 3630), 'numpy.isnan', 'np.isnan', (["self['pore.bc_rate']"], {}), "(self['pore.bc_rate'])\n", (3608, 3630), True, 'import numpy as np\n'), ((3552, 3583), 'numpy.isnan', 'np.isnan', (["self['pore.bc_value']"], {}), "(self['pore.bc_value'])\n", (3560, 3583), True, 'import numpy as np\n')]
|
from jno.util import interpret_configs
from jno.util import run_arduino_process
from jno.util import create_build_directory
from jno.util import get_common_parameters
from jno.util import verify_arduino_dir
from jno.util import verify_and_get_port
from jno.util import JnoException
from jno.commands.command import Command
import getopt
from colorama import Fore
class Upload(Command):
help_name = "Upload"
help_usage = "jno upload [-b, --board=] boardname [-p, --ports=] port [-v, --verbose]"
help_description = "Runs build and uploads to board. Without arguments, uses board/port defined locally/globally. " \
"If port is not defined, uses first available port. With -v, more info will be displayed during upload."
def run(self,argv,location):
jno_dict = interpret_configs()
verify_arduino_dir(jno_dict)
create_build_directory(jno_dict)
arg_list = self.perform_upload(argv,jno_dict)
run_arduino_process(arg_list)
# Create argument list for arduino build
def perform_upload(self,argv,jno_dict):
# assemble command query
# GOAL: <arduino exec> --upload <script> --board <board> --port <serial>
arg_list = [jno_dict["EXEC_SCRIPT"]]
# add common params - set pref
arg_list.extend(get_common_parameters(jno_dict))
# add upload params
arg_list.append("--upload")
arg_list.append(jno_dict["SKETCH_INO"])
try:
opts,args = getopt.getopt(argv, 'b:p:v',['board=','port=','verbose'])
except getopt.GetoptError as e:
raise JnoException(str(e))
for opt, arg in opts:
if opt in ("-b","--board"):
jno_dict["board"] = arg.strip()
elif opt in ("-p","--port"):
jno_dict["port"] = arg.strip()
elif opt in ("-v","--verbose"):
arg_list.append("--verbose")
# verify port or get first available
port = verify_and_get_port(jno_dict["port"])
if not port:
if jno_dict["port"] == "DEFAULT":
raise JnoException("no ports available")
raise JnoException("port does not exist: {}".format(jno_dict["port"]))
else:
if jno_dict["port"] == "DEFAULT":
print("{1}No port provided, using available port {0}{2}".format(port,Fore.YELLOW,Fore.RESET))
# add board params
arg_list.append("--board")
arg_list.append(self.formatBoard(jno_dict["board"],jno_dict))
# add port params
arg_list.append("--port")
arg_list.append(port)
return arg_list
|
[
"getopt.getopt",
"jno.util.interpret_configs",
"jno.util.get_common_parameters",
"jno.util.verify_arduino_dir",
"jno.util.verify_and_get_port",
"jno.util.JnoException",
"jno.util.run_arduino_process",
"jno.util.create_build_directory"
] |
[((769, 788), 'jno.util.interpret_configs', 'interpret_configs', ([], {}), '()\n', (786, 788), False, 'from jno.util import interpret_configs\n'), ((791, 819), 'jno.util.verify_arduino_dir', 'verify_arduino_dir', (['jno_dict'], {}), '(jno_dict)\n', (809, 819), False, 'from jno.util import verify_arduino_dir\n'), ((822, 854), 'jno.util.create_build_directory', 'create_build_directory', (['jno_dict'], {}), '(jno_dict)\n', (844, 854), False, 'from jno.util import create_build_directory\n'), ((905, 934), 'jno.util.run_arduino_process', 'run_arduino_process', (['arg_list'], {}), '(arg_list)\n', (924, 934), False, 'from jno.util import run_arduino_process\n'), ((1760, 1797), 'jno.util.verify_and_get_port', 'verify_and_get_port', (["jno_dict['port']"], {}), "(jno_dict['port'])\n", (1779, 1797), False, 'from jno.util import verify_and_get_port\n'), ((1213, 1244), 'jno.util.get_common_parameters', 'get_common_parameters', (['jno_dict'], {}), '(jno_dict)\n', (1234, 1244), False, 'from jno.util import get_common_parameters\n'), ((1364, 1424), 'getopt.getopt', 'getopt.getopt', (['argv', '"""b:p:v"""', "['board=', 'port=', 'verbose']"], {}), "(argv, 'b:p:v', ['board=', 'port=', 'verbose'])\n", (1377, 1424), False, 'import getopt\n'), ((1860, 1894), 'jno.util.JnoException', 'JnoException', (['"""no ports available"""'], {}), "('no ports available')\n", (1872, 1894), False, 'from jno.util import JnoException\n')]
|
import argparse
import json
import logging
import os
import torch
from transformers.file_utils import ModelOutput
from typing import Dict, Optional, Tuple
from torch.utils.data import DataLoader, SequentialSampler
from transformers.modeling_outputs import Seq2SeqLMOutput
import train_seq2seq_utils
import single_head_utils
import multi_head_utils
from torch import nn
from generation_utils_multi_attribute import GenerationMixinCustomCombined
from transformers import (
PreTrainedModel,
PreTrainedTokenizer,
BartConfig,
BartTokenizer
)
logger = logging.getLogger(__name__)
MODEL_CLASSES = {"bart_mult_heads_2": (BartConfig,
multi_head_utils.ConditionalGenerationCustomBartMultHeads,
BartTokenizer),
}
class Seq2SeqLMOutput(ModelOutput):
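    # Shadows the imported Seq2SeqLMOutput, adding a separate past_key_values cache for each of the two combined models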
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
past_key_values_1: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
past_key_values_2: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
class BartModelCombined(GenerationMixinCustomCombined, nn.Module):
def __init__(self, model1, model2, config: BartConfig):
super().__init__()
self.model1 = model1
self.model2 = model2
self.config = config
self.device = model2.device
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
encoder_outputs_1=None,
encoder_outputs_2=None,
past_key_values_1=None,
past_key_values_2=None,
inputs_embeds=None,
use_cache=None,
output_attentions=False,
output_hidden_states=False,
return_dict=None,
use_mixed=False,
use_head_1=0,
use_head_2=0,
gate_prob=0.5,
):
args1 = {'input_ids': input_ids,
'attention_mask': attention_mask,
'decoder_input_ids': decoder_input_ids,
'decoder_attention_mask': decoder_attention_mask,
'head_mask': head_mask,
'decoder_head_mask': decoder_head_mask,
'cross_attn_head_mask': cross_attn_head_mask,
'encoder_outputs': encoder_outputs_1,
'past_key_values': past_key_values_1,
'inputs_embeds': inputs_embeds,
'use_cache': use_cache,
'output_attentions': False,
'output_hidden_states': False,
'return_dict': None,
'use_mixed': False,
'use_head': use_head_1,
}
out1 = self.model1(**args1)
softmax_0 = torch.exp(out1.logits)
args2 = {'input_ids': input_ids,
'attention_mask': attention_mask,
'decoder_input_ids': decoder_input_ids,
'decoder_attention_mask': decoder_attention_mask,
'head_mask': head_mask,
'decoder_head_mask': decoder_head_mask,
'cross_attn_head_mask': cross_attn_head_mask,
'encoder_outputs': encoder_outputs_2,
'past_key_values': past_key_values_2,
'inputs_embeds': inputs_embeds,
'use_cache': use_cache,
'output_attentions': output_attentions,
'output_hidden_states': output_hidden_states,
'return_dict': None,
'use_mixed': False,
'use_head': use_head_2,
}
out2 = self.model2(**args2)
softmax_1 = torch.exp(out2.logits)
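        # Mix the two models' token distributions in probability space, weighted
        # by gate_prob, then return to log space for the combined logits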
softmax_0 = softmax_0 * gate_prob
softmax_1 = softmax_1 * (1 - gate_prob)
lm_logits = torch.log(softmax_0 + softmax_1)
return_output = Seq2SeqLMOutput(
logits=lm_logits,
past_key_values_1=out1.past_key_values,
past_key_values_2=out2.past_key_values)
return return_output
# unchanged
def prepare_inputs_for_generation(
self,
decoder_input_ids,
past_1=None,
past_2=None,
attention_mask=None,
head_mask=None,
use_cache=None,
encoder_outputs_1=None,
encoder_outputs_2=None,
**kwargs
):
# cut decoder_input_ids if past is used
if past_1 is not None and past_2 is not None:
decoder_input_ids = decoder_input_ids[:, -1:]
return {
"input_ids": None, # encoder_outputs is defined. input_ids not needed
"encoder_outputs_1": encoder_outputs_1,
"encoder_outputs_2": encoder_outputs_2,
"past_key_values_1": past_1,
"past_key_values_2": past_2,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"head_mask": head_mask,
"use_cache": use_cache, # change this to avoid caching (presumably for debugging)
}
def load_model(path):
args = json.load(open(path))
config_class, model_class = BartConfig, multi_head_utils.ConditionalGenerationCustomBartMultHeads
config = config_class.from_pretrained(args['path'])
model = model_class.from_pretrained(
args['path'],
from_tf=bool(".ckpt" in args['path']),
config=config)
return model, args, config
def evaluate(args, eval_dataset, model: PreTrainedModel, args1, args2, tokenizer: PreTrainedTokenizer,
suffix="") -> Dict:
eval_output_dir = args.output_dir
if not os.path.exists(eval_output_dir):
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# Eval!
logger.info("***** Running evaluation *****")
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
if args.generate:
f_out = open(os.path.join(eval_output_dir, 'test_out%s.txt' % suffix), 'w')
print(eval_output_dir)
k = 0
with torch.no_grad():
model.eval()
for batch in eval_dataloader:
batch = tuple(t.to(args.device) for t in batch)
input_ids, input_attention_mask, decoder_ids = batch[0], batch[1], batch[2]
for j in range(input_ids.shape[0]):
gold = tokenizer.decode(decoder_ids[j], skip_special_tokens=True)
input = tokenizer.decode(input_ids[j], skip_special_tokens=True)
input_args = {'input_ids': input_ids[j].unsqueeze(0),
'attention_mask': input_attention_mask[j].unsqueeze(0), 'num_beams': 6,
'length_penalty': 2, 'no_repeat_ngram_size': 3, 'max_length': 200, 'min_length': 12,
'top_k': 30, 'top_p': 0.5, 'do_sample': True,
'decoder_start_token_id': tokenizer.bos_token_id, 'num_return_sequences': 1,
'gate_prob': args.gate_probability, 'use_head_1': args1['use_head'],
'use_head_2': args2['use_head']}
gen = model.generate(**input_args)
gen = [tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=True) for g in
gen]
# gen = gen[0]
print(gen[0].strip())
f_out.write(input + '\n')
f_out.write(gold + '\n')
for g in gen:
f_out.write(g.strip() + '\n')
f_out.write('\n')
k += 1
if k > 1000:
break
f_out.close()
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_type",
default=None,
type=str,
help="base model, used to load tokenizer",
)
parser.add_argument(
"--model_1_config",
default=None,
type=str,
help="Path to model 1 config",
)
parser.add_argument(
"--model_2_config",
default=None,
type=str,
required=True,
help="Path to model 2 config",
)
parser.add_argument(
"--test_data_file",
default=None,
type=str,
required=True,
help="Evaluation data file to evaluate the perplexity on (a text file).",
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
# Other parameters
parser.add_argument(
"--max_seq_length",
default=1024,
type=int,
help="The maximum total input sequence length after tokenization.",
)
parser.add_argument(
"--max_decoder_length",
default=128,
type=int,
help="The maximum total decoder sequence length after tokenization.",
)
parser.add_argument("--per_gpu_eval_batch_size", default=32, type=int, help="Batch size evaluation.", )
parser.add_argument("--gpu_device", type=int, default=0, help="gpu device")
parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached data sets", )
# custom flags
parser.add_argument("--generate", action="store_true", help="Generate summaries for dev set", )
parser.add_argument("--dump_posteriors", action="store_true", help="Dump posterior probs at intermediate steps", )
parser.add_argument("--gate_probability", type=float, default=None, help="gate prob")
args = parser.parse_args()
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
args.n_gpu = 1
device = torch.device("cuda", args.gpu_device)
args.device = device
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
filename=os.path.join(args.output_dir, 'model.log')
)
# Set seed
model1, args1, config = load_model(args.model_1_config)
model1.to(args.device)
model2, args2, _ = load_model(args.model_2_config)
model2.to(args.device)
f_out = open(os.path.join(args.output_dir, 'model_configs.json'), 'w')
json.dump(args1, f_out)
f_out.write('\n')
json.dump(args2, f_out)
f_out.write('\n')
json.dump({'gate_prob': args.gate_probability}, f_out)
f_out.write('\n')
f_out.close()
tokenizer = BartTokenizer.from_pretrained('facebook/bart-large')
model = BartModelCombined(model1, model2, config)
eval_dataset = train_seq2seq_utils.load_and_cache_examples(args, tokenizer, 'test')
evaluate(args, eval_dataset, model, args1, args2, tokenizer, 'final')
logger.info("Training/evaluation parameters %s", args)
if __name__ == "__main__":
main()
|
[
"logging.getLogger",
"os.path.exists",
"torch.log",
"argparse.ArgumentParser",
"os.makedirs",
"train_seq2seq_utils.load_and_cache_examples",
"transformers.modeling_outputs.Seq2SeqLMOutput",
"torch.utils.data.SequentialSampler",
"os.path.join",
"torch.exp",
"torch.no_grad",
"torch.utils.data.DataLoader",
"transformers.BartTokenizer.from_pretrained",
"json.dump",
"torch.device"
] |
[((564, 591), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (581, 591), False, 'import logging\n'), ((5969, 6000), 'torch.utils.data.SequentialSampler', 'SequentialSampler', (['eval_dataset'], {}), '(eval_dataset)\n', (5986, 6000), False, 'from torch.utils.data import DataLoader, SequentialSampler\n'), ((6023, 6102), 'torch.utils.data.DataLoader', 'DataLoader', (['eval_dataset'], {'sampler': 'eval_sampler', 'batch_size': 'args.eval_batch_size'}), '(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)\n', (6033, 6102), False, 'from torch.utils.data import DataLoader, SequentialSampler\n'), ((8211, 8236), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (8234, 8236), False, 'import argparse\n'), ((10279, 10316), 'torch.device', 'torch.device', (['"""cuda"""', 'args.gpu_device'], {}), "('cuda', args.gpu_device)\n", (10291, 10316), False, 'import torch\n'), ((10858, 10881), 'json.dump', 'json.dump', (['args1', 'f_out'], {}), '(args1, f_out)\n', (10867, 10881), False, 'import json\n'), ((10908, 10931), 'json.dump', 'json.dump', (['args2', 'f_out'], {}), '(args2, f_out)\n', (10917, 10931), False, 'import json\n'), ((10958, 11012), 'json.dump', 'json.dump', (["{'gate_prob': args.gate_probability}", 'f_out'], {}), "({'gate_prob': args.gate_probability}, f_out)\n", (10967, 11012), False, 'import json\n'), ((11070, 11122), 'transformers.BartTokenizer.from_pretrained', 'BartTokenizer.from_pretrained', (['"""facebook/bart-large"""'], {}), "('facebook/bart-large')\n", (11099, 11122), False, 'from transformers import PreTrainedModel, PreTrainedTokenizer, BartConfig, BartTokenizer\n'), ((11197, 11265), 'train_seq2seq_utils.load_and_cache_examples', 'train_seq2seq_utils.load_and_cache_examples', (['args', 'tokenizer', '"""test"""'], {}), "(args, tokenizer, 'test')\n", (11240, 11265), False, 'import train_seq2seq_utils\n'), ((2919, 2941), 'torch.exp', 'torch.exp', (['out1.logits'], {}), '(out1.logits)\n', (2928, 2941), False, 'import torch\n'), ((3832, 3854), 'torch.exp', 'torch.exp', (['out2.logits'], {}), '(out2.logits)\n', (3841, 3854), False, 'import torch\n'), ((3967, 3999), 'torch.log', 'torch.log', (['(softmax_0 + softmax_1)'], {}), '(softmax_0 + softmax_1)\n', (3976, 3999), False, 'import torch\n'), ((4024, 4141), 'transformers.modeling_outputs.Seq2SeqLMOutput', 'Seq2SeqLMOutput', ([], {'logits': 'lm_logits', 'past_key_values_1': 'out1.past_key_values', 'past_key_values_2': 'out2.past_key_values'}), '(logits=lm_logits, past_key_values_1=out1.past_key_values,\n past_key_values_2=out2.past_key_values)\n', (4039, 4141), False, 'from transformers.modeling_outputs import Seq2SeqLMOutput\n'), ((5802, 5833), 'os.path.exists', 'os.path.exists', (['eval_output_dir'], {}), '(eval_output_dir)\n', (5816, 5833), False, 'import os\n'), ((5843, 5871), 'os.makedirs', 'os.makedirs', (['eval_output_dir'], {}), '(eval_output_dir)\n', (5854, 5871), False, 'import os\n'), ((10176, 10207), 'os.path.exists', 'os.path.exists', (['args.output_dir'], {}), '(args.output_dir)\n', (10190, 10207), False, 'import os\n'), ((10217, 10245), 'os.makedirs', 'os.makedirs', (['args.output_dir'], {}), '(args.output_dir)\n', (10228, 10245), False, 'import os\n'), ((10796, 10847), 'os.path.join', 'os.path.join', (['args.output_dir', '"""model_configs.json"""'], {}), "(args.output_dir, 'model_configs.json')\n", (10808, 10847), False, 'import os\n'), ((6327, 6383), 'os.path.join', 'os.path.join', (['eval_output_dir', "('test_out%s.txt' % suffix)"], {}), "(eval_output_dir, 'test_out%s.txt' % suffix)\n", (6339, 6383), False, 'import os\n'), ((6449, 6464), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6462, 6464), False, 'import torch\n'), ((10543, 10585), 'os.path.join', 'os.path.join', (['args.output_dir', '"""model.log"""'], {}), "(args.output_dir, 'model.log')\n", (10555, 10585), False, 'import os\n')]
|
# Copyright 2019 The MACE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import os
import copy
import yaml
from enum import Enum
from utils.util import mace_check
from utils.util import MaceLogger
from py_proto import mace_pb2
CPP_KEYWORDS = [
'alignas', 'alignof', 'and', 'and_eq', 'asm', 'atomic_cancel',
'atomic_commit', 'atomic_noexcept', 'auto', 'bitand', 'bitor',
'bool', 'break', 'case', 'catch', 'char', 'char16_t', 'char32_t',
'class', 'compl', 'concept', 'const', 'constexpr', 'const_cast',
'continue', 'co_await', 'co_return', 'co_yield', 'decltype', 'default',
'delete', 'do', 'double', 'dynamic_cast', 'else', 'enum', 'explicit',
'export', 'extern', 'false', 'float', 'for', 'friend', 'goto', 'if',
'import', 'inline', 'int', 'long', 'module', 'mutable', 'namespace',
'new', 'noexcept', 'not', 'not_eq', 'nullptr', 'operator', 'or', 'or_eq',
'private', 'protected', 'public', 'register', 'reinterpret_cast',
'requires', 'return', 'short', 'signed', 'sizeof', 'static',
'static_assert', 'static_cast', 'struct', 'switch', 'synchronized',
'template', 'this', 'thread_local', 'throw', 'true', 'try', 'typedef',
'typeid', 'typename', 'union', 'unsigned', 'using', 'virtual', 'void',
'volatile', 'wchar_t', 'while', 'xor', 'xor_eq', 'override', 'final',
'transaction_safe', 'transaction_safe_dynamic', 'if', 'elif', 'else',
'endif', 'defined', 'ifdef', 'ifndef', 'define', 'undef', 'include',
'line', 'error', 'pragma',
]
def sanitize_load(s):
# do not let yaml parse ON/OFF to boolean
for w in ["ON", "OFF", "on", "off"]:
s = re.sub(r":\s+" + w + "$", r": '" + w + "'", s)
# sub ${} to env value
s = re.sub(r"\${(\w+)}", lambda x: os.environ[x.group(1)], s)
return yaml.load(s)
def parse(path):
with open(path) as f:
config = sanitize_load(f.read())
return config
def parse_device_info(path):
conf = parse(path)
return conf["devices"]
class ModelKeys(object):
platform = "platform"
runtime = "runtime"
models = 'models'
graph_optimize_options = "graph_optimize_options"
input_tensors = "input_tensors"
input_shapes = "input_shapes"
input_data_types = "input_data_types"
input_data_formats = "input_data_formats"
input_ranges = "input_ranges"
output_tensors = "output_tensors"
output_shapes = "output_shapes"
output_data_types = "output_data_types"
output_data_formats = "output_data_formats"
check_tensors = "check_tensors"
check_shapes = "check_shapes"
model_file_path = "model_file_path"
model_sha256_checksum = "model_sha256_checksum"
weight_file_path = "weight_file_path"
weight_sha256_checksum = "weight_sha256_checksum"
quantize_range_file = "quantize_range_file"
quantize = "quantize"
quantize_schema = "quantize_schema"
quantize_large_weights = "quantize_large_weights"
quantize_stat = "quantize_stat"
change_concat_ranges = "change_concat_ranges"
winograd = "winograd"
cl_mem_type = "cl_mem_type"
data_type = "data_type"
subgraphs = "subgraphs"
validation_inputs_data = "validation_inputs_data"
class DataFormat(Enum):
NONE = 0
NHWC = 1
NCHW = 2
HWIO = 100
OIHW = 101
HWOI = 102
OHWI = 103
AUTO = 1000
def parse_data_format(str):
str = str.upper()
mace_check(str in [e.name for e in DataFormat],
"unknown data format %s" % str)
return DataFormat[str]
class DeviceType(Enum):
CPU = 0
GPU = 2
HEXAGON = 3
HTA = 4
APU = 5
CPU_GPU = 100
DEVICE_MAP = {
"cpu": DeviceType.CPU,
"gpu": DeviceType.GPU,
"hexagon": DeviceType.HEXAGON,
"dsp": DeviceType.HEXAGON,
"hta": DeviceType.HTA,
"apu": DeviceType.APU,
"cpu+gpu": DeviceType.CPU_GPU
}
def parse_device_type(str):
mace_check(str in DEVICE_MAP, "unknown device %s" % str)
return DEVICE_MAP[str]
class Platform(Enum):
TENSORFLOW = 0
CAFFE = 1
ONNX = 2
MEGENGINE = 3
def parse_platform(str):
str = str.upper()
mace_check(str in [e.name for e in Platform],
"unknown platform %s" % str)
return Platform[str]
DATA_TYPE_MAP = {
'float32': mace_pb2.DT_FLOAT,
'int32': mace_pb2.DT_INT32,
}
def parse_data_type(str):
if str == "float32":
return mace_pb2.DT_FLOAT
elif str == "int32":
return mace_pb2.DT_INT32
else:
mace_check(False, "data type %s not supported" % str)
def parse_internal_data_type(str):
if str == 'fp32_fp32':
return mace_pb2.DT_FLOAT
elif str == 'bf16_fp32':
return mace_pb2.DT_BFLOAT16
else:
return mace_pb2.DT_HALF
def to_list(x):
if isinstance(x, list):
return x
else:
return [x]
def parse_int_array(xs):
    if len(xs) == 0:
return [1]
return [int(x) for x in xs.split(",")]
def parse_float_array(xs):
return [float(x) for x in xs.split(",")]
def normalize_model_config(conf):
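    # Work on a deep copy so the caller's configuration dict is not mutated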
conf = copy.deepcopy(conf)
if ModelKeys.subgraphs in conf:
subgraph = conf[ModelKeys.subgraphs][0]
del conf[ModelKeys.subgraphs]
conf.update(subgraph)
conf[ModelKeys.platform] = parse_platform(conf[ModelKeys.platform])
conf[ModelKeys.runtime] = parse_device_type(conf[ModelKeys.runtime])
if ModelKeys.quantize in conf and conf[ModelKeys.quantize] == 1:
conf[ModelKeys.data_type] = mace_pb2.DT_FLOAT
else:
if ModelKeys.data_type in conf:
conf[ModelKeys.data_type] = parse_internal_data_type(
conf[ModelKeys.data_type])
else:
conf[ModelKeys.data_type] = mace_pb2.DT_HALF
# parse input
conf[ModelKeys.input_tensors] = to_list(conf[ModelKeys.input_tensors])
conf[ModelKeys.input_tensors] = [str(i) for i in
conf[ModelKeys.input_tensors]]
input_count = len(conf[ModelKeys.input_tensors])
conf[ModelKeys.input_shapes] = [parse_int_array(shape) for shape in
to_list(conf[ModelKeys.input_shapes])]
mace_check(
len(conf[ModelKeys.input_shapes]) == input_count,
"input node count and shape count do not match")
input_data_types = [parse_data_type(dt) for dt in
to_list(conf.get(ModelKeys.input_data_types,
["float32"]))]
if len(input_data_types) == 1 and input_count > 1:
input_data_types = [input_data_types[0]] * input_count
mace_check(len(input_data_types) == input_count,
"the number of input_data_types should be "
"the same as input tensors")
conf[ModelKeys.input_data_types] = input_data_types
input_data_formats = [parse_data_format(df) for df in
to_list(conf.get(ModelKeys.input_data_formats,
["NHWC"]))]
if len(input_data_formats) == 1 and input_count > 1:
input_data_formats = [input_data_formats[0]] * input_count
mace_check(len(input_data_formats) == input_count,
"the number of input_data_formats should be "
"the same as input tensors")
conf[ModelKeys.input_data_formats] = input_data_formats
input_ranges = [parse_float_array(r) for r in
to_list(conf.get(ModelKeys.input_ranges,
["-1.0,1.0"]))]
if len(input_ranges) == 1 and input_count > 1:
input_ranges = [input_ranges[0]] * input_count
mace_check(len(input_ranges) == input_count,
"the number of input_ranges should be "
"the same as input tensors")
conf[ModelKeys.input_ranges] = input_ranges
# parse output
conf[ModelKeys.output_tensors] = to_list(conf[ModelKeys.output_tensors])
conf[ModelKeys.output_tensors] = [str(i) for i in
conf[ModelKeys.output_tensors]]
output_count = len(conf[ModelKeys.output_tensors])
conf[ModelKeys.output_shapes] = [parse_int_array(shape) for shape in
to_list(conf[ModelKeys.output_shapes])]
mace_check(len(conf[ModelKeys.output_tensors]) == output_count,
"output node count and shape count do not match")
output_data_types = [parse_data_type(dt) for dt in
to_list(conf.get(ModelKeys.output_data_types,
["float32"]))]
if len(output_data_types) == 1 and output_count > 1:
output_data_types = [output_data_types[0]] * output_count
mace_check(len(output_data_types) == output_count,
"the number of output_data_types should be "
"the same as output tensors")
conf[ModelKeys.output_data_types] = output_data_types
output_data_formats = [parse_data_format(df) for df in
to_list(conf.get(ModelKeys.output_data_formats,
["NHWC"]))]
if len(output_data_formats) == 1 and output_count > 1:
output_data_formats = [output_data_formats[0]] * output_count
mace_check(len(output_data_formats) == output_count,
"the number of output_data_formats should be "
"the same as output tensors")
conf[ModelKeys.output_data_formats] = output_data_formats
if ModelKeys.check_tensors in conf:
conf[ModelKeys.check_tensors] = to_list(conf[ModelKeys.check_tensors])
conf[ModelKeys.check_shapes] = [parse_int_array(shape) for shape in
to_list(conf[ModelKeys.check_shapes])]
mace_check(len(conf[ModelKeys.check_tensors]) == len(
conf[ModelKeys.check_shapes]),
"check tensors count and shape count do not match.")
MaceLogger.summary(conf)
return conf
|
[
"utils.util.MaceLogger.summary",
"yaml.load",
"utils.util.mace_check",
"copy.deepcopy",
"re.sub"
] |
[((2429, 2441), 'yaml.load', 'yaml.load', (['s'], {}), '(s)\n', (2438, 2441), False, 'import yaml\n'), ((4016, 4095), 'utils.util.mace_check', 'mace_check', (['(str in [e.name for e in DataFormat])', "('unknown data format %s' % str)"], {}), "(str in [e.name for e in DataFormat], 'unknown data format %s' % str)\n", (4026, 4095), False, 'from utils.util import mace_check\n'), ((4507, 4563), 'utils.util.mace_check', 'mace_check', (['(str in DEVICE_MAP)', "('unknown device %s' % str)"], {}), "(str in DEVICE_MAP, 'unknown device %s' % str)\n", (4517, 4563), False, 'from utils.util import mace_check\n'), ((4732, 4806), 'utils.util.mace_check', 'mace_check', (['(str in [e.name for e in Platform])', "('unknown platform %s' % str)"], {}), "(str in [e.name for e in Platform], 'unknown platform %s' % str)\n", (4742, 4806), False, 'from utils.util import mace_check\n'), ((5678, 5697), 'copy.deepcopy', 'copy.deepcopy', (['conf'], {}), '(conf)\n', (5691, 5697), False, 'import copy\n'), ((10492, 10516), 'utils.util.MaceLogger.summary', 'MaceLogger.summary', (['conf'], {}), '(conf)\n', (10510, 10516), False, 'from utils.util import MaceLogger\n'), ((2277, 2322), 're.sub', 're.sub', (["(':\\\\s+' + w + '$')", '(": \'" + w + "\'")', 's'], {}), '(\':\\\\s+\' + w + \'$\', ": \'" + w + "\'", s)\n', (2283, 2322), False, 'import re\n'), ((5097, 5150), 'utils.util.mace_check', 'mace_check', (['(False)', "('data type %s not supported' % str)"], {}), "(False, 'data type %s not supported' % str)\n", (5107, 5150), False, 'from utils.util import mace_check\n')]
|
# -*- coding: utf-8 -*-
# Copyright 2019 <NAME>. All Rights Reserved.
#
# Licensed under the MIT License;
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/MIT
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import logging
import multiprocessing
import numpy as np
import os
import random
import torch
import torch.backends.cudnn as cudnn
from kerosene.configs.configs import RunConfiguration, DatasetConfiguration
from kerosene.configs.parsers import YamlConfigurationParser
from kerosene.loggers.visdom import PlotType, PlotFrequency
from kerosene.loggers.visdom.config import VisdomConfiguration
from kerosene.loggers.visdom.visdom import VisdomLogger, VisdomData
from kerosene.training.trainers import ModelTrainerFactory
from samitorch.inputs.utils import augmented_sample_collate
from torch.utils.data import DataLoader
from torch.utils.data.dataloader import DataLoader
from deepNormalize.config.parsers import ArgsParserFactory, ArgsParserType
from deepNormalize.factories.customModelFactory import CustomModelFactory
from deepNormalize.factories.customTrainerFactory import TrainerFactory
from deepNormalize.inputs.datasets import iSEGSliceDatasetFactory, MRBrainSSliceDatasetFactory, ABIDESliceDatasetFactory
from deepNormalize.nn.criterions import CustomCriterionFactory
from deepNormalize.utils.constants import *
from deepNormalize.utils.image_slicer import ImageReconstructor
cudnn.benchmark = True
cudnn.enabled = True
np.random.seed(42)
random.seed(42)
if __name__ == '__main__':
# Basic settings
logging.basicConfig(level=logging.INFO)
torch.set_num_threads(multiprocessing.cpu_count())
torch.set_num_interop_threads(multiprocessing.cpu_count())
args = ArgsParserFactory.create_parser(ArgsParserType.MODEL_TRAINING).parse_args()
# Create configurations.
run_config = RunConfiguration(use_amp=args.use_amp, local_rank=args.local_rank, amp_opt_level=args.amp_opt_level)
model_trainer_configs, training_config = YamlConfigurationParser.parse(args.config_file)
if not isinstance(model_trainer_configs, list):
model_trainer_configs = [model_trainer_configs]
dataset_configs = YamlConfigurationParser.parse_section(args.config_file, "dataset")
dataset_configs = {k: DatasetConfiguration(v) for k, v, in dataset_configs.items()}
data_augmentation_config = YamlConfigurationParser.parse_section(args.config_file, "data_augmentation")
config_html = [training_config.to_html(), list(map(lambda config: config.to_html(), dataset_configs.values())),
list(map(lambda config: config.to_html(), model_trainer_configs))]
# Prepare the data.
train_datasets = list()
valid_datasets = list()
test_datasets = list()
reconstruction_datasets = list()
iSEG_train = None
iSEG_CSV = None
MRBrainS_train = None
MRBrainS_CSV = None
ABIDE_train = None
ABIDE_CSV = None
iSEG_augmentation_strategy = None
MRBrainS_augmentation_strategy = None
ABIDE_augmentation_strategy = None
# Initialize the model trainers
model_trainer_factory = ModelTrainerFactory(model_factory=CustomModelFactory(),
criterion_factory=CustomCriterionFactory())
model_trainers = model_trainer_factory.create(model_trainer_configs)
if not isinstance(model_trainers, list):
model_trainers = [model_trainers]
# Create datasets
if dataset_configs.get("iSEG", None) is not None:
iSEG_train, iSEG_valid, iSEG_test, iSEG_reconstruction = iSEGSliceDatasetFactory.create_train_valid_test(
source_dir=dataset_configs["iSEG"].path,
modalities=dataset_configs["iSEG"].modalities,
dataset_id=ISEG_ID,
test_size=dataset_configs["iSEG"].validation_split,
max_subjects=dataset_configs["iSEG"].max_subjects,
max_num_patches=dataset_configs["iSEG"].max_num_patches,
augment=dataset_configs["iSEG"].augment,
patch_size=dataset_configs["iSEG"].patch_size,
step=dataset_configs["iSEG"].step,
test_patch_size=dataset_configs["iSEG"].test_patch_size,
test_step=dataset_configs["iSEG"].test_step,
data_augmentation_config=data_augmentation_config)
train_datasets.append(iSEG_train)
valid_datasets.append(iSEG_valid)
reconstruction_datasets.append(iSEG_reconstruction)
if dataset_configs.get("MRBrainS", None) is not None:
MRBrainS_train, MRBrainS_valid, MRBrainS_test, MRBrainS_reconstruction = MRBrainSSliceDatasetFactory.create_train_valid_test(
source_dir=dataset_configs["MRBrainS"].path,
modalities=dataset_configs["MRBrainS"].modalities,
dataset_id=MRBRAINS_ID,
test_size=dataset_configs["MRBrainS"].validation_split,
max_subjects=dataset_configs["MRBrainS"].max_subjects,
max_num_patches=dataset_configs["MRBrainS"].max_num_patches,
augment=dataset_configs["MRBrainS"].augment,
patch_size=dataset_configs["MRBrainS"].patch_size,
step=dataset_configs["MRBrainS"].step,
test_patch_size=dataset_configs["MRBrainS"].test_patch_size,
test_step=dataset_configs["MRBrainS"].test_step,
data_augmentation_config=data_augmentation_config)
test_datasets.append(MRBrainS_test)
reconstruction_datasets.append(MRBrainS_reconstruction)
if dataset_configs.get("ABIDE", None) is not None:
ABIDE_train, ABIDE_valid, ABIDE_test, ABIDE_reconstruction = ABIDESliceDatasetFactory.create_train_valid_test(
source_dir=dataset_configs["ABIDE"].path,
modalities=dataset_configs["ABIDE"].modalities,
dataset_id=ABIDE_ID,
sites=dataset_configs["ABIDE"].sites,
max_subjects=dataset_configs["ABIDE"].max_subjects,
test_size=dataset_configs["ABIDE"].validation_split,
max_num_patches=dataset_configs["ABIDE"].max_num_patches,
augment=dataset_configs["ABIDE"].augment,
patch_size=dataset_configs["ABIDE"].patch_size,
step=dataset_configs["ABIDE"].step,
test_patch_size=dataset_configs["ABIDE"].test_patch_size,
test_step=dataset_configs["ABIDE"].test_step,
data_augmentation_config=data_augmentation_config)
train_datasets.append(ABIDE_train)
valid_datasets.append(ABIDE_valid)
test_datasets.append(ABIDE_test)
reconstruction_datasets.append(ABIDE_reconstruction)
if len(list(dataset_configs.keys())) == 2:
segmentation_reconstructor = ImageReconstructor(
[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0]],
patch_size=dataset_configs["iSEG"].test_patch_size,
reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs["iSEG"].test_step,
models=[model_trainers[0]],
segment=True,
batch_size=8)
input_reconstructor = ImageReconstructor(
[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0]],
patch_size=dataset_configs["iSEG"].test_patch_size,
reconstructed_image_size=(1, 256, 256, 192),
step=dataset_configs["iSEG"].test_step,
batch_size=50)
gt_reconstructor = ImageReconstructor(
[iSEG_reconstruction._target_images[0], MRBrainS_reconstruction._target_images[0]],
patch_size=dataset_configs["iSEG"].test_patch_size,
reconstructed_image_size=(1, 256, 256, 192),
step=dataset_configs["iSEG"].test_step,
is_ground_truth=True,
batch_size=50)
if dataset_configs["iSEG"].augment:
augmented_input_reconstructor = ImageReconstructor(
[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0]],
patch_size=dataset_configs["iSEG"].test_patch_size,
reconstructed_image_size=(1, 256, 256, 192),
step=dataset_configs["iSEG"].test_step,
batch_size=50,
alpha=data_augmentation_config["test"]["bias_field"]["alpha"][0],
prob_bias=data_augmentation_config["test"]["bias_field"]["prob_bias"],
snr=data_augmentation_config["test"]["noise"]["snr"],
prob_noise=data_augmentation_config["test"]["noise"]["prob_noise"])
else:
augmented_input_reconstructor = None
augmented_normalized_input_reconstructor = None
else:
segmentation_reconstructor = ImageReconstructor(
[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0],
ABIDE_reconstruction._source_images[0]],
patch_size=(1, 32, 32, 32),
reconstructed_image_size=(1, 256, 256, 192),
step=dataset_configs["iSEG"].test_step,
models=[model_trainers[0]],
normalize_and_segment=True,
batch_size=4)
input_reconstructor = ImageReconstructor(
[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0],
ABIDE_reconstruction._source_images[0]],
patch_size=(1, 32, 32, 32),
reconstructed_image_size=(1, 256, 256, 192),
step=dataset_configs["iSEG"].test_step,
batch_size=50)
gt_reconstructor = ImageReconstructor(
[iSEG_reconstruction._target_images[0], MRBrainS_reconstruction._target_images[0],
ABIDE_reconstruction._target_images[0]],
patch_size=(1, 32, 32, 32),
reconstructed_image_size=(1, 256, 256, 192),
step=dataset_configs["iSEG"].test_step,
batch_size=50,
is_ground_truth=True)
if dataset_configs["iSEG"].augment:
augmented_input_reconstructor = ImageReconstructor(
[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0],
ABIDE_reconstruction._source_images[0]],
patch_size=(1, 32, 32, 32),
reconstructed_image_size=(1, 256, 256, 192),
step=dataset_configs["iSEG"].test_step,
batch_size=50,
alpha=data_augmentation_config["test"]["bias_field"]["alpha"][0],
prob_bias=data_augmentation_config["test"]["bias_field"]["prob_bias"],
snr=data_augmentation_config["test"]["noise"]["snr"],
prob_noise=data_augmentation_config["test"]["noise"]["prob_noise"])
else:
augmented_input_reconstructor = None
augmented_normalized_input_reconstructor = None
# Concat datasets.
if len(dataset_configs) > 1:
train_dataset = torch.utils.data.ConcatDataset(train_datasets)
valid_dataset = torch.utils.data.ConcatDataset(valid_datasets)
test_dataset = torch.utils.data.ConcatDataset(test_datasets)
else:
train_dataset = train_datasets[0]
valid_dataset = valid_datasets[0]
test_dataset = test_datasets[0]
# Create loaders.
dataloaders = list(map(lambda dataset: DataLoader(dataset,
training_config.batch_size,
sampler=None,
shuffle=True,
num_workers=args.num_workers,
collate_fn=augmented_sample_collate,
drop_last=True,
pin_memory=True),
[train_dataset, valid_dataset, test_dataset]))
# Initialize the loggers.
visdom_config = VisdomConfiguration.from_yml(args.config_file, "visdom")
exp = args.config_file.split("/")[-3:]
if visdom_config.save_destination is not None:
save_folder = visdom_config.save_destination + os.path.join(exp[0], exp[1],
os.path.basename(
os.path.normpath(visdom_config.env)))
else:
save_folder = "saves/{}".format(os.path.basename(os.path.normpath(visdom_config.env)))
[os.makedirs("{}/{}".format(save_folder, model), exist_ok=True)
for model in
["Discriminator", "Generator", "Segmenter"]]
visdom_logger = VisdomLogger(visdom_config)
visdom_logger(VisdomData("Experiment", "Experiment Config", PlotType.TEXT_PLOT, PlotFrequency.EVERY_EPOCH, None,
config_html))
visdom_logger(VisdomData("Experiment", "Patch count", PlotType.BAR_PLOT, PlotFrequency.EVERY_EPOCH,
x=[len(iSEG_train) if iSEG_train is not None else 0,
len(MRBrainS_train) if MRBrainS_train is not None else 0,
len(ABIDE_train) if ABIDE_train is not None else 0],
y=["iSEG", "MRBrainS", "ABIDE"], params={"opts": {"title": "Patch count"}}))
trainer = TrainerFactory(training_config.trainer).create(training_config,
model_trainers,
dataloaders,
reconstruction_datasets,
None,
input_reconstructor,
segmentation_reconstructor,
augmented_input_reconstructor,
None,
gt_reconstructor,
run_config,
dataset_configs,
save_folder,
visdom_logger)
trainer.train(training_config.nb_epochs)
|
[
"torch.utils.data.ConcatDataset",
"deepNormalize.factories.customTrainerFactory.TrainerFactory",
"deepNormalize.utils.image_slicer.ImageReconstructor",
"multiprocessing.cpu_count",
"kerosene.configs.configs.DatasetConfiguration",
"kerosene.loggers.visdom.visdom.VisdomLogger",
"deepNormalize.inputs.datasets.MRBrainSSliceDatasetFactory.create_train_valid_test",
"kerosene.configs.configs.RunConfiguration",
"deepNormalize.inputs.datasets.iSEGSliceDatasetFactory.create_train_valid_test",
"deepNormalize.config.parsers.ArgsParserFactory.create_parser",
"deepNormalize.nn.criterions.CustomCriterionFactory",
"torch.utils.data.dataloader.DataLoader",
"os.path.normpath",
"numpy.random.seed",
"kerosene.loggers.visdom.visdom.VisdomData",
"kerosene.loggers.visdom.config.VisdomConfiguration.from_yml",
"deepNormalize.factories.customModelFactory.CustomModelFactory",
"deepNormalize.inputs.datasets.ABIDESliceDatasetFactory.create_train_valid_test",
"logging.basicConfig",
"kerosene.configs.parsers.YamlConfigurationParser.parse",
"kerosene.configs.parsers.YamlConfigurationParser.parse_section",
"random.seed"
] |
[((1889, 1907), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (1903, 1907), True, 'import numpy as np\n'), ((1908, 1923), 'random.seed', 'random.seed', (['(42)'], {}), '(42)\n', (1919, 1923), False, 'import random\n'), ((1977, 2016), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (1996, 2016), False, 'import logging\n'), ((2269, 2373), 'kerosene.configs.configs.RunConfiguration', 'RunConfiguration', ([], {'use_amp': 'args.use_amp', 'local_rank': 'args.local_rank', 'amp_opt_level': 'args.amp_opt_level'}), '(use_amp=args.use_amp, local_rank=args.local_rank,\n amp_opt_level=args.amp_opt_level)\n', (2285, 2373), False, 'from kerosene.configs.configs import RunConfiguration, DatasetConfiguration\n'), ((2415, 2462), 'kerosene.configs.parsers.YamlConfigurationParser.parse', 'YamlConfigurationParser.parse', (['args.config_file'], {}), '(args.config_file)\n', (2444, 2462), False, 'from kerosene.configs.parsers import YamlConfigurationParser\n'), ((2593, 2659), 'kerosene.configs.parsers.YamlConfigurationParser.parse_section', 'YamlConfigurationParser.parse_section', (['args.config_file', '"""dataset"""'], {}), "(args.config_file, 'dataset')\n", (2630, 2659), False, 'from kerosene.configs.parsers import YamlConfigurationParser\n'), ((2779, 2855), 'kerosene.configs.parsers.YamlConfigurationParser.parse_section', 'YamlConfigurationParser.parse_section', (['args.config_file', '"""data_augmentation"""'], {}), "(args.config_file, 'data_augmentation')\n", (2816, 2855), False, 'from kerosene.configs.parsers import YamlConfigurationParser\n'), ((12352, 12408), 'kerosene.loggers.visdom.config.VisdomConfiguration.from_yml', 'VisdomConfiguration.from_yml', (['args.config_file', '"""visdom"""'], {}), "(args.config_file, 'visdom')\n", (12380, 12408), False, 'from kerosene.loggers.visdom.config import VisdomConfiguration\n'), ((13045, 13072), 'kerosene.loggers.visdom.visdom.VisdomLogger', 'VisdomLogger', (['visdom_config'], {}), '(visdom_config)\n', (13057, 13072), False, 'from kerosene.loggers.visdom.visdom import VisdomLogger, VisdomData\n'), ((2043, 2070), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (2068, 2070), False, 'import multiprocessing\n'), ((2106, 2133), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (2131, 2133), False, 'import multiprocessing\n'), ((2686, 2709), 'kerosene.configs.configs.DatasetConfiguration', 'DatasetConfiguration', (['v'], {}), '(v)\n', (2706, 2709), False, 'from kerosene.configs.configs import RunConfiguration, DatasetConfiguration\n'), ((3975, 4604), 'deepNormalize.inputs.datasets.iSEGSliceDatasetFactory.create_train_valid_test', 'iSEGSliceDatasetFactory.create_train_valid_test', ([], {'source_dir': "dataset_configs['iSEG'].path", 'modalities': "dataset_configs['iSEG'].modalities", 'dataset_id': 'ISEG_ID', 'test_size': "dataset_configs['iSEG'].validation_split", 'max_subjects': "dataset_configs['iSEG'].max_subjects", 'max_num_patches': "dataset_configs['iSEG'].max_num_patches", 'augment': "dataset_configs['iSEG'].augment", 'patch_size': "dataset_configs['iSEG'].patch_size", 'step': "dataset_configs['iSEG'].step", 'test_patch_size': "dataset_configs['iSEG'].test_patch_size", 'test_step': "dataset_configs['iSEG'].test_step", 'data_augmentation_config': 'data_augmentation_config'}), "(source_dir=dataset_configs[\n 'iSEG'].path, modalities=dataset_configs['iSEG'].modalities, dataset_id\n =ISEG_ID, 
test_size=dataset_configs['iSEG'].validation_split,\n max_subjects=dataset_configs['iSEG'].max_subjects, max_num_patches=\n dataset_configs['iSEG'].max_num_patches, augment=dataset_configs['iSEG'\n ].augment, patch_size=dataset_configs['iSEG'].patch_size, step=\n dataset_configs['iSEG'].step, test_patch_size=dataset_configs['iSEG'].\n test_patch_size, test_step=dataset_configs['iSEG'].test_step,\n data_augmentation_config=data_augmentation_config)\n", (4022, 4604), False, 'from deepNormalize.inputs.datasets import iSEGSliceDatasetFactory, MRBrainSSliceDatasetFactory, ABIDESliceDatasetFactory\n'), ((4996, 5678), 'deepNormalize.inputs.datasets.MRBrainSSliceDatasetFactory.create_train_valid_test', 'MRBrainSSliceDatasetFactory.create_train_valid_test', ([], {'source_dir': "dataset_configs['MRBrainS'].path", 'modalities': "dataset_configs['MRBrainS'].modalities", 'dataset_id': 'MRBRAINS_ID', 'test_size': "dataset_configs['MRBrainS'].validation_split", 'max_subjects': "dataset_configs['MRBrainS'].max_subjects", 'max_num_patches': "dataset_configs['MRBrainS'].max_num_patches", 'augment': "dataset_configs['MRBrainS'].augment", 'patch_size': "dataset_configs['MRBrainS'].patch_size", 'step': "dataset_configs['MRBrainS'].step", 'test_patch_size': "dataset_configs['MRBrainS'].test_patch_size", 'test_step': "dataset_configs['MRBrainS'].test_step", 'data_augmentation_config': 'data_augmentation_config'}), "(source_dir=\n dataset_configs['MRBrainS'].path, modalities=dataset_configs['MRBrainS'\n ].modalities, dataset_id=MRBRAINS_ID, test_size=dataset_configs[\n 'MRBrainS'].validation_split, max_subjects=dataset_configs['MRBrainS'].\n max_subjects, max_num_patches=dataset_configs['MRBrainS'].\n max_num_patches, augment=dataset_configs['MRBrainS'].augment,\n patch_size=dataset_configs['MRBrainS'].patch_size, step=dataset_configs\n ['MRBrainS'].step, test_patch_size=dataset_configs['MRBrainS'].\n test_patch_size, test_step=dataset_configs['MRBrainS'].test_step,\n data_augmentation_config=data_augmentation_config)\n", (5047, 5678), False, 'from deepNormalize.inputs.datasets import iSEGSliceDatasetFactory, MRBrainSSliceDatasetFactory, ABIDESliceDatasetFactory\n'), ((6014, 6698), 'deepNormalize.inputs.datasets.ABIDESliceDatasetFactory.create_train_valid_test', 'ABIDESliceDatasetFactory.create_train_valid_test', ([], {'source_dir': "dataset_configs['ABIDE'].path", 'modalities': "dataset_configs['ABIDE'].modalities", 'dataset_id': 'ABIDE_ID', 'sites': "dataset_configs['ABIDE'].sites", 'max_subjects': "dataset_configs['ABIDE'].max_subjects", 'test_size': "dataset_configs['ABIDE'].validation_split", 'max_num_patches': "dataset_configs['ABIDE'].max_num_patches", 'augment': "dataset_configs['ABIDE'].augment", 'patch_size': "dataset_configs['ABIDE'].patch_size", 'step': "dataset_configs['ABIDE'].step", 'test_patch_size': "dataset_configs['ABIDE'].test_patch_size", 'test_step': "dataset_configs['ABIDE'].test_step", 'data_augmentation_config': 'data_augmentation_config'}), "(source_dir=dataset_configs\n ['ABIDE'].path, modalities=dataset_configs['ABIDE'].modalities,\n dataset_id=ABIDE_ID, sites=dataset_configs['ABIDE'].sites, max_subjects\n =dataset_configs['ABIDE'].max_subjects, test_size=dataset_configs[\n 'ABIDE'].validation_split, max_num_patches=dataset_configs['ABIDE'].\n max_num_patches, augment=dataset_configs['ABIDE'].augment, patch_size=\n dataset_configs['ABIDE'].patch_size, step=dataset_configs['ABIDE'].step,\n test_patch_size=dataset_configs['ABIDE'].test_patch_size, test_step=\n 
dataset_configs['ABIDE'].test_step, data_augmentation_config=\n data_augmentation_config)\n", (6062, 6698), False, 'from deepNormalize.inputs.datasets import iSEGSliceDatasetFactory, MRBrainSSliceDatasetFactory, ABIDESliceDatasetFactory\n'), ((7086, 7398), 'deepNormalize.utils.image_slicer.ImageReconstructor', 'ImageReconstructor', (['[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction.\n _source_images[0]]'], {'patch_size': "dataset_configs['iSEG'].test_patch_size", 'reconstructed_image_size': '(1, 256, 256, 192)', 'step': "dataset_configs['iSEG'].test_step", 'models': '[model_trainers[0]]', 'segment': '(True)', 'batch_size': '(8)'}), "([iSEG_reconstruction._source_images[0],\n MRBrainS_reconstruction._source_images[0]], patch_size=dataset_configs[\n 'iSEG'].test_patch_size, reconstructed_image_size=(1, 256, 256, 192),\n step=dataset_configs['iSEG'].test_step, models=[model_trainers[0]],\n segment=True, batch_size=8)\n", (7104, 7398), False, 'from deepNormalize.utils.image_slicer import ImageReconstructor\n'), ((7485, 7752), 'deepNormalize.utils.image_slicer.ImageReconstructor', 'ImageReconstructor', (['[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction.\n _source_images[0]]'], {'patch_size': "dataset_configs['iSEG'].test_patch_size", 'reconstructed_image_size': '(1, 256, 256, 192)', 'step': "dataset_configs['iSEG'].test_step", 'batch_size': '(50)'}), "([iSEG_reconstruction._source_images[0],\n MRBrainS_reconstruction._source_images[0]], patch_size=dataset_configs[\n 'iSEG'].test_patch_size, reconstructed_image_size=(1, 256, 256, 192),\n step=dataset_configs['iSEG'].test_step, batch_size=50)\n", (7503, 7752), False, 'from deepNormalize.utils.image_slicer import ImageReconstructor\n'), ((7828, 8122), 'deepNormalize.utils.image_slicer.ImageReconstructor', 'ImageReconstructor', (['[iSEG_reconstruction._target_images[0], MRBrainS_reconstruction.\n _target_images[0]]'], {'patch_size': "dataset_configs['iSEG'].test_patch_size", 'reconstructed_image_size': '(1, 256, 256, 192)', 'step': "dataset_configs['iSEG'].test_step", 'is_ground_truth': '(True)', 'batch_size': '(50)'}), "([iSEG_reconstruction._target_images[0],\n MRBrainS_reconstruction._target_images[0]], patch_size=dataset_configs[\n 'iSEG'].test_patch_size, reconstructed_image_size=(1, 256, 256, 192),\n step=dataset_configs['iSEG'].test_step, is_ground_truth=True, batch_size=50\n )\n", (7846, 8122), False, 'from deepNormalize.utils.image_slicer import ImageReconstructor\n'), ((9095, 9443), 'deepNormalize.utils.image_slicer.ImageReconstructor', 'ImageReconstructor', (['[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction.\n _source_images[0], ABIDE_reconstruction._source_images[0]]'], {'patch_size': '(1, 32, 32, 32)', 'reconstructed_image_size': '(1, 256, 256, 192)', 'step': "dataset_configs['iSEG'].test_step", 'models': '[model_trainers[0]]', 'normalize_and_segment': '(True)', 'batch_size': '(4)'}), "([iSEG_reconstruction._source_images[0],\n MRBrainS_reconstruction._source_images[0], ABIDE_reconstruction.\n _source_images[0]], patch_size=(1, 32, 32, 32),\n reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs[\n 'iSEG'].test_step, models=[model_trainers[0]], normalize_and_segment=\n True, batch_size=4)\n", (9113, 9443), False, 'from deepNormalize.utils.image_slicer import ImageReconstructor\n'), ((9549, 9837), 'deepNormalize.utils.image_slicer.ImageReconstructor', 'ImageReconstructor', (['[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction.\n _source_images[0], 
ABIDE_reconstruction._source_images[0]]'], {'patch_size': '(1, 32, 32, 32)', 'reconstructed_image_size': '(1, 256, 256, 192)', 'step': "dataset_configs['iSEG'].test_step", 'batch_size': '(50)'}), "([iSEG_reconstruction._source_images[0],\n MRBrainS_reconstruction._source_images[0], ABIDE_reconstruction.\n _source_images[0]], patch_size=(1, 32, 32, 32),\n reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs[\n 'iSEG'].test_step, batch_size=50)\n", (9567, 9837), False, 'from deepNormalize.utils.image_slicer import ImageReconstructor\n'), ((9921, 10231), 'deepNormalize.utils.image_slicer.ImageReconstructor', 'ImageReconstructor', (['[iSEG_reconstruction._target_images[0], MRBrainS_reconstruction.\n _target_images[0], ABIDE_reconstruction._target_images[0]]'], {'patch_size': '(1, 32, 32, 32)', 'reconstructed_image_size': '(1, 256, 256, 192)', 'step': "dataset_configs['iSEG'].test_step", 'batch_size': '(50)', 'is_ground_truth': '(True)'}), "([iSEG_reconstruction._target_images[0],\n MRBrainS_reconstruction._target_images[0], ABIDE_reconstruction.\n _target_images[0]], patch_size=(1, 32, 32, 32),\n reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs[\n 'iSEG'].test_step, batch_size=50, is_ground_truth=True)\n", (9939, 10231), False, 'from deepNormalize.utils.image_slicer import ImageReconstructor\n'), ((11285, 11331), 'torch.utils.data.ConcatDataset', 'torch.utils.data.ConcatDataset', (['train_datasets'], {}), '(train_datasets)\n', (11315, 11331), False, 'import torch\n'), ((11356, 11402), 'torch.utils.data.ConcatDataset', 'torch.utils.data.ConcatDataset', (['valid_datasets'], {}), '(valid_datasets)\n', (11386, 11402), False, 'import torch\n'), ((11426, 11471), 'torch.utils.data.ConcatDataset', 'torch.utils.data.ConcatDataset', (['test_datasets'], {}), '(test_datasets)\n', (11456, 11471), False, 'import torch\n'), ((13092, 13207), 'kerosene.loggers.visdom.visdom.VisdomData', 'VisdomData', (['"""Experiment"""', '"""Experiment Config"""', 'PlotType.TEXT_PLOT', 'PlotFrequency.EVERY_EPOCH', 'None', 'config_html'], {}), "('Experiment', 'Experiment Config', PlotType.TEXT_PLOT,\n PlotFrequency.EVERY_EPOCH, None, config_html)\n", (13102, 13207), False, 'from kerosene.loggers.visdom.visdom import VisdomLogger, VisdomData\n'), ((2146, 2208), 'deepNormalize.config.parsers.ArgsParserFactory.create_parser', 'ArgsParserFactory.create_parser', (['ArgsParserType.MODEL_TRAINING'], {}), '(ArgsParserType.MODEL_TRAINING)\n', (2177, 2208), False, 'from deepNormalize.config.parsers import ArgsParserFactory, ArgsParserType\n'), ((3559, 3579), 'deepNormalize.factories.customModelFactory.CustomModelFactory', 'CustomModelFactory', ([], {}), '()\n', (3577, 3579), False, 'from deepNormalize.factories.customModelFactory import CustomModelFactory\n'), ((3647, 3671), 'deepNormalize.nn.criterions.CustomCriterionFactory', 'CustomCriterionFactory', ([], {}), '()\n', (3669, 3671), False, 'from deepNormalize.nn.criterions import CustomCriterionFactory\n'), ((8266, 8812), 'deepNormalize.utils.image_slicer.ImageReconstructor', 'ImageReconstructor', (['[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction.\n _source_images[0]]'], {'patch_size': "dataset_configs['iSEG'].test_patch_size", 'reconstructed_image_size': '(1, 256, 256, 192)', 'step': "dataset_configs['iSEG'].test_step", 'batch_size': '(50)', 'alpha': "data_augmentation_config['test']['bias_field']['alpha'][0]", 'prob_bias': "data_augmentation_config['test']['bias_field']['prob_bias']", 'snr': 
"data_augmentation_config['test']['noise']['snr']", 'prob_noise': "data_augmentation_config['test']['noise']['prob_noise']"}), "([iSEG_reconstruction._source_images[0],\n MRBrainS_reconstruction._source_images[0]], patch_size=dataset_configs[\n 'iSEG'].test_patch_size, reconstructed_image_size=(1, 256, 256, 192),\n step=dataset_configs['iSEG'].test_step, batch_size=50, alpha=\n data_augmentation_config['test']['bias_field']['alpha'][0], prob_bias=\n data_augmentation_config['test']['bias_field']['prob_bias'], snr=\n data_augmentation_config['test']['noise']['snr'], prob_noise=\n data_augmentation_config['test']['noise']['prob_noise'])\n", (8284, 8812), False, 'from deepNormalize.utils.image_slicer import ImageReconstructor\n'), ((10388, 10955), 'deepNormalize.utils.image_slicer.ImageReconstructor', 'ImageReconstructor', (['[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction.\n _source_images[0], ABIDE_reconstruction._source_images[0]]'], {'patch_size': '(1, 32, 32, 32)', 'reconstructed_image_size': '(1, 256, 256, 192)', 'step': "dataset_configs['iSEG'].test_step", 'batch_size': '(50)', 'alpha': "data_augmentation_config['test']['bias_field']['alpha'][0]", 'prob_bias': "data_augmentation_config['test']['bias_field']['prob_bias']", 'snr': "data_augmentation_config['test']['noise']['snr']", 'prob_noise': "data_augmentation_config['test']['noise']['prob_noise']"}), "([iSEG_reconstruction._source_images[0],\n MRBrainS_reconstruction._source_images[0], ABIDE_reconstruction.\n _source_images[0]], patch_size=(1, 32, 32, 32),\n reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs[\n 'iSEG'].test_step, batch_size=50, alpha=data_augmentation_config['test'\n ]['bias_field']['alpha'][0], prob_bias=data_augmentation_config['test']\n ['bias_field']['prob_bias'], snr=data_augmentation_config['test'][\n 'noise']['snr'], prob_noise=data_augmentation_config['test']['noise'][\n 'prob_noise'])\n", (10406, 10955), False, 'from deepNormalize.utils.image_slicer import ImageReconstructor\n'), ((13716, 13755), 'deepNormalize.factories.customTrainerFactory.TrainerFactory', 'TrainerFactory', (['training_config.trainer'], {}), '(training_config.trainer)\n', (13730, 13755), False, 'from deepNormalize.factories.customTrainerFactory import TrainerFactory\n'), ((11672, 11855), 'torch.utils.data.dataloader.DataLoader', 'DataLoader', (['dataset', 'training_config.batch_size'], {'sampler': 'None', 'shuffle': '(True)', 'num_workers': 'args.num_workers', 'collate_fn': 'augmented_sample_collate', 'drop_last': '(True)', 'pin_memory': '(True)'}), '(dataset, training_config.batch_size, sampler=None, shuffle=True,\n num_workers=args.num_workers, collate_fn=augmented_sample_collate,\n drop_last=True, pin_memory=True)\n', (11682, 11855), False, 'from torch.utils.data.dataloader import DataLoader\n'), ((12850, 12885), 'os.path.normpath', 'os.path.normpath', (['visdom_config.env'], {}), '(visdom_config.env)\n', (12866, 12885), False, 'import os\n'), ((12745, 12780), 'os.path.normpath', 'os.path.normpath', (['visdom_config.env'], {}), '(visdom_config.env)\n', (12761, 12780), False, 'import os\n')]
|
from collections import defaultdict
import json
import re
import redis
import threading
import time
import traceback
import uuid
import base64
import binascii
TTL = 2
hash_keys = ('cmd', 'user')
cmd_hash_keys = {
'comment': ('addr',),
'extra_comment': ('addr',),
'area_comment': ('addr',),
'rename': ('addr',),
'stackvar_renamed': ('addr', 'offset', 'name',),
'struc_created': ('struc_name', 'is_union',),
'struc_deleted': ('struc_name',),
'struc_renamed': ('old_name', 'new_name',),
'struc_member_created': ('struc_name', 'offset', 'member_name', 'size', 'flag',),
'struc_member_deleted': ('struc_name', 'offset',),
'struc_member_renamed': ('struc_name', 'offset', 'member_name',),
'struc_member_changed': ('struc_name', 'offset', 'size',),
}
key_dec = {
'c': 'cmd',
'a': 'addr',
'u': 'user',
't': 'text',
'i': 'uuid',
'b': 'blocks'
}
key_enc = dict((v, k) for k, v in key_dec.items())
nick_filter = re.compile(r'[^a-zA-Z0-9_\-]')
def decode(data):
    # Expand the single-character wire keys back to their full names.
    d = json.loads(data)
    return dict((key_dec.get(k, k), v) for k, v in d.items())
def dtokey(d):
    # Debounce key: the payload with the per-sender fields stripped out.
    return tuple(((k, v) for k, v in sorted(d.items()) if k not in ('user', 'ts', 'uuid')))
def remove_ttl(a):
    # Drop debounce entries older than TTL seconds.
    now = time.time()
    return [d for d in a if now - d[0] < TTL]
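def _codec_example():
    # Editor's hedged illustration (not part of the original module): round-trips
    # a made-up payload through the short-key wire encoding and shows that
    # dtokey() ignores the per-sender fields, so equivalent commands debounce.
    msg = {'cmd': 'rename', 'addr': 0x401000, 'user': 'alice', 'text': 'main'}
    wire = json.dumps(dict((key_enc.get(k, k), v) for k, v in msg.items()))
    assert decode(wire) == msg
    assert dtokey(msg) == dtokey(dict(msg, user='bob'))
    return wire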
class Client:
def __init__(self, host, port, nick, password=None):
self.r = redis.StrictRedis(host=host, port=port, password=password, socket_connect_timeout=5)
self.r.info()
self.nick = nick_filter.sub('_', nick)
self.ps = {}
self.nolock = threading.Lock()
self.nosend = defaultdict(list)
self.uuid = str(base64.b64encode(binascii.unhexlify(uuid.uuid4().hex)).decode('ascii'))
    def debounce(self, no, data):
        # Return True (and drop the matching entry) if an equivalent message
        # was recorded within the last TTL seconds, e.g. an echo of a command
        # we just sent or replayed.
        dkey = dtokey(data)
        now = time.time()
with self.nolock:
for data in no:
ts = data[0]
key = data[1:]
if dkey == key and now - ts < TTL:
no.remove(data)
return True
return False
def _sub_thread(self, ps, cb, key):
for item in ps.listen():
try:
if item['type'] == 'message':
data = decode(item['data'])
if 'user' in data:
data['user'] = nick_filter.sub('_', data['user'])
# reject our own messages
if data.get('uuid') == self.uuid:
continue
with self.nolock:
self.nosend[key] = remove_ttl(self.nosend[key])
self.nosend[key].append((time.time(),) + dtokey(data))
cb(key, data)
elif item['type'] == 'subscribe':
decoded = []
for data in self.r.lrange(key, 0, -1):
try:
decoded.append(decode(data))
except Exception:
print('error decoding history', data)
traceback.print_exc()
state = []
dedup = set()
for data in reversed(decoded):
cmd = data.get('cmd')
if cmd:
keys = hash_keys + cmd_hash_keys.get(cmd, ())
hashkey = tuple([str(data.get(k)) for k in keys])
if all(hashkey):
if hashkey in dedup:
continue
dedup.add(hashkey)
state.append(data)
for data in reversed(state):
try:
with self.nolock:
self.nosend[key].append((time.time(),) + dtokey(data))
cb(key, data, replay=True)
except Exception:
print('error replaying history', data)
traceback.print_exc()
else:
print('unknown redis push', item)
except Exception:
print('error processing item', item)
traceback.print_exc()
def join(self, key, cb):
ps = self.r.pubsub()
ps.subscribe(key)
t = threading.Thread(target=self._sub_thread, args=(ps, cb, key))
t.daemon = True
t.start()
self.ps[key] = ps
self.publish(key, {'cmd': 'join'}, perm=False)
def leave(self, key):
ps = self.ps.pop(key, None)
if ps:
ps.unsubscribe(key)
def publish(self, key, data, perm=True, send_uuid=True):
if self.debounce(self.nosend[key], data):
return
data['user'] = self.nick
data['ts'] = self.r.time()[0]
if send_uuid:
data['uuid'] = self.uuid
data = dict((key_enc.get(k, k), v) for k, v in data.items())
data = json.dumps(data, separators=(',', ':'), sort_keys=True)
if perm:
self.r.rpush(key, data)
self.r.publish(key, data)
def push(self, key, data, send_uuid=True):
if send_uuid:
data['uuid'] = self.uuid
data = dict((key_enc.get(k, k), v) for k, v in data.items())
data = json.dumps(data, separators=(',', ':'), sort_keys=True)
self.r.lpush(key, data)
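if __name__ == '__main__':
    # Editor's hedged usage sketch: needs a reachable Redis server; the host,
    # port, channel name and nick below are illustrative placeholders only.
    def _on_message(key, data, replay=False):
        # replay=True marks messages replayed from the channel history list.
        print('replay' if replay else 'live', key, data.get('cmd'), data.get('user'))
    client = Client('localhost', 6379, 'alice')
    client.join('demo_channel', _on_message)
    client.publish('demo_channel', {'cmd': 'comment', 'addr': 4096, 'text': 'entry point'})
    time.sleep(1)  # give the subscriber thread a moment to deliver the message
    client.leave('demo_channel')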
|
[
"json.loads",
"re.compile",
"threading.Lock",
"json.dumps",
"uuid.uuid4",
"collections.defaultdict",
"redis.StrictRedis",
"threading.Thread",
"traceback.print_exc",
"time.time"
] |
[((975, 1005), 're.compile', 're.compile', (['"""[^a-zA-Z0-9_\\\\-]"""'], {}), "('[^a-zA-Z0-9_\\\\-]')\n", (985, 1005), False, 'import re\n'), ((1033, 1049), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (1043, 1049), False, 'import json\n'), ((1250, 1261), 'time.time', 'time.time', ([], {}), '()\n', (1259, 1261), False, 'import time\n'), ((1397, 1485), 'redis.StrictRedis', 'redis.StrictRedis', ([], {'host': 'host', 'port': 'port', 'password': 'password', 'socket_connect_timeout': '(5)'}), '(host=host, port=port, password=password,\n socket_connect_timeout=5)\n', (1414, 1485), False, 'import redis\n'), ((1594, 1610), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (1608, 1610), False, 'import threading\n'), ((1633, 1650), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1644, 1650), False, 'from collections import defaultdict\n'), ((1824, 1835), 'time.time', 'time.time', ([], {}), '()\n', (1833, 1835), False, 'import time\n'), ((4434, 4495), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._sub_thread', 'args': '(ps, cb, key)'}), '(target=self._sub_thread, args=(ps, cb, key))\n', (4450, 4495), False, 'import threading\n'), ((5076, 5131), 'json.dumps', 'json.dumps', (['data'], {'separators': "(',', ':')", 'sort_keys': '(True)'}), "(data, separators=(',', ':'), sort_keys=True)\n", (5086, 5131), False, 'import json\n'), ((5410, 5465), 'json.dumps', 'json.dumps', (['data'], {'separators': "(',', ':')", 'sort_keys': '(True)'}), "(data, separators=(',', ':'), sort_keys=True)\n", (5420, 5465), False, 'import json\n'), ((4315, 4336), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (4334, 4336), False, 'import traceback\n'), ((1711, 1723), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1721, 1723), False, 'import uuid\n'), ((2680, 2691), 'time.time', 'time.time', ([], {}), '()\n', (2689, 2691), False, 'import time\n'), ((3108, 3129), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (3127, 3129), False, 'import traceback\n'), ((4118, 4139), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (4137, 4139), False, 'import traceback\n'), ((3896, 3907), 'time.time', 'time.time', ([], {}), '()\n', (3905, 3907), False, 'import time\n')]
|
# -*- coding: utf-8 -*-
"""URLs to manipulate columns."""
from django.urls import path
from ontask.condition import views
app_name = 'condition'
urlpatterns = [
#
# FILTERS
#
path(
'<int:pk>/create_filter/',
views.FilterCreateView.as_view(),
name='create_filter'),
path('<int:pk>/edit_filter/', views.edit_filter, name='edit_filter'),
path('<int:pk>/delete_filter/', views.delete_filter, name='delete_filter'),
#
# CONDITIONS
#
path(
'<int:pk>/create_condition/',
views.ConditionCreateView.as_view(),
name='create_condition'),
path(
'<int:pk>/edit_condition/',
views.edit_condition,
name='edit_condition'),
path(
'<int:pk>/delete_condition/',
views.delete_condition,
name='delete_condition'),
# Clone the condition
path(
'<int:pk>/clone_condition/',
views.clone_condition,
name='clone_condition'),
path(
'<int:pk>/<int:action_pk>/clone_condition/',
views.clone_condition,
name='clone_condition'),
]
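# Editor's hedged usage note: with ``app_name`` set above, these routes are
# reversed through the namespace, e.g.
#   from django.urls import reverse
#   reverse('condition:edit_condition', kwargs={'pk': 5})
# The pk is illustrative and the final URL prefix depends on where this
# URLconf is included.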
|
[
"ontask.condition.views.FilterCreateView.as_view",
"django.urls.path",
"ontask.condition.views.ConditionCreateView.as_view"
] |
[((313, 381), 'django.urls.path', 'path', (['"""<int:pk>/edit_filter/"""', 'views.edit_filter'], {'name': '"""edit_filter"""'}), "('<int:pk>/edit_filter/', views.edit_filter, name='edit_filter')\n", (317, 381), False, 'from django.urls import path\n'), ((387, 461), 'django.urls.path', 'path', (['"""<int:pk>/delete_filter/"""', 'views.delete_filter'], {'name': '"""delete_filter"""'}), "('<int:pk>/delete_filter/', views.delete_filter, name='delete_filter')\n", (391, 461), False, 'from django.urls import path\n'), ((624, 701), 'django.urls.path', 'path', (['"""<int:pk>/edit_condition/"""', 'views.edit_condition'], {'name': '"""edit_condition"""'}), "('<int:pk>/edit_condition/', views.edit_condition, name='edit_condition')\n", (628, 701), False, 'from django.urls import path\n'), ((732, 820), 'django.urls.path', 'path', (['"""<int:pk>/delete_condition/"""', 'views.delete_condition'], {'name': '"""delete_condition"""'}), "('<int:pk>/delete_condition/', views.delete_condition, name=\n 'delete_condition')\n", (736, 820), False, 'from django.urls import path\n'), ((873, 958), 'django.urls.path', 'path', (['"""<int:pk>/clone_condition/"""', 'views.clone_condition'], {'name': '"""clone_condition"""'}), "('<int:pk>/clone_condition/', views.clone_condition, name='clone_condition'\n )\n", (877, 958), False, 'from django.urls import path\n'), ((984, 1084), 'django.urls.path', 'path', (['"""<int:pk>/<int:action_pk>/clone_condition/"""', 'views.clone_condition'], {'name': '"""clone_condition"""'}), "('<int:pk>/<int:action_pk>/clone_condition/', views.clone_condition,\n name='clone_condition')\n", (988, 1084), False, 'from django.urls import path\n'), ((244, 276), 'ontask.condition.views.FilterCreateView.as_view', 'views.FilterCreateView.as_view', ([], {}), '()\n', (274, 276), False, 'from ontask.condition import views\n'), ((549, 584), 'ontask.condition.views.ConditionCreateView.as_view', 'views.ConditionCreateView.as_view', ([], {}), '()\n', (582, 584), False, 'from ontask.condition import views\n')]
|
"""Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Defines the architecture of the Video Classifier.
"""
import math
import tensorflow as tf
class NetVLAD(tf.keras.layers.Layer):
"""Applies NetVLAD to the input.
Args:
num_clusters: The number of clusters to use.
input_shape: 3D tensor denoting the input shape of the NetVLAD layer.
  Input shape:
3D tensor with shape: `(batch_size, time, feature_dim)`.
Output shape:
2D tensor with shape: `(batch_size, feature_dim * num_clusters)`.
"""
def __init__(self, num_clusters, input_shape, **kwargs):
super().__init__(**kwargs)
if num_clusters <= 0:
      raise ValueError("`num_clusters` must be greater than 0: %i" % num_clusters)
self.num_clusters = num_clusters
feature_dim = input_shape[-1]
if not isinstance(feature_dim, int):
feature_dim = feature_dim.value
self.fc = tf.keras.layers.Dense(
units=self.num_clusters,
activation=tf.nn.softmax,
kernel_regularizer=tf.keras.regularizers.l2(1e-5),
name="vlad_fc" + str(num_clusters)
)
self.cluster_centers = self.add_weight(
shape=(1, feature_dim, self.num_clusters),
initializer=tf.keras.initializers.TruncatedNormal(
stddev=1.0 / math.sqrt(feature_dim)
),
trainable=True,
name="cluster_centers" + str(num_clusters)
)
self.feature_dim = feature_dim
self.max_frames = input_shape[-2]
def call(self, frames):
"""Apply the NetVLAD module to the given frames.
Args:
frames: A tensor with shape [batch_size, max_frames, feature_dim].
Returns:
vlad_out: A tensor with shape [batch_size, feature_dim * num_clusters].
Raises:
ValueError: If the `feature_dim` of input is not defined.
"""
feature_dim = self.feature_dim
max_frames = self.max_frames
frames = tf.reshape(frames, (-1, feature_dim))
activation = self.fc(frames)
activation = tf.reshape(activation, (-1, max_frames, self.num_clusters))
activation_sum = tf.math.reduce_sum(activation, axis=-2, keepdims=True)
cluster_activation = activation_sum * self.cluster_centers
frames = tf.reshape(frames, (-1, max_frames, feature_dim))
activation = tf.transpose(
tf.matmul(tf.transpose(activation, perm=(0, 2, 1)), frames), perm=(0, 2, 1)
)
vlad_out = activation - cluster_activation
vlad_out = tf.nn.l2_normalize(vlad_out, 1)
vlad_out = tf.reshape(vlad_out, (-1, feature_dim * self.num_clusters))
vlad_out = tf.nn.l2_normalize(vlad_out, 1)
return vlad_out
def compute_output_shape(self, input_shape):
input_shape = tf.TensorShape(input_shape).as_list()
return tf.TensorShape([input_shape[0], input_shape[-1] * self.num_clusters])
def get_config(self):
config = {"num_clusters": self.num_clusters}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
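def _netvlad_usage_sketch():
  """Editor's hedged usage sketch, not part of the original model code.
  Applies the NetVLAD layer above to a random batch of frame features; the
  sizes (batch of 2, 300 frames, 1024-d features, 8 clusters) are illustrative
  assumptions only.
  """
  frames = tf.random.normal((2, 300, 1024))
  vlad = NetVLAD(num_clusters=8, input_shape=(None, 300, 1024))
  return vlad(frames)  # shape: (2, 1024 * 8)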
class ContextGating(tf.keras.layers.Layer):
"""Implements the Context Gating Layer from https://arxiv.org/abs/1706.06905
Input shape:
2D tensor with shape: `(batch_size, feature_dim)`.
Output shape:
2D tensor with shape: `(batch_size, feature_dim)`.
"""
def __init__(self, input_shape, **kwargs):
super(ContextGating, self).__init__(**kwargs)
feature_dim = input_shape[-1]
if not isinstance(feature_dim, int):
feature_dim = feature_dim.value
self.fc = tf.keras.layers.Dense(
units=feature_dim,
activation=tf.nn.sigmoid,
kernel_regularizer=tf.keras.regularizers.l2(1e-5),
)
def call(self, model_input):
"""Apply the ContextGating module to the given input.
Args:
model_input: A tensor with shape [batch_size, feature_dim].
Returns:
A tensor with shape [batch_size, feature_dim].
Raises:
ValueError: If the `feature_dim` of model_input is not defined.
"""
model_input.shape.assert_has_rank(2)
feature_dim = model_input.shape.as_list()[-1]
if feature_dim is None:
raise ValueError("Last dimension must be defined.")
context_gate = self.fc(model_input)
output = tf.math.multiply(context_gate, model_input)
return output
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
base_config = super().get_config()
return dict(list(base_config.items()))
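def _context_gating_usage_sketch():
  """Editor's hedged sketch: gates a random 128-d activation; sizes are illustrative."""
  activations = tf.random.normal((4, 128))
  gate = ContextGating(input_shape=(None, 128))
  return gate(activations)  # shape: (4, 128)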
class MOELogistic(tf.keras.layers.Layer):
"""Implements a Mixture of Logistic Experts classifier.
Input shape:
2D tensor with shape: `(batch_size, feature_dim)`.
Output shape:
2D tensor with shape: `(batch_size, num_classes)`.
"""
def __init__(self, input_shape, num_classes, num_mixtures, **kwargs):
super(MOELogistic, self).__init__(**kwargs)
self.num_classes = num_classes
self.num_mixtures = num_mixtures
self.gate_fc = tf.keras.layers.Dense(
units=num_classes*(num_mixtures+1),
kernel_regularizer=tf.keras.regularizers.l2(1e-6),
)
self.expert_fc = tf.keras.layers.Dense(
units=num_classes*num_mixtures,
kernel_regularizer=tf.keras.regularizers.l2(1e-6),
)
def call(self, input):
"""Apply the MoE algorithm to the given input.
Args:
input: A tensor with shape [batch_size, feature_dim].
Returns:
A tensor with shape [batch_size, num_classes].
Raises:
ValueError: If the `feature_dim` of input is not defined.
"""
gate_activations = self.gate_fc(input)
expert_activations = self.expert_fc(input)
#Calculate the distribution across mixtures
gate_dist = tf.nn.softmax(tf.reshape(gate_activations, [-1, self.num_mixtures+1]))
expert_dist = tf.nn.sigmoid(tf.reshape(expert_activations, [-1, self.num_mixtures]))
probs = tf.reduce_sum(tf.math.multiply(gate_dist[:,:self.num_mixtures], expert_dist),1)
probs = tf.reshape(probs, [-1, self.num_classes])
return probs
def compute_output_shape(self, input_shape):
return (input_shape[0], self.num_classes)
def get_config(self):
base_config = super().get_config()
config = base_config.update({'number of classes': self.num_classes, 'number of mixtures': self.num_mixtures})
return config
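def _moe_logistic_usage_sketch():
  """Editor's hedged sketch: 10 classes and 2 mixtures over a random 128-d input."""
  features = tf.random.normal((4, 128))
  moe = MOELogistic(input_shape=(None, 128), num_classes=10, num_mixtures=2)
  return moe(features)  # shape: (4, 10)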
class VideoClassifier:
"""The Video Classifier model, implemented according to the winning model from the Youtube-8M Challenge.
The model can be found here: https://arxiv.org/pdf/1706.06905.pdf
Arguments:
num_clusters: the number of clusters to be used for NetVLAD. The audio clusters will be num_clusters/2.
video_input_shape: shape of the input video features. Shape of [batch_size, num_samples, video_feature_dim].
    audio_input_shape: shape of the input audio features. Shape of [batch_size, num_samples, audio_feature_dim].
Raises:
ValueError: If num_clusters is not divisible by 2.
ValueError: If the batch sizes of the audio_input_shape and video_input_shape do not match.
ValueError: If the number of samples of the audio_input_shape and video_input_shape do not match.
"""
def __init__(self, num_clusters, video_input_shape, audio_input_shape, num_classes, num_mixtures, fc_units, iterations, **kwargs):
super(VideoClassifier, self).__init__(**kwargs)
if num_clusters % 2 != 0:
raise ValueError("num_clusters must be divisible by 2.")
batch_size = video_input_shape[0]
if audio_input_shape[0] != batch_size:
raise ValueError("audio_input_shape[0] must equal video_input_shape[0]. Batch sizes must equal.")
if audio_input_shape[1] != video_input_shape[1]:
raise ValueError("audio_input_shape[1] must equal video_input_shape[1]. Number of samples must equal.")
self.num_frames = video_input_shape[1]
self.num_classes = num_classes
self.num_mixtures = num_mixtures
self.iterations = iterations
self.video_feature_dim = video_input_shape[2]
self.video_vlad = NetVLAD(num_clusters, input_shape=video_input_shape, name="video_vlad")
self.audio_vlad = NetVLAD(num_clusters//2, input_shape=audio_input_shape, name="audio_vlad")
#Relu6 is used as it is employed in the paper.
self.fc = tf.keras.layers.Dense(
units=fc_units,
activation=tf.nn.relu6,
kernel_regularizer=tf.keras.regularizers.l2(1e-5),
name="main_fc"
)
self.first_cg = ContextGating(input_shape=(batch_size, fc_units), name="first_cg")
self.moe = MOELogistic(input_shape=self.first_cg.compute_output_shape((batch_size, fc_units)), num_classes=self.num_classes, num_mixtures=self.num_mixtures, name="moe")
self.second_cg = ContextGating(input_shape=self.moe.compute_output_shape((batch_size, fc_units)), name="second_cg")
def build_model(self, input_shape, batch_size):
"""Perform one forward pass of the model.
Args:
model_input: input features of shape [batch_size, max_frames, video_feature_dim + audio_feature_dim].
Returns:
A tensor with shape [batch_size, num_classes].
"""
model_input = tf.keras.layers.Input(shape=input_shape, batch_size=batch_size)
video_input = model_input[:,:,:self.video_feature_dim]
audio_input = model_input[:,:,self.video_feature_dim:]
video_vlad_out = self.video_vlad(video_input)
audio_vlad_out = self.audio_vlad(audio_input)
vlad_out = tf.concat([video_vlad_out, audio_vlad_out], axis=1)
fc_out = self.fc(vlad_out)
cg_out = self.first_cg(fc_out)
moe_out = self.moe(cg_out)
final_out = self.second_cg(moe_out)
final_model = tf.keras.models.Model(inputs=model_input, outputs=final_out)
return final_model
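def _video_classifier_build_sketch():
  """Editor's hedged sketch of wiring the classifier together; 300 frames with
  1024-d video and 128-d audio features and 1000 classes are illustrative values.
  """
  clf = VideoClassifier(num_clusters=8, video_input_shape=(2, 300, 1024),
                        audio_input_shape=(2, 300, 128), num_classes=1000,
                        num_mixtures=2, fc_units=512, iterations=1)
  model = clf.build_model(input_shape=(300, 1024 + 128), batch_size=2)
  return model  # model.output_shape == (2, 1000)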
class SegmentClassifier:
"""The Segment Classifier model, implemented according to the winning model from the Youtube-8M Challenge.
The model can be found here: https://arxiv.org/abs/1911.08548
Arguments:
num_clusters: the number of clusters to be used for NetVLAD. The audio clusters will be num_clusters/2.
video_input_shape: shape of the input video features. Shape of [batch_size, num_samples, video_feature_dim].
    audio_input_shape: shape of the input audio features. Shape of [batch_size, num_samples, audio_feature_dim].
Raises:
ValueError: If num_clusters is not divisible by 2.
ValueError: If the batch sizes of the audio_input_shape and video_input_shape do not match.
ValueError: If the number of samples of the audio_input_shape and video_input_shape do not match.
"""
def __init__(self, num_clusters, video_input_shape, audio_input_shape, num_classes, fc_units, **kwargs):
super(SegmentClassifier, self).__init__(**kwargs)
if num_clusters % 2 != 0:
raise ValueError("num_clusters must be divisible by 2.")
batch_size = video_input_shape[0]
if audio_input_shape[0] != batch_size:
raise ValueError("audio_input_shape[0] must equal video_input_shape[0]. Batch sizes must equal.")
if audio_input_shape[1] != video_input_shape[1]:
raise ValueError("audio_input_shape[1] must equal video_input_shape[1]. Number of samples must equal.")
self.num_frames = video_input_shape[1]
self.num_classes = num_classes
self.video_feature_dim = video_input_shape[2]
self.video_vlad = NetVLAD(num_clusters, input_shape=video_input_shape, name="video_vlad")
self.audio_vlad = NetVLAD(num_clusters//2, input_shape=audio_input_shape, name="audio_vlad")
#Relu6 is used as it is employed in the paper.
self.fc = tf.keras.layers.Dense(
units=fc_units,
activation=tf.nn.relu6,
kernel_regularizer=tf.keras.regularizers.l2(1e-5),
name="main_fc"
)
self.first_cg = ContextGating(input_shape=(batch_size, fc_units), name="first_cg")
self.fc2 = tf.keras.layers.Dense(
units=1,
activation=tf.keras.activations.sigmoid,
kernel_regularizer=tf.keras.regularizers.l2(1e-5),
name="main_fc2"
)
def build_model(self, input_shape, second_input_shape, batch_size):
"""Perform one forward pass of the model.
Args:
input_shape: input shape for video features. Shape is of the form: [max_frames, video_feature_dim + audio_feature_dim].
second_input_shape: input shape of new class specific features. Shape is of the form [num_new_features]
Returns:
A tensor with shape [batch_size, num_classes].
"""
model_input = tf.keras.layers.Input(shape=input_shape, batch_size=batch_size)
model_input2 = tf.keras.layers.Input(shape=second_input_shape, batch_size=batch_size)
video_input = model_input[:,:,:self.video_feature_dim]
audio_input = model_input[:,:,self.video_feature_dim:]
video_vlad_out = self.video_vlad(video_input)
audio_vlad_out = self.audio_vlad(audio_input)
vlad_out = tf.concat([video_vlad_out, audio_vlad_out], axis=1)
vlad_out = tf.concat([vlad_out, model_input2], axis=1)
fc_out = self.fc(vlad_out)
cg_out = self.first_cg(fc_out)
final_out = self.fc2(cg_out)
final_model = tf.keras.models.Model(inputs=[model_input, model_input2], outputs=final_out)
return final_model
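def _segment_classifier_build_sketch():
  """Editor's hedged sketch: the second input carries extra class-specific
  features (16-d here); every dimension is an illustrative assumption.
  """
  clf = SegmentClassifier(num_clusters=8, video_input_shape=(2, 5, 1024),
                          audio_input_shape=(2, 5, 128), num_classes=1,
                          fc_units=512)
  model = clf.build_model(input_shape=(5, 1024 + 128),
                          second_input_shape=(16,), batch_size=2)
  return model  # model.output_shape == (2, 1)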
|
[
"tensorflow.keras.layers.Input",
"tensorflow.transpose",
"tensorflow.nn.l2_normalize",
"math.sqrt",
"tensorflow.concat",
"tensorflow.math.multiply",
"tensorflow.reshape",
"tensorflow.keras.models.Model",
"tensorflow.math.reduce_sum",
"tensorflow.TensorShape",
"tensorflow.keras.regularizers.l2"
] |
[((2358, 2395), 'tensorflow.reshape', 'tf.reshape', (['frames', '(-1, feature_dim)'], {}), '(frames, (-1, feature_dim))\n', (2368, 2395), True, 'import tensorflow as tf\n'), ((2446, 2505), 'tensorflow.reshape', 'tf.reshape', (['activation', '(-1, max_frames, self.num_clusters)'], {}), '(activation, (-1, max_frames, self.num_clusters))\n', (2456, 2505), True, 'import tensorflow as tf\n'), ((2532, 2586), 'tensorflow.math.reduce_sum', 'tf.math.reduce_sum', (['activation'], {'axis': '(-2)', 'keepdims': '(True)'}), '(activation, axis=-2, keepdims=True)\n', (2550, 2586), True, 'import tensorflow as tf\n'), ((2664, 2713), 'tensorflow.reshape', 'tf.reshape', (['frames', '(-1, max_frames, feature_dim)'], {}), '(frames, (-1, max_frames, feature_dim))\n', (2674, 2713), True, 'import tensorflow as tf\n'), ((2900, 2931), 'tensorflow.nn.l2_normalize', 'tf.nn.l2_normalize', (['vlad_out', '(1)'], {}), '(vlad_out, 1)\n', (2918, 2931), True, 'import tensorflow as tf\n'), ((2947, 3006), 'tensorflow.reshape', 'tf.reshape', (['vlad_out', '(-1, feature_dim * self.num_clusters)'], {}), '(vlad_out, (-1, feature_dim * self.num_clusters))\n', (2957, 3006), True, 'import tensorflow as tf\n'), ((3022, 3053), 'tensorflow.nn.l2_normalize', 'tf.nn.l2_normalize', (['vlad_out', '(1)'], {}), '(vlad_out, 1)\n', (3040, 3053), True, 'import tensorflow as tf\n'), ((3190, 3259), 'tensorflow.TensorShape', 'tf.TensorShape', (['[input_shape[0], input_shape[-1] * self.num_clusters]'], {}), '([input_shape[0], input_shape[-1] * self.num_clusters])\n', (3204, 3259), True, 'import tensorflow as tf\n'), ((4626, 4669), 'tensorflow.math.multiply', 'tf.math.multiply', (['context_gate', 'model_input'], {}), '(context_gate, model_input)\n', (4642, 4669), True, 'import tensorflow as tf\n'), ((6320, 6361), 'tensorflow.reshape', 'tf.reshape', (['probs', '[-1, self.num_classes]'], {}), '(probs, [-1, self.num_classes])\n', (6330, 6361), True, 'import tensorflow as tf\n'), ((9425, 9488), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': 'input_shape', 'batch_size': 'batch_size'}), '(shape=input_shape, batch_size=batch_size)\n', (9446, 9488), True, 'import tensorflow as tf\n'), ((9725, 9776), 'tensorflow.concat', 'tf.concat', (['[video_vlad_out, audio_vlad_out]'], {'axis': '(1)'}), '([video_vlad_out, audio_vlad_out], axis=1)\n', (9734, 9776), True, 'import tensorflow as tf\n'), ((9938, 9998), 'tensorflow.keras.models.Model', 'tf.keras.models.Model', ([], {'inputs': 'model_input', 'outputs': 'final_out'}), '(inputs=model_input, outputs=final_out)\n', (9959, 9998), True, 'import tensorflow as tf\n'), ((12723, 12786), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': 'input_shape', 'batch_size': 'batch_size'}), '(shape=input_shape, batch_size=batch_size)\n', (12744, 12786), True, 'import tensorflow as tf\n'), ((12806, 12876), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': 'second_input_shape', 'batch_size': 'batch_size'}), '(shape=second_input_shape, batch_size=batch_size)\n', (12827, 12876), True, 'import tensorflow as tf\n'), ((13110, 13161), 'tensorflow.concat', 'tf.concat', (['[video_vlad_out, audio_vlad_out]'], {'axis': '(1)'}), '([video_vlad_out, audio_vlad_out], axis=1)\n', (13119, 13161), True, 'import tensorflow as tf\n'), ((13177, 13220), 'tensorflow.concat', 'tf.concat', (['[vlad_out, model_input2]'], {'axis': '(1)'}), '([vlad_out, model_input2], axis=1)\n', (13186, 13220), True, 'import tensorflow as tf\n'), ((13338, 13414), 'tensorflow.keras.models.Model', 
'tf.keras.models.Model', ([], {'inputs': '[model_input, model_input2]', 'outputs': 'final_out'}), '(inputs=[model_input, model_input2], outputs=final_out)\n', (13359, 13414), True, 'import tensorflow as tf\n'), ((6069, 6126), 'tensorflow.reshape', 'tf.reshape', (['gate_activations', '[-1, self.num_mixtures + 1]'], {}), '(gate_activations, [-1, self.num_mixtures + 1])\n', (6079, 6126), True, 'import tensorflow as tf\n'), ((6158, 6213), 'tensorflow.reshape', 'tf.reshape', (['expert_activations', '[-1, self.num_mixtures]'], {}), '(expert_activations, [-1, self.num_mixtures])\n', (6168, 6213), True, 'import tensorflow as tf\n'), ((6242, 6305), 'tensorflow.math.multiply', 'tf.math.multiply', (['gate_dist[:, :self.num_mixtures]', 'expert_dist'], {}), '(gate_dist[:, :self.num_mixtures], expert_dist)\n', (6258, 6305), True, 'import tensorflow as tf\n'), ((1497, 1528), 'tensorflow.keras.regularizers.l2', 'tf.keras.regularizers.l2', (['(1e-05)'], {}), '(1e-05)\n', (1521, 1528), True, 'import tensorflow as tf\n'), ((2761, 2801), 'tensorflow.transpose', 'tf.transpose', (['activation'], {'perm': '(0, 2, 1)'}), '(activation, perm=(0, 2, 1))\n', (2773, 2801), True, 'import tensorflow as tf\n'), ((3141, 3168), 'tensorflow.TensorShape', 'tf.TensorShape', (['input_shape'], {}), '(input_shape)\n', (3155, 3168), True, 'import tensorflow as tf\n'), ((4035, 4066), 'tensorflow.keras.regularizers.l2', 'tf.keras.regularizers.l2', (['(1e-05)'], {}), '(1e-05)\n', (4059, 4066), True, 'import tensorflow as tf\n'), ((5418, 5449), 'tensorflow.keras.regularizers.l2', 'tf.keras.regularizers.l2', (['(1e-06)'], {}), '(1e-06)\n', (5442, 5449), True, 'import tensorflow as tf\n'), ((5564, 5595), 'tensorflow.keras.regularizers.l2', 'tf.keras.regularizers.l2', (['(1e-06)'], {}), '(1e-06)\n', (5588, 5595), True, 'import tensorflow as tf\n'), ((8675, 8706), 'tensorflow.keras.regularizers.l2', 'tf.keras.regularizers.l2', (['(1e-05)'], {}), '(1e-05)\n', (8699, 8706), True, 'import tensorflow as tf\n'), ((11934, 11965), 'tensorflow.keras.regularizers.l2', 'tf.keras.regularizers.l2', (['(1e-05)'], {}), '(1e-05)\n', (11958, 11965), True, 'import tensorflow as tf\n'), ((12207, 12238), 'tensorflow.keras.regularizers.l2', 'tf.keras.regularizers.l2', (['(1e-05)'], {}), '(1e-05)\n', (12231, 12238), True, 'import tensorflow as tf\n'), ((1747, 1769), 'math.sqrt', 'math.sqrt', (['feature_dim'], {}), '(feature_dim)\n', (1756, 1769), False, 'import math\n')]
|
import sys
import unittest
import os
import tempfile
from netCDF4 import Dataset
import numpy as np
from numpy.testing import assert_array_equal
FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
VL_NAME = 'vlen_type'
VL_BASETYPE = np.int16
DIM1_NAME = 'lon'
DIM2_NAME = 'lat'
nlons = 5; nlats = 5
VAR1_NAME = 'ragged'
VAR2_NAME = 'strings'
VAR3_NAME = 'strings_alt'
VAR4_NAME = 'string_scalar'
VAR5_NAME = 'vlen_scalar'
data = np.empty(nlats*nlons,object)
datas = np.empty(nlats*nlons,object)
nn = 0
for n in range(nlats*nlons):
nn = nn + 1
data[n] = np.arange(nn,dtype=VL_BASETYPE)
datas[n] = ''.join([chr(i) for i in range(97,97+nn+1)])
data = np.reshape(data,(nlats,nlons))
datas = np.reshape(datas,(nlats,nlons))
class VariablesTestCase(unittest.TestCase):
def setUp(self):
self.file = FILE_NAME
f = Dataset(self.file,'w')
vlen_t = f.createVLType(VL_BASETYPE, VL_NAME)
f.createDimension(DIM1_NAME,nlons)
f.createDimension(DIM2_NAME,nlats)
ragged = f.createVariable(VAR1_NAME, vlen_t,\
(DIM2_NAME,DIM1_NAME))
strings = f.createVariable(VAR2_NAME, str,
(DIM2_NAME,DIM1_NAME))
strings_alt = f.createVariable(VAR3_NAME, datas.astype(str).dtype,
(DIM2_NAME, DIM1_NAME))
string_scalar = f.createVariable(VAR4_NAME,str,())
vlen_scalar = f.createVariable(VAR5_NAME,vlen_t,())
ragged[:] = data
ragged[-1,-1] = data[-1,-1]
strings[:] = datas
strings[-2,-2] = datas[-2,-2]
strings_alt[:] = datas.astype(str)
string_scalar[...] = 'foo' #issue458
vlen_scalar[...] = np.array([1,2,3],np.int16)
f.close()
def tearDown(self):
# Remove the temporary files
os.remove(self.file)
def runTest(self):
"""testing vlen variables"""
f = Dataset(self.file, 'r')
v = f.variables[VAR1_NAME]
vs = f.variables[VAR2_NAME]
vs_alt = f.variables[VAR3_NAME]
assert list(f.vltypes.keys()) == [VL_NAME]
assert f.vltypes[VL_NAME].dtype == VL_BASETYPE
assert f.variables['string_scalar'][...] == 'foo'
assert_array_equal(f.variables['vlen_scalar'][...],np.array([1,2,3],np.int16))
data2 = v[:]
data2s = vs[:]
for i in range(nlons):
for j in range(nlats):
assert_array_equal(data2[j,i], data[j,i])
assert datas[j,i] == data2s[j,i]
assert_array_equal(datas, vs_alt[:])
f.close()
class TestInvalidDataType(unittest.TestCase):
def runTest(self):
f = Dataset(FILE_NAME, 'w', format='NETCDF3_CLASSIC')
f.createDimension('x', 1)
# using assertRaisesRegext as a context manager
# only works with python >= 2.7 (issue #497)
#with self.assertRaisesRegexp(ValueError, 'strings are only supported'):
# f.createVariable('foo', str, ('x',))
try:
f.createVariable('foo', str, ('x',))
except ValueError:
pass
f.close()
os.remove(FILE_NAME)
class TestScalarVlenString(unittest.TestCase):
# issue 333
def runTest(self):
f = Dataset(FILE_NAME, 'w', format='NETCDF4')
teststring = f.createVariable('teststring', str)
stringout = "yyyymmdd_hhmmss"
teststring[()] = stringout
f.close()
f = Dataset(FILE_NAME)
assert f.variables['teststring'][:] == stringout
f.close()
os.remove(FILE_NAME)
class TestIntegerIndex(unittest.TestCase):
# issue 526
def runTest(self):
strtest = Dataset(FILE_NAME, 'w', format='NETCDF4')
strtest.createDimension('tenstrings', 10)
strtest.createVariable('tenstrings', str, ['tenstrings'])
strtest['tenstrings'][np.int32(5)] = 'asdf'
strtest['tenstrings'][6.0] = 'asdf'
strtest.close()
f = Dataset(FILE_NAME)
assert f.variables['tenstrings'][np.int32(5)] == 'asdf'
assert f.variables['tenstrings'][6.0] == 'asdf'
f.close()
os.remove(FILE_NAME)
class TestObjectArrayIndexing(unittest.TestCase):
def setUp(self):
self.file = FILE_NAME
f = Dataset(self.file,'w')
vlen_t = f.createVLType(VL_BASETYPE, VL_NAME)
f.createDimension(DIM1_NAME,nlons)
f.createDimension(DIM2_NAME,nlats)
strings_alt = f.createVariable(VAR3_NAME, datas.astype(str).dtype,
(DIM2_NAME, DIM1_NAME))
strings_alt[:] = datas.astype(str)
f.close()
def tearDown(self):
# Remove the temporary files
os.remove(self.file)
def runTest(self):
"""testing vlen variables"""
f = Dataset(self.file, 'r')
vs_alt = f.variables[VAR3_NAME]
unicode_strings = vs_alt[:]
fancy_indexed = unicode_strings[0][[1,2,4]]
assert fancy_indexed[0] == 'abc'
assert fancy_indexed[1] == 'abcd'
assert fancy_indexed[2] == 'abcdef'
f.close()
class VlenAppendTestCase(unittest.TestCase):
def setUp(self):
import netCDF4
if netCDF4.__netcdf4libversion__ < "4.4.1":
self.skip = True
try:
self.skipTest("This test requires NetCDF 4.4.1 or later.")
except AttributeError:
# workaround for Python 2.6 (skipTest(reason) is new
# in Python 2.7)
pass
else:
self.skip = False
self.file = FILE_NAME
f = Dataset(self.file, 'w')
vlen_type = f.createVLType(np.float64, 'vltest')
f.createDimension('x', None)
v = f.createVariable('vl', vlen_type, 'x')
w = f.createVariable('vl2', np.float64, 'x')
f.close()
def tearDown(self):
# Remove the temporary files
os.remove(self.file)
def runTest(self):
"""testing appending to vlen variables (issue #527)."""
# workaround for Python 2.6
if self.skip:
return
f = Dataset(self.file, 'a')
w = f.variables["vl2"]
v = f.variables["vl"]
w[0:3] = np.arange(3, dtype=np.float64)
v[0] # sometimes crashes
v[0].tolist() # sometimes crashes
v[0].size # BOOM!
f.close()
class Vlen_ScaledInts(unittest.TestCase):
def setUp(self):
self.file = FILE_NAME
nc = Dataset(self.file, 'w')
vlen_type = nc.createVLType(np.uint8, 'vltest')
nc.createDimension('x', None)
v = nc.createVariable('vl', vlen_type, 'x')
v.scale_factor = 1./254.
v.missing_value=np.array(255,np.uint8)
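# with scale_factor 1/254, floats in [0, 1] pack into the uint8 range 0-254; 255 is reserved as the missing value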
# random lengths between 1 and 1000
ilen = np.random.randint(1,1000,size=100)
n = 0
for nlen in ilen:
data = np.random.uniform(low=0.0, high=1.0, size=nlen)
v[n] = data
if n==99: self.data = data
n += 1
nc.close()
def tearDown(self):
# Remove the temporary files
os.remove(self.file)
def runTest(self):
"""testing packing float vlens as scaled integers (issue #1003)."""
nc = Dataset(self.file)
data = nc['vl'][-1]
# check max error of compression
err = np.abs(data - self.data)
assert(err.max() < nc['vl'].scale_factor)
# turn off auto-scaling
nc.set_auto_maskandscale(False)
data = nc['vl'][-1]
assert(data[-1] == np.around(self.data[-1]/nc['vl'].scale_factor))
nc.close()
if __name__ == '__main__':
unittest.main()
|
[
"numpy.abs",
"numpy.reshape",
"numpy.testing.assert_array_equal",
"netCDF4.Dataset",
"numpy.int32",
"numpy.array",
"numpy.random.randint",
"numpy.random.uniform",
"numpy.empty",
"numpy.around",
"tempfile.NamedTemporaryFile",
"unittest.main",
"numpy.arange",
"os.remove"
] |
[((451, 482), 'numpy.empty', 'np.empty', (['(nlats * nlons)', 'object'], {}), '(nlats * nlons, object)\n', (459, 482), True, 'import numpy as np\n'), ((488, 519), 'numpy.empty', 'np.empty', (['(nlats * nlons)', 'object'], {}), '(nlats * nlons, object)\n', (496, 519), True, 'import numpy as np\n'), ((682, 714), 'numpy.reshape', 'np.reshape', (['data', '(nlats, nlons)'], {}), '(data, (nlats, nlons))\n', (692, 714), True, 'import numpy as np\n'), ((721, 754), 'numpy.reshape', 'np.reshape', (['datas', '(nlats, nlons)'], {}), '(datas, (nlats, nlons))\n', (731, 754), True, 'import numpy as np\n'), ((158, 213), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".nc"""', 'delete': '(False)'}), "(suffix='.nc', delete=False)\n", (185, 213), False, 'import tempfile\n'), ((583, 615), 'numpy.arange', 'np.arange', (['nn'], {'dtype': 'VL_BASETYPE'}), '(nn, dtype=VL_BASETYPE)\n', (592, 615), True, 'import numpy as np\n'), ((7659, 7674), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7672, 7674), False, 'import unittest\n'), ((863, 886), 'netCDF4.Dataset', 'Dataset', (['self.file', '"""w"""'], {}), "(self.file, 'w')\n", (870, 886), False, 'from netCDF4 import Dataset\n'), ((1708, 1737), 'numpy.array', 'np.array', (['[1, 2, 3]', 'np.int16'], {}), '([1, 2, 3], np.int16)\n', (1716, 1737), True, 'import numpy as np\n'), ((1823, 1843), 'os.remove', 'os.remove', (['self.file'], {}), '(self.file)\n', (1832, 1843), False, 'import os\n'), ((1917, 1940), 'netCDF4.Dataset', 'Dataset', (['self.file', '"""r"""'], {}), "(self.file, 'r')\n", (1924, 1940), False, 'from netCDF4 import Dataset\n'), ((2528, 2564), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['datas', 'vs_alt[:]'], {}), '(datas, vs_alt[:])\n', (2546, 2564), False, 'from numpy.testing import assert_array_equal\n'), ((2666, 2715), 'netCDF4.Dataset', 'Dataset', (['FILE_NAME', '"""w"""'], {'format': '"""NETCDF3_CLASSIC"""'}), "(FILE_NAME, 'w', format='NETCDF3_CLASSIC')\n", (2673, 2715), False, 'from netCDF4 import Dataset\n'), ((3122, 3142), 'os.remove', 'os.remove', (['FILE_NAME'], {}), '(FILE_NAME)\n', (3131, 3142), False, 'import os\n'), ((3242, 3283), 'netCDF4.Dataset', 'Dataset', (['FILE_NAME', '"""w"""'], {'format': '"""NETCDF4"""'}), "(FILE_NAME, 'w', format='NETCDF4')\n", (3249, 3283), False, 'from netCDF4 import Dataset\n'), ((3444, 3462), 'netCDF4.Dataset', 'Dataset', (['FILE_NAME'], {}), '(FILE_NAME)\n', (3451, 3462), False, 'from netCDF4 import Dataset\n'), ((3546, 3566), 'os.remove', 'os.remove', (['FILE_NAME'], {}), '(FILE_NAME)\n', (3555, 3566), False, 'import os\n'), ((3668, 3709), 'netCDF4.Dataset', 'Dataset', (['FILE_NAME', '"""w"""'], {'format': '"""NETCDF4"""'}), "(FILE_NAME, 'w', format='NETCDF4')\n", (3675, 3709), False, 'from netCDF4 import Dataset\n'), ((3958, 3976), 'netCDF4.Dataset', 'Dataset', (['FILE_NAME'], {}), '(FILE_NAME)\n', (3965, 3976), False, 'from netCDF4 import Dataset\n'), ((4123, 4143), 'os.remove', 'os.remove', (['FILE_NAME'], {}), '(FILE_NAME)\n', (4132, 4143), False, 'import os\n'), ((4259, 4282), 'netCDF4.Dataset', 'Dataset', (['self.file', '"""w"""'], {}), "(self.file, 'w')\n", (4266, 4282), False, 'from netCDF4 import Dataset\n'), ((4691, 4711), 'os.remove', 'os.remove', (['self.file'], {}), '(self.file)\n', (4700, 4711), False, 'import os\n'), ((4785, 4808), 'netCDF4.Dataset', 'Dataset', (['self.file', '"""r"""'], {}), "(self.file, 'r')\n", (4792, 4808), False, 'from netCDF4 import Dataset\n'), ((5591, 5614), 'netCDF4.Dataset', 'Dataset', (['self.file', '"""w"""'], 
{}), "(self.file, 'w')\n", (5598, 5614), False, 'from netCDF4 import Dataset\n'), ((5901, 5921), 'os.remove', 'os.remove', (['self.file'], {}), '(self.file)\n', (5910, 5921), False, 'import os\n'), ((6100, 6123), 'netCDF4.Dataset', 'Dataset', (['self.file', '"""a"""'], {}), "(self.file, 'a')\n", (6107, 6123), False, 'from netCDF4 import Dataset\n'), ((6202, 6232), 'numpy.arange', 'np.arange', (['(3)'], {'dtype': 'np.float64'}), '(3, dtype=np.float64)\n', (6211, 6232), True, 'import numpy as np\n'), ((6502, 6525), 'netCDF4.Dataset', 'Dataset', (['self.file', '"""w"""'], {}), "(self.file, 'w')\n", (6509, 6525), False, 'from netCDF4 import Dataset\n'), ((6729, 6752), 'numpy.array', 'np.array', (['(255)', 'np.uint8'], {}), '(255, np.uint8)\n', (6737, 6752), True, 'import numpy as np\n'), ((6811, 6847), 'numpy.random.randint', 'np.random.randint', (['(1)', '(1000)'], {'size': '(100)'}), '(1, 1000, size=100)\n', (6828, 6847), True, 'import numpy as np\n'), ((7123, 7143), 'os.remove', 'os.remove', (['self.file'], {}), '(self.file)\n', (7132, 7143), False, 'import os\n'), ((7256, 7274), 'netCDF4.Dataset', 'Dataset', (['self.file'], {}), '(self.file)\n', (7263, 7274), False, 'from netCDF4 import Dataset\n'), ((7358, 7382), 'numpy.abs', 'np.abs', (['(data - self.data)'], {}), '(data - self.data)\n', (7364, 7382), True, 'import numpy as np\n'), ((2275, 2304), 'numpy.array', 'np.array', (['[1, 2, 3]', 'np.int16'], {}), '([1, 2, 3], np.int16)\n', (2283, 2304), True, 'import numpy as np\n'), ((3856, 3867), 'numpy.int32', 'np.int32', (['(5)'], {}), '(5)\n', (3864, 3867), True, 'import numpy as np\n'), ((6905, 6952), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.0)', 'high': '(1.0)', 'size': 'nlen'}), '(low=0.0, high=1.0, size=nlen)\n', (6922, 6952), True, 'import numpy as np\n'), ((7560, 7608), 'numpy.around', 'np.around', (["(self.data[-1] / nc['vl'].scale_factor)"], {}), "(self.data[-1] / nc['vl'].scale_factor)\n", (7569, 7608), True, 'import numpy as np\n'), ((2429, 2472), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['data2[j, i]', 'data[j, i]'], {}), '(data2[j, i], data[j, i])\n', (2447, 2472), False, 'from numpy.testing import assert_array_equal\n'), ((4018, 4029), 'numpy.int32', 'np.int32', (['(5)'], {}), '(5)\n', (4026, 4029), True, 'import numpy as np\n')]
|
# Copyright 2019 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utility to run functions and methods once."""
import uuid
from sonnet.src import utils
_ONCE_PROPERTY = "_snt_once"
def _check_no_output(output):
if output is not None:
raise ValueError("@snt.once decorated functions cannot return values")
def once(f):
"""Decorator which ensures a wrapped method is only ever run once.
>>> @snt.once
... def f():
... print('Hello, world!')
>>> f()
Hello, world!
>>> f()
>>> f()
If `f` is a method then it will be evaluated once per instance:
>>> class MyObject:
... @snt.once
... def f(self):
... print('Hello, world!')
>>> o = MyObject()
>>> o.f()
Hello, world!
>>> o.f()
>>> o2 = MyObject()
>>> o2.f()
Hello, world!
>>> o.f()
>>> o2.f()
If an error is raised during execution of `f` it will be raised to the user.
Next time the method is run, it will be treated as not having run before.
Args:
f: A function to wrap which should only be called once.
Returns:
Wrapped version of `f` which will only evaluate `f` the first time it is
called.
"""
# TODO(tomhennigan) Perhaps some more human friendly identifier?
once_id = uuid.uuid4()
@utils.decorator
def wrapper(wrapped, instance, args, kwargs):
"""Decorator which ensures a wrapped method is only ever run once."""
if instance is None:
# NOTE: We can't use the weakset since you can't weakref None.
if not wrapper.seen_none:
_check_no_output(wrapped(*args, **kwargs))
wrapper.seen_none = True
return
# Get or set the `seen` set for this object.
seen = getattr(instance, _ONCE_PROPERTY, None)
if seen is None:
seen = set()
setattr(instance, _ONCE_PROPERTY, seen)
if once_id not in seen:
_check_no_output(wrapped(*args, **kwargs))
seen.add(once_id)
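# plain functions have no instance to attach per-object state to, so their first run is tracked on the wrapper itself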
wrapper.seen_none = False
decorated = wrapper(f) # pylint: disable=no-value-for-parameter,assignment-from-none
decorated.__snt_once_wrapped__ = f
return decorated
|
[
"uuid.uuid4"
] |
[((1923, 1935), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1933, 1935), False, 'import uuid\n')]
|
import csv
import datetime
from collections import defaultdict
from django.contrib import messages
from django.http.response import FileResponse
from django.shortcuts import redirect, render
from django.utils import timezone
from django.views.decorators.http import require_GET, require_http_methods, require_POST
from authentication.admin_authentication import (authenticate_admin,
authenticate_researcher_study_access, forest_enabled)
from constants.data_access_api_constants import CHUNK_FIELDS
from constants.forest_constants import ForestTaskStatus, ForestTree
from database.data_access_models import ChunkRegistry
from database.study_models import Study
from database.tableau_api_models import ForestTask
from database.user_models import Participant
from forms.django_forms import CreateTasksForm
from libs.http_utils import easy_url
from libs.internal_types import ParticipantQuerySet, ResearcherRequest
from libs.streaming_zip import zip_generator
from libs.utils.date_utils import daterange
from middleware.abort_middleware import abort
from serializers.forest_serializers import ForestTaskCsvSerializer, ForestTaskSerializer
@require_GET
@authenticate_researcher_study_access
@forest_enabled
def analysis_progress(request: ResearcherRequest, study_id=None):
study: Study = Study.objects.get(pk=study_id)
participants: ParticipantQuerySet = Participant.objects.filter(study=study_id)
# generate chart of study analysis progress logs
trackers = ForestTask.objects.filter(participant__in=participants).order_by("created_on")
start_date = (study.get_earliest_data_time_bin() or study.created_on).date()
end_date = (study.get_latest_data_time_bin() or timezone.now()).date()
# this code simultaneously builds up the chart of most recent forest results for date ranges
# by participant and tree, and tracks the metadata
params = dict()
results = defaultdict(lambda: "--")
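# results maps (participant_id, tree, date) -> latest task status ('--' if never run);
# params maps the same key -> forest_param_id of the latest successful run (None otherwise)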
tracker: ForestTask
for tracker in trackers:
for date in daterange(tracker.data_date_start, tracker.data_date_end, inclusive=True):
results[(tracker.participant_id, tracker.forest_tree, date)] = tracker.status
if tracker.status == tracker.status.success:
params[(tracker.participant_id, tracker.forest_tree, date)] = tracker.forest_param_id
else:
params[(tracker.participant_id, tracker.forest_tree, date)] = None
# generate the date range for charting
dates = list(daterange(start_date, end_date, inclusive=True))
chart = []
for participant in participants:
for tree in ForestTree.values():
row = [participant.patient_id, tree] + \
[results[(participant.id, tree, date)] for date in dates]
chart.append(row)
# ensure that within each tree, only a single set of param values are used (only the most recent runs
# are considered, and unsuccessful runs are assumed to invalidate old runs, clearing params)
params_conflict = False
for tree in set([k[1] for k in params.keys()]):
if len(set([m for k, m in params.items() if m is not None and k[1] == tree])) > 1:
params_conflict = True
break
return render(
request,
'forest/analysis_progress.html',
context=dict(
study=study,
chart_columns=["participant", "tree"] + dates,
status_choices=ForestTaskStatus,
params_conflict=params_conflict,
start_date=start_date,
end_date=end_date,
chart=chart # this uses the jinja safe filter and should never involve user input
)
)
@require_http_methods(['GET', 'POST'])
@authenticate_admin
@forest_enabled
def create_tasks(request: ResearcherRequest, study_id=None):
# Only a SITE admin can queue forest tasks
if not request.session_researcher.site_admin:
return abort(403)
try:
study = Study.objects.get(pk=study_id)
except Study.DoesNotExist:
return abort(404)
# FIXME: remove this double endpoint pattern, it is bad.
if request.method == "GET":
return render_create_tasks(request, study)
form = CreateTasksForm(data=request.POST, study=study)
if not form.is_valid():
error_messages = [
f'"{field}": {message}'
for field, messages in form.errors.items()
for message in messages
]
error_messages_string = "\n".join(error_messages)
messages.warning(request, f"Errors:\n\n{error_messages_string}")
return render_create_tasks(request, study)
form.save()
messages.success(request, "Forest tasks successfully queued!")
return redirect(easy_url("forest_pages.task_log", study_id=study_id))
@require_GET
@authenticate_researcher_study_access
@forest_enabled
def task_log(request: ResearcherRequest, study_id=None):
study = Study.objects.get(pk=study_id)
forest_tasks = ForestTask.objects.filter(participant__study_id=study_id).order_by("-created_on")
return render(
request,
"forest/task_log.html",
context=dict(
study=study,
is_site_admin=request.session_researcher.site_admin,
status_choices=ForestTaskStatus,
forest_log=ForestTaskSerializer(forest_tasks, many=True).data,
)
)
@require_GET
@authenticate_admin
def download_task_log(request: ResearcherRequest):
forest_tasks = ForestTask.objects.order_by("created_on")
return FileResponse(
stream_forest_task_log_csv(forest_tasks),
content_type="text/csv",
filename=f"forest_task_log_{timezone.now().isoformat()}.csv",
as_attachment=True,
)
@require_POST
@authenticate_admin
@forest_enabled
def cancel_task(request: ResearcherRequest, study_id, forest_task_external_id):
if not request.session_researcher.site_admin:
return abort(403)
number_updated = \
ForestTask.objects.filter(
external_id=forest_task_external_id, status=ForestTaskStatus.queued
).update(
status=ForestTaskStatus.cancelled,
stacktrace=f"Canceled by {request.session_researcher.username} on {datetime.date.today()}",
)
if number_updated > 0:
messages.success(request, "Forest task successfully cancelled.")
else:
messages.warning(request, "Sorry, we were unable to find or cancel this Forest task.")
return redirect(easy_url("forest_pages.task_log", study_id=study_id))
@require_GET
@authenticate_admin
@forest_enabled
def download_task_data(request: ResearcherRequest, study_id, forest_task_external_id):
try:
tracker: ForestTask = ForestTask.objects.get(
external_id=forest_task_external_id, participant__study_id=study_id
)
except ForestTask.DoesNotExist:
return abort(404)
chunks = ChunkRegistry.objects.filter(participant=tracker.participant).values(*CHUNK_FIELDS)
f = FileResponse(
zip_generator(chunks),
content_type="zip",
as_attachment=True,
filename=f"{tracker.get_slug()}.zip",
)
f.set_headers(None)
return f
def stream_forest_task_log_csv(forest_tasks):
buffer = CSVBuffer()
writer = csv.DictWriter(buffer, fieldnames=ForestTaskCsvSerializer.Meta.fields)
writer.writeheader()
yield buffer.read()
for forest_task in forest_tasks:
writer.writerow(ForestTaskCsvSerializer(forest_task).data)
yield buffer.read()
def render_create_tasks(request: ResearcherRequest, study: Study):
participants = Participant.objects.filter(study=study)
try:
start_date = ChunkRegistry.objects.filter(participant__in=participants).earliest("time_bin")
end_date = ChunkRegistry.objects.filter(participant__in=participants).latest("time_bin")
start_date = start_date.time_bin.date()
end_date = end_date.time_bin.date()
except ChunkRegistry.DoesNotExist:
start_date = study.created_on.date()
end_date = timezone.now().date()
return render(
request,
"forest/create_tasks.html",
context=dict(
study=study,
participants=list(
study.participants.order_by("patient_id").values_list("patient_id", flat=True)
),
trees=ForestTree.choices(),
start_date=start_date.strftime('%Y-%m-%d'),
end_date=end_date.strftime('%Y-%m-%d')
)
)
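# Minimal file-like object for csv.DictWriter: it holds only the most recently written line so rows can be yielded one at a time.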
class CSVBuffer:
line = ""
def read(self):
return self.line
def write(self, line):
self.line = line
|
[
"csv.DictWriter",
"forms.django_forms.CreateTasksForm",
"libs.streaming_zip.zip_generator",
"django.contrib.messages.warning",
"libs.http_utils.easy_url",
"libs.utils.date_utils.daterange",
"serializers.forest_serializers.ForestTaskCsvSerializer",
"constants.forest_constants.ForestTree.values",
"database.data_access_models.ChunkRegistry.objects.filter",
"database.tableau_api_models.ForestTask.objects.order_by",
"django.utils.timezone.now",
"database.tableau_api_models.ForestTask.objects.get",
"database.user_models.Participant.objects.filter",
"serializers.forest_serializers.ForestTaskSerializer",
"middleware.abort_middleware.abort",
"datetime.date.today",
"constants.forest_constants.ForestTree.choices",
"database.study_models.Study.objects.get",
"django.views.decorators.http.require_http_methods",
"database.tableau_api_models.ForestTask.objects.filter",
"collections.defaultdict",
"django.contrib.messages.success"
] |
[((3696, 3733), 'django.views.decorators.http.require_http_methods', 'require_http_methods', (["['GET', 'POST']"], {}), "(['GET', 'POST'])\n", (3716, 3733), False, 'from django.views.decorators.http import require_GET, require_http_methods, require_POST\n'), ((1295, 1325), 'database.study_models.Study.objects.get', 'Study.objects.get', ([], {'pk': 'study_id'}), '(pk=study_id)\n', (1312, 1325), False, 'from database.study_models import Study\n'), ((1366, 1408), 'database.user_models.Participant.objects.filter', 'Participant.objects.filter', ([], {'study': 'study_id'}), '(study=study_id)\n', (1392, 1408), False, 'from database.user_models import Participant\n'), ((1913, 1939), 'collections.defaultdict', 'defaultdict', (["(lambda : '--')"], {}), "(lambda : '--')\n", (1924, 1939), False, 'from collections import defaultdict\n'), ((4227, 4274), 'forms.django_forms.CreateTasksForm', 'CreateTasksForm', ([], {'data': 'request.POST', 'study': 'study'}), '(data=request.POST, study=study)\n', (4242, 4274), False, 'from forms.django_forms import CreateTasksForm\n'), ((4679, 4741), 'django.contrib.messages.success', 'messages.success', (['request', '"""Forest tasks successfully queued!"""'], {}), "(request, 'Forest tasks successfully queued!')\n", (4695, 4741), False, 'from django.contrib import messages\n'), ((4954, 4984), 'database.study_models.Study.objects.get', 'Study.objects.get', ([], {'pk': 'study_id'}), '(pk=study_id)\n', (4971, 4984), False, 'from database.study_models import Study\n'), ((5507, 5548), 'database.tableau_api_models.ForestTask.objects.order_by', 'ForestTask.objects.order_by', (['"""created_on"""'], {}), "('created_on')\n", (5534, 5548), False, 'from database.tableau_api_models import ForestTask\n'), ((7323, 7393), 'csv.DictWriter', 'csv.DictWriter', (['buffer'], {'fieldnames': 'ForestTaskCsvSerializer.Meta.fields'}), '(buffer, fieldnames=ForestTaskCsvSerializer.Meta.fields)\n', (7337, 7393), False, 'import csv\n'), ((7668, 7707), 'database.user_models.Participant.objects.filter', 'Participant.objects.filter', ([], {'study': 'study'}), '(study=study)\n', (7694, 7707), False, 'from database.user_models import Participant\n'), ((2012, 2085), 'libs.utils.date_utils.daterange', 'daterange', (['tracker.data_date_start', 'tracker.data_date_end'], {'inclusive': '(True)'}), '(tracker.data_date_start, tracker.data_date_end, inclusive=True)\n', (2021, 2085), False, 'from libs.utils.date_utils import daterange\n'), ((2502, 2549), 'libs.utils.date_utils.daterange', 'daterange', (['start_date', 'end_date'], {'inclusive': '(True)'}), '(start_date, end_date, inclusive=True)\n', (2511, 2549), False, 'from libs.utils.date_utils import daterange\n'), ((2628, 2647), 'constants.forest_constants.ForestTree.values', 'ForestTree.values', ([], {}), '()\n', (2645, 2647), False, 'from constants.forest_constants import ForestTaskStatus, ForestTree\n'), ((3943, 3953), 'middleware.abort_middleware.abort', 'abort', (['(403)'], {}), '(403)\n', (3948, 3953), False, 'from middleware.abort_middleware import abort\n'), ((3979, 4009), 'database.study_models.Study.objects.get', 'Study.objects.get', ([], {'pk': 'study_id'}), '(pk=study_id)\n', (3996, 4009), False, 'from database.study_models import Study\n'), ((4538, 4604), 'django.contrib.messages.warning', 'messages.warning', (['request', 'f"""Errors:\n\n{error_messages_string}"""'], {}), '(request, f"""Errors:\n\n{error_messages_string}""")\n', (4554, 4604), False, 'from django.contrib import messages\n'), ((4762, 4814), 'libs.http_utils.easy_url', 'easy_url', 
(['"""forest_pages.task_log"""'], {'study_id': 'study_id'}), "('forest_pages.task_log', study_id=study_id)\n", (4770, 4814), False, 'from libs.http_utils import easy_url\n'), ((5958, 5968), 'middleware.abort_middleware.abort', 'abort', (['(403)'], {}), '(403)\n', (5963, 5968), False, 'from middleware.abort_middleware import abort\n'), ((6335, 6399), 'django.contrib.messages.success', 'messages.success', (['request', '"""Forest task successfully cancelled."""'], {}), "(request, 'Forest task successfully cancelled.')\n", (6351, 6399), False, 'from django.contrib import messages\n'), ((6418, 6508), 'django.contrib.messages.warning', 'messages.warning', (['request', '"""Sorry, we were unable to find or cancel this Forest task."""'], {}), "(request,\n 'Sorry, we were unable to find or cancel this Forest task.')\n", (6434, 6508), False, 'from django.contrib import messages\n'), ((6530, 6582), 'libs.http_utils.easy_url', 'easy_url', (['"""forest_pages.task_log"""'], {'study_id': 'study_id'}), "('forest_pages.task_log', study_id=study_id)\n", (6538, 6582), False, 'from libs.http_utils import easy_url\n'), ((6761, 6856), 'database.tableau_api_models.ForestTask.objects.get', 'ForestTask.objects.get', ([], {'external_id': 'forest_task_external_id', 'participant__study_id': 'study_id'}), '(external_id=forest_task_external_id,\n participant__study_id=study_id)\n', (6783, 6856), False, 'from database.tableau_api_models import ForestTask\n'), ((7069, 7090), 'libs.streaming_zip.zip_generator', 'zip_generator', (['chunks'], {}), '(chunks)\n', (7082, 7090), False, 'from libs.streaming_zip import zip_generator\n'), ((1482, 1537), 'database.tableau_api_models.ForestTask.objects.filter', 'ForestTask.objects.filter', ([], {'participant__in': 'participants'}), '(participant__in=participants)\n', (1507, 1537), False, 'from database.tableau_api_models import ForestTask\n'), ((4056, 4066), 'middleware.abort_middleware.abort', 'abort', (['(404)'], {}), '(404)\n', (4061, 4066), False, 'from middleware.abort_middleware import abort\n'), ((5004, 5061), 'database.tableau_api_models.ForestTask.objects.filter', 'ForestTask.objects.filter', ([], {'participant__study_id': 'study_id'}), '(participant__study_id=study_id)\n', (5029, 5061), False, 'from database.tableau_api_models import ForestTask\n'), ((6009, 6108), 'database.tableau_api_models.ForestTask.objects.filter', 'ForestTask.objects.filter', ([], {'external_id': 'forest_task_external_id', 'status': 'ForestTaskStatus.queued'}), '(external_id=forest_task_external_id, status=\n ForestTaskStatus.queued)\n', (6034, 6108), False, 'from database.tableau_api_models import ForestTask\n'), ((6926, 6936), 'middleware.abort_middleware.abort', 'abort', (['(404)'], {}), '(404)\n', (6931, 6936), False, 'from middleware.abort_middleware import abort\n'), ((6955, 7016), 'database.data_access_models.ChunkRegistry.objects.filter', 'ChunkRegistry.objects.filter', ([], {'participant': 'tracker.participant'}), '(participant=tracker.participant)\n', (6983, 7016), False, 'from database.data_access_models import ChunkRegistry\n'), ((1699, 1713), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (1711, 1713), False, 'from django.utils import timezone\n'), ((7509, 7545), 'serializers.forest_serializers.ForestTaskCsvSerializer', 'ForestTaskCsvSerializer', (['forest_task'], {}), '(forest_task)\n', (7532, 7545), False, 'from serializers.forest_serializers import ForestTaskCsvSerializer, ForestTaskSerializer\n'), ((7738, 7796), 
'database.data_access_models.ChunkRegistry.objects.filter', 'ChunkRegistry.objects.filter', ([], {'participant__in': 'participants'}), '(participant__in=participants)\n', (7766, 7796), False, 'from database.data_access_models import ChunkRegistry\n'), ((7837, 7895), 'database.data_access_models.ChunkRegistry.objects.filter', 'ChunkRegistry.objects.filter', ([], {'participant__in': 'participants'}), '(participant__in=participants)\n', (7865, 7895), False, 'from database.data_access_models import ChunkRegistry\n'), ((6260, 6281), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (6279, 6281), False, 'import datetime\n'), ((8110, 8124), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (8122, 8124), False, 'from django.utils import timezone\n'), ((8410, 8430), 'constants.forest_constants.ForestTree.choices', 'ForestTree.choices', ([], {}), '()\n', (8428, 8430), False, 'from constants.forest_constants import ForestTaskStatus, ForestTree\n'), ((5334, 5379), 'serializers.forest_serializers.ForestTaskSerializer', 'ForestTaskSerializer', (['forest_tasks'], {'many': '(True)'}), '(forest_tasks, many=True)\n', (5354, 5379), False, 'from serializers.forest_serializers import ForestTaskCsvSerializer, ForestTaskSerializer\n'), ((5693, 5707), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (5705, 5707), False, 'from django.utils import timezone\n')]
|
#
# this manager stores directly into the db with DatabaseUpdate
from cloudmesh.mongo.DataBaseDecorator import DatabaseUpdate
from cloudmesh.mongo.CmDatabase import CmDatabase
from cloudmesh.common.console import Console
from cloudmesh.storage.Provider import Provider
import os
from datetime import datetime
class Vdir(object):
def __init__(self):
self.cm = CmDatabase()
self.col = self.cm.db['local-vdir']
self.directory = 'vdir'
def cd(self, dirname=None):
try:
if dirname is None:
if self.directory == 'vdir':
Console.error("Root directory reached.")
else:
cwd = self.col.find_one({'type': 'directory', 'cm.name': self.directory})
self.directory = cwd['parent']
pwd = self.col.find_one({'type': 'directory', 'cm.name': self.directory})
return pwd
else:
directory = self.col.find_one({'type': 'directory', 'cm.name': dirname})
if directory['parent'] == self.directory:
self.directory = dirname
pwd = self.col.find_one({'type': 'directory', 'cm.name': self.directory})
return pwd
else:
Console.error('Directory does not exist at this location.')
except Exception as e:
print(e)
@DatabaseUpdate()
def mkdir(self, dirname):
try:
directory = self.col.find_one({"cm.name": dirname, 'type': 'directory'})
if directory is None:
dir_dict = dict()
dir_dict['cm'] = {
'name': dirname,
'kind': 'vdir',
'cloud': 'local'
}
dir_dict['type'] = 'directory'
dir_dict['parent'] = self.directory
dir_dict['cm']['created'] = datetime.utcnow()
dir_dict['cm']['modified'] = datetime.utcnow()
return dir_dict
else:
Console.error("Directory with that name exists.")
except Exception as e:
print(e)
def ls(self, directory=None):
try:
dash = '-' * 40
if directory is not None:
cloudmesh = self.col.find({'$or': [{'vdirectory': directory}, {'parent': directory}]})
count = self.col.count_documents({'$or': [{'vdirectory': directory}, {'parent': directory}]})
else:
cloudmesh = self.col.find({'$or': [{'vdirectory': self.directory}, {'parent': self.directory}]})
count = self.col.count_documents({'$or': [{'vdirectory': self.directory}, {'parent': self.directory}]})
locations = "{:<20} {:>}".format("Name", "Location") + "\n" + dash + "\n"
for i in range(0, count):
entry = cloudmesh[i]
if entry['type'] == 'fileendpoint':
location = entry['provider'] + ":" + entry['cloud_directory'] + "/" + entry['filename']
else:
if self.directory == '':
location = 'Vdir'
else:
location = self.directory
locations += "{:<20} {:>}".format(entry['cm']['name'], location) + "\n"
print(locations)
return locations
except Exception as e:
print(e)
@DatabaseUpdate()
def add(self, endpoint, dir_and_name):
try:
dirname = os.path.dirname(dir_and_name).split('/')[-1]
if dirname == '':
dirname = 'vdir'
directory = 'vdir'
else:
directory = self.col.find_one({"cm.name": dirname, 'type': 'directory'})
filename = os.path.basename(dir_and_name)
file = self.col.find_one({"cm.name": filename, 'type': 'fileendpoint'})
if directory is not None and file is None:
file_dict = dict()
file_dict['cm'] = {
'name': filename,
'kind': 'vdir',
'cloud': 'local'
}
file_dict['type'] = 'fileendpoint'
file_dict['vdirectory'] = dirname
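# endpoint is expected in the form 'provider:/cloud/dir/filename'; split out provider, directory and file name below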
file_dict['cloud_directory'] = os.path.dirname(endpoint).split(':')[1]
file_dict['filename'] = os.path.basename(endpoint)
file_dict['provider'] = os.path.dirname(endpoint).split(':')[0]
file_dict['cm']['created'] = datetime.utcnow()
file_dict['cm']['modified'] = datetime.utcnow()
return file_dict
elif directory is None:
Console.error("Virtual directory not found.")
elif file is not None:
print(file)
Console.error("File with that name already exists.")
except Exception as e:
print(e)
def get(self, name, destination=None):
try:
doc = self.col.find_one({'cm.name': name, 'type': 'fileendpoint'})
if doc is not None:
self.col.update_one({'cm.name': name, 'type': 'fileendpoint'},
{'$set': {'modified': datetime.utcnow()}})
service = doc['provider']
source = os.path.join(doc['cloud_directory'], doc['filename'])
print(source)
if destination is None:
destination = '~/.cloudmesh/vdir'
p = Provider(service)
file = p.get(source, destination, False)
return file
else:
Console.error("File not found.")
except Exception as e:
print(e)
def delete(self, dir_or_name):
try:
result = self.col.find_one({'cm.name': dir_or_name})
self.col.delete_one({'cm.name': dir_or_name})
return result
except Exception as e:
print(e)
def status(self, dir_or_name):
try:
result = self.col.find_one({'cm.name': dir_or_name})
return result
except Exception as e:
print(e)
|
[
"cloudmesh.storage.Provider.Provider",
"datetime.datetime.utcnow",
"cloudmesh.mongo.DataBaseDecorator.DatabaseUpdate",
"os.path.join",
"os.path.dirname",
"os.path.basename",
"cloudmesh.mongo.CmDatabase.CmDatabase",
"cloudmesh.common.console.Console.error"
] |
[((1435, 1451), 'cloudmesh.mongo.DataBaseDecorator.DatabaseUpdate', 'DatabaseUpdate', ([], {}), '()\n', (1449, 1451), False, 'from cloudmesh.mongo.DataBaseDecorator import DatabaseUpdate\n'), ((3491, 3507), 'cloudmesh.mongo.DataBaseDecorator.DatabaseUpdate', 'DatabaseUpdate', ([], {}), '()\n', (3505, 3507), False, 'from cloudmesh.mongo.DataBaseDecorator import DatabaseUpdate\n'), ((375, 387), 'cloudmesh.mongo.CmDatabase.CmDatabase', 'CmDatabase', ([], {}), '()\n', (385, 387), False, 'from cloudmesh.mongo.CmDatabase import CmDatabase\n'), ((3859, 3889), 'os.path.basename', 'os.path.basename', (['dir_and_name'], {}), '(dir_and_name)\n', (3875, 3889), False, 'import os\n'), ((1954, 1971), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1969, 1971), False, 'from datetime import datetime\n'), ((2017, 2034), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (2032, 2034), False, 'from datetime import datetime\n'), ((2101, 2150), 'cloudmesh.common.console.Console.error', 'Console.error', (['"""Directory with that name exists."""'], {}), "('Directory with that name exists.')\n", (2114, 2150), False, 'from cloudmesh.common.console import Console\n'), ((4457, 4483), 'os.path.basename', 'os.path.basename', (['endpoint'], {}), '(endpoint)\n', (4473, 4483), False, 'import os\n'), ((4609, 4626), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (4624, 4626), False, 'from datetime import datetime\n'), ((4673, 4690), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (4688, 4690), False, 'from datetime import datetime\n'), ((5399, 5452), 'os.path.join', 'os.path.join', (["doc['cloud_directory']", "doc['filename']"], {}), "(doc['cloud_directory'], doc['filename'])\n", (5411, 5452), False, 'import os\n'), ((5597, 5614), 'cloudmesh.storage.Provider.Provider', 'Provider', (['service'], {}), '(service)\n', (5605, 5614), False, 'from cloudmesh.storage.Provider import Provider\n'), ((5734, 5766), 'cloudmesh.common.console.Console.error', 'Console.error', (['"""File not found."""'], {}), "('File not found.')\n", (5747, 5766), False, 'from cloudmesh.common.console import Console\n'), ((607, 647), 'cloudmesh.common.console.Console.error', 'Console.error', (['"""Root directory reached."""'], {}), "('Root directory reached.')\n", (620, 647), False, 'from cloudmesh.common.console import Console\n'), ((1317, 1376), 'cloudmesh.common.console.Console.error', 'Console.error', (['"""Directory does not exist at this location."""'], {}), "('Directory does not exist at this location.')\n", (1330, 1376), False, 'from cloudmesh.common.console import Console\n'), ((4776, 4821), 'cloudmesh.common.console.Console.error', 'Console.error', (['"""Virtual directory not found."""'], {}), "('Virtual directory not found.')\n", (4789, 4821), False, 'from cloudmesh.common.console import Console\n'), ((3586, 3615), 'os.path.dirname', 'os.path.dirname', (['dir_and_name'], {}), '(dir_and_name)\n', (3601, 3615), False, 'import os\n'), ((4901, 4953), 'cloudmesh.common.console.Console.error', 'Console.error', (['"""File with that name already exists."""'], {}), "('File with that name already exists.')\n", (4914, 4953), False, 'from cloudmesh.common.console import Console\n'), ((4377, 4402), 'os.path.dirname', 'os.path.dirname', (['endpoint'], {}), '(endpoint)\n', (4392, 4402), False, 'import os\n'), ((4524, 4549), 'os.path.dirname', 'os.path.dirname', (['endpoint'], {}), '(endpoint)\n', (4539, 4549), False, 'import os\n'), ((5311, 5328), 'datetime.datetime.utcnow', 'datetime.utcnow', 
([], {}), '()\n', (5326, 5328), False, 'from datetime import datetime\n')]
|
import json
import logging
from redash.query_runner import *
from redash.utils import JSONEncoder
logger = logging.getLogger(__name__)
try:
from influxdb import InfluxDBClusterClient
enabled = True
except ImportError:
enabled = False
def _transform_result(results):
result_columns = []
result_rows = []
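# first pass: collect the union of series columns and tag keys as the result columns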
for result in results:
for series in result.raw.get('series', []):
for column in series['columns']:
if column not in result_columns:
result_columns.append(column)
tags = series.get('tags', {})
for key in tags.keys():
if key not in result_columns:
result_columns.append(key)
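# second pass: emit one row per point, pulling each column from the series values or its tags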
for result in results:
for series in result.raw.get('series', []):
for point in series['values']:
result_row = {}
for column in result_columns:
tags = series.get('tags', {})
if column in tags:
result_row[column] = tags[column]
elif column in series['columns']:
index = series['columns'].index(column)
value = point[index]
result_row[column] = value
result_rows.append(result_row)
return json.dumps({
"columns": [{'name': c} for c in result_columns],
"rows": result_rows
}, cls=JSONEncoder)
class InfluxDB(BaseQueryRunner):
noop_query = "show measurements limit 1"
@classmethod
def configuration_schema(cls):
return {
'type': 'object',
'properties': {
'url': {
'type': 'string'
}
},
'required': ['url']
}
@classmethod
def enabled(cls):
return enabled
@classmethod
def annotate_query(cls):
return False
@classmethod
def type(cls):
return "influxdb"
def run_query(self, query, user):
client = InfluxDBClusterClient.from_DSN(self.configuration['url'])
logger.debug("influxdb url: %s", self.configuration['url'])
logger.debug("influxdb got query: %s", query)
try:
results = client.query(query)
if not isinstance(results, list):
results = [results]
json_data = _transform_result(results)
error = None
except Exception as ex:
json_data = None
error = ex.message
return json_data, error
register(InfluxDB)
|
[
"logging.getLogger",
"influxdb.InfluxDBClusterClient.from_DSN",
"json.dumps"
] |
[((109, 136), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (126, 136), False, 'import logging\n'), ((1345, 1449), 'json.dumps', 'json.dumps', (["{'columns': [{'name': c} for c in result_columns], 'rows': result_rows}"], {'cls': 'JSONEncoder'}), "({'columns': [{'name': c} for c in result_columns], 'rows':\n result_rows}, cls=JSONEncoder)\n", (1355, 1449), False, 'import json\n'), ((2063, 2120), 'influxdb.InfluxDBClusterClient.from_DSN', 'InfluxDBClusterClient.from_DSN', (["self.configuration['url']"], {}), "(self.configuration['url'])\n", (2093, 2120), False, 'from influxdb import InfluxDBClusterClient\n')]
|
import numpy as np
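# read an h x w integer grid, take the minimum of each row, then print the maximum of those minima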
if __name__ == '__main__':
    h, w = map(int, input().split())
    row_list = []
    for i in range(h):
        single_row = list(map(int, input().split()))
        np_row = np.array(single_row)
        row_list.append(np_row)
    min_of_each_row = np.min(row_list, axis=1)
    max_of_min = np.max(min_of_each_row)
    print(max_of_min)
|
[
"numpy.max",
"numpy.array",
"numpy.min"
] |
[((288, 312), 'numpy.min', 'np.min', (['row_list'], {'axis': '(1)'}), '(row_list, axis=1)\n', (294, 312), True, 'import numpy as np\n'), ((334, 357), 'numpy.max', 'np.max', (['min_of_each_row'], {}), '(min_of_each_row)\n', (340, 357), True, 'import numpy as np\n'), ((206, 226), 'numpy.array', 'np.array', (['single_row'], {}), '(single_row)\n', (214, 226), True, 'import numpy as np\n')]
|
import uuid
import pickle
import pytest
import argparse
from collections import namedtuple
from six import text_type
from allure.common import AllureImpl, StepContext
from allure.constants import Status, AttachmentType, Severity, \
FAILED_STATUSES, Label, SKIPPED_STATUSES
from allure.utils import parent_module, parent_down_from_module, labels_of, \
all_of, get_exception_message, now, mangle_testnames
from allure.structure import TestCase, TestStep, Attach, TestSuite, Failure, TestLabel
def pytest_addoption(parser):
parser.getgroup("reporting").addoption('--alluredir',
action="store",
dest="allurereportdir",
metavar="DIR",
default=None,
help="Generate Allure report in the specified directory (may not exist)")
severities = [v for (_, v) in all_of(Severity)]
def label_type(name, legal_values=set()):
"""
argparse-type factory for labelish things.
processed value is set of tuples (name, value).
:param name: of label type (for future TestLabel things)
:param legal_values: a `set` of values that are legal for this label, if any limit whatsoever
:raises ArgumentTypeError: if `legal_values` are given and some of the supplied values fall outside of them
"""
def a_label_type(string):
atoms = set(string.split(','))
if legal_values and not atoms < legal_values:
raise argparse.ArgumentTypeError('Illegal {} values: {}, only [{}] are allowed'.format(name, ', '.join(atoms - legal_values), ', '.join(legal_values)))
return set((name, v) for v in atoms)
return a_label_type
parser.getgroup("general").addoption('--allure_severities',
action="store",
dest="allureseverities",
metavar="SEVERITIES_SET",
default={},
type=label_type(name=Label.SEVERITY, legal_values=set(severities)),
help="""Comma-separated list of severity names.
Tests only with these severities will be run.
Possible values are:%s.""" % ', '.join(severities))
parser.getgroup("general").addoption('--allure_features',
action="store",
dest="allurefeatures",
metavar="FEATURES_SET",
default={},
type=label_type(name=Label.FEATURE),
help="""Comma-separated list of feature names.
Run tests that have at least one of the specified feature labels.""")
parser.getgroup("general").addoption('--allure_stories',
action="store",
dest="allurestories",
metavar="STORIES_SET",
default={},
type=label_type(name=Label.STORY),
help="""Comma-separated list of story names.
Run tests that have at least one of the specified story labels.""")
def pytest_configure(config):
reportdir = config.option.allurereportdir
if reportdir: # we actually record something
allure_impl = AllureImpl(reportdir)
testlistener = AllureTestListener(config)
pytest.allure._allurelistener = testlistener
config.pluginmanager.register(testlistener)
if not hasattr(config, 'slaveinput'):
# on xdist-master node do all the important stuff
config.pluginmanager.register(AllureAgregatingListener(allure_impl, config))
config.pluginmanager.register(AllureCollectionListener(allure_impl))
class AllureTestListener(object):
"""
Per-test listener.
Is responsible for recording in-test data and for attaching it to the test report thing.
The per-test reports are handled by `AllureAgregatingListener` at the `pytest_runtest_logreport` hook.
"""
def __init__(self, config):
self.config = config
self.environment = {}
self.test = None
# FIXME: that flag makes us pre-report failures in the makereport hook.
# it is here to cope with xdist's behavior regarding -x.
# see self.pytest_runtest_makereport and AllureAgregatingListener.pytest_sessionfinish
self._magicaldoublereport = hasattr(self.config, 'slaveinput') and self.config.getvalue("maxfail")
@pytest.mark.hookwrapper
def pytest_runtest_protocol(self, item, nextitem):
try:
# for common items
description = item.function.__doc__
except AttributeError:
# for doctests that has no `function` attribute
description = item.reportinfo()[2]
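# build the TestCase up front; steps and attachments are appended to it while the test runs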
self.test = TestCase(name='.'.join(mangle_testnames([x.name for x in parent_down_from_module(item)])),
description=description,
start=now(),
attachments=[],
labels=labels_of(item),
status=None,
steps=[],
id=str(uuid.uuid4())) # for later resolution in AllureAgregatingListener.pytest_sessionfinish
self.stack = [self.test]
yield
self.test = None
self.stack = []
def attach(self, title, contents, attach_type):
"""
Store attachment object in current state for later actual write in the `AllureAgregatingListener.write_attach`
"""
attach = Attach(source=contents, # we later re-save those, oh my...
title=title,
type=attach_type)
self.stack[-1].attachments.append(attach)
def dynamic_issue(self, *issues):
"""
Attaches ``issues`` to the current active case
"""
if self.test:
self.test.labels.extend([TestLabel(name=Label.ISSUE, value=issue) for issue in issues])
def description(self, description):
"""
Sets description for the test
"""
if self.test:
self.test.description = description
def start_step(self, name):
"""
Starts a new :py:class:`allure.structure.TestStep` with given ``name``,
pushes it to the ``self.stack`` and returns the step.
"""
step = TestStep(name=name,
title=name,
start=now(),
attachments=[],
steps=[])
self.stack[-1].steps.append(step)
self.stack.append(step)
return step
def stop_step(self):
"""
Stops the step at the top of ``self.stack``
"""
step = self.stack.pop()
step.stop = now()
def _fill_case(self, report, call, pyteststatus, status):
"""
Finalizes with important data
:param report: py.test's `TestReport`
:param call: py.test's `CallInfo`
:param pyteststatus: the failed/xfailed/xpassed thing
:param status: a :py:class:`allure.constants.Status` entry
"""
[self.attach(name, contents, AttachmentType.TEXT) for (name, contents) in dict(report.sections).items()]
self.test.stop = now()
self.test.status = status
if status in FAILED_STATUSES:
self.test.failure = Failure(message=get_exception_message(call.excinfo, pyteststatus, report),
trace=report.longrepr or hasattr(report, 'wasxfail') and report.wasxfail)
elif status in SKIPPED_STATUSES:
skip_message = type(report.longrepr) == tuple and report.longrepr[2] or report.wasxfail
trim_msg_len = 89
short_message = skip_message.split('\n')[0][:trim_msg_len]
# FIXME: see pytest.runner.pytest_runtest_makereport
self.test.failure = Failure(message=(short_message + '...' * (len(skip_message) > trim_msg_len)),
trace=status == Status.PENDING and report.longrepr or short_message != skip_message and skip_message or '')
def report_case(self, item, report):
"""
Adds `self.test` to the `report` in a `AllureAggegatingListener`-understood way
"""
parent = parent_module(item)
# we attach a five-tuple: (test module ID, test module name, test module doc, environment, TestCase)
report.__dict__.update(_allure_result=pickle.dumps((parent.nodeid,
parent.module.__name__,
parent.module.__doc__ or '',
self.environment,
self.test)))
@pytest.mark.hookwrapper
def pytest_runtest_makereport(self, item, call):
"""
Decides when to actually report things.
pytest runs this (naturally) three times -- with report.when being:
setup <--- fixtures are to be initialized in this one
call <--- when this finishes the main code has finished
teardown <--- tears down fixtures (that still possess important info)
`setup` and `teardown` are always called, but `call` is called only if `setup` passes.
See :py:func:`_pytest.runner.runtestprotocol` for proofs / ideas.
The "other side" (AllureAggregatingListener) expects us to send EXACTLY ONE test report (it wont break, but it will duplicate cases in the report -- which is bad.
So we work hard to decide exact moment when we call `_stop_case` to do that. This method may benefit from FSM (we keep track of what has already happened via self.test.status)
Expected behavior is:
FAILED when call fails and others OK
BROKEN when either setup OR teardown are broken (and call may be anything)
PENDING if skipped and xfailed
SKIPPED if skipped and not xfailed
"""
report = (yield).get_result()
status = self.config.hook.pytest_report_teststatus(report=report)
status = status and status[0]
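# the teststatus hook returns a (category, short letter, verbose word) tuple; keep only the category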
if report.when == 'call':
if report.passed:
self._fill_case(report, call, status, Status.PASSED)
elif report.failed:
self._fill_case(report, call, status, Status.FAILED)
# FIXME: this is here only to work around xdist's stupid -x thing when it exits BEFORE THE TEARDOWN test log. Meh, I should file an issue to xdist
if self._magicaldoublereport:
# to minimize ze impact
self.report_case(item, report)
elif report.skipped:
if hasattr(report, 'wasxfail'):
self._fill_case(report, call, status, Status.PENDING)
else:
self._fill_case(report, call, status, Status.CANCELED)
elif report.when == 'setup': # setup / teardown
if report.failed:
self._fill_case(report, call, status, Status.BROKEN)
elif report.skipped:
if hasattr(report, 'wasxfail'):
self._fill_case(report, call, status, Status.PENDING)
else:
self._fill_case(report, call, status, Status.CANCELED)
elif report.when == 'teardown':
# as teardown is always called for testitem -- report our status here
if not report.passed:
if self.test.status not in FAILED_STATUSES:
# if test was OK but failed at teardown => broken
self._fill_case(report, call, status, Status.BROKEN)
else:
# mark it broken so, well, someone has idea of teardown failure
# still, that's no big deal -- test has already failed
# TODO: think about that once again
self.test.status = Status.BROKEN
# if a test isn't marked as "unreported" or it has failed, add it to the report.
if not item.get_marker("unreported") or self.test.status in FAILED_STATUSES:
self.report_case(item, report)
def pytest_runtest_setup(item):
item_labels = set((l.name, l.value) for l in labels_of(item)) # see label_type
arg_labels = set().union(item.config.option.allurefeatures,
item.config.option.allurestories,
item.config.option.allureseverities)
if arg_labels and not item_labels & arg_labels:
pytest.skip('Not suitable with selected labels: %s.' % ', '.join(text_type(l) for l in sorted(arg_labels)))
class LazyInitStepContext(StepContext):
"""
This is a step context used for decorated steps.
It makes it possible to create step decorators before pytest_configure runs, when no AllureListener has been initiated yet.
"""
def __init__(self, allure_helper, title):
self.allure_helper = allure_helper
self.title = title
self.step = None
@property
def allure(self):
listener = self.allure_helper.get_listener()
# if listener has `stack` we are inside a test
# record steps only when that
# FIXME: this breaks encapsulation a lot
if hasattr(listener, 'stack'):
return listener
class AllureHelper(object):
"""
This object holds various utility methods used from ``pytest.allure`` namespace, like ``pytest.allure.attach``
"""
def __init__(self):
self._allurelistener = None # FIXME: this gets injected elsewhere, like in the pytest_configure
def get_listener(self):
return self._allurelistener
def attach(self, name, contents, type=AttachmentType.TEXT): # @ReservedAssignment
"""
Attaches ``contents`` to a current context with given ``name`` and ``type``.
"""
if self._allurelistener:
self._allurelistener.attach(name, contents, type)
def label(self, name, *value):
"""
A decorator factory that returns ``pytest.mark`` for a given label.
"""
allure_label = getattr(pytest.mark, '%s.%s' % (Label.DEFAULT, name))
return allure_label(*value)
def severity(self, severity):
"""
A decorator factory that returns ``pytest.mark`` for a given allure ``level``.
"""
return self.label(Label.SEVERITY, severity)
def feature(self, *features):
"""
A decorator factory that returns ``pytest.mark`` for a given features.
"""
return self.label(Label.FEATURE, *features)
def story(self, *stories):
"""
A decorator factory that returns ``pytest.mark`` for a given stories.
"""
return self.label(Label.STORY, *stories)
def issue(self, *issues):
"""
A decorator factory that returns ``pytest.mark`` for a given issues.
"""
return self.label(Label.ISSUE, *issues)
def dynamic_issue(self, *issues):
"""
Mark test ``issues`` from inside.
"""
if self._allurelistener:
self._allurelistener.dynamic_issue(*issues)
def description(self, description):
"""
Sets description for the test
"""
if self._allurelistener:
self._allurelistener.description(description)
def testcase(self, *testcases):
"""
A decorator factory that returns ``pytest.mark`` for a given testcases.
"""
return self.label(Label.TESTCASE, *testcases)
def step(self, title):
"""
A contextmanager/decorator for steps.
TODO: when moving to python 3, rework this with ``contextlib.ContextDecorator``.
Usage examples::
import pytest
def test_foo():
with pytest.allure.step('mystep'):
assert False
@pytest.allure.step('make test data')
def make_test_data_bar():
raise ValueError('No data today')
def test_bar():
assert make_test_data_bar()
@pytest.allure.step
def make_test_data_baz():
raise ValueError('No data today')
def test_baz():
assert make_test_data_baz()
@pytest.fixture()
@pytest.allure.step('test fixture')
def steppy_fixture():
return 1
def test_baz(steppy_fixture):
assert steppy_fixture
"""
if callable(title):
return LazyInitStepContext(self, title.__name__)(title)
else:
return LazyInitStepContext(self, title)
def single_step(self, text):
"""
Writes single line to report.
"""
if self._allurelistener:
with self.step(text):
pass
def environment(self, **env_dict):
if self._allurelistener:
self._allurelistener.environment.update(env_dict)
@property
def attach_type(self):
return AttachmentType
@property
def severity_level(self):
return Severity
def __getattr__(self, attr):
"""
Provides fancy shortcuts for severity::
# these are the same
pytest.allure.CRITICAL
pytest.allure.severity(pytest.allure.severity_level.CRITICAL)
"""
if attr in dir(Severity) and not attr.startswith('_'):
return self.severity(getattr(Severity, attr))
else:
raise AttributeError
MASTER_HELPER = AllureHelper()
def pytest_namespace():
return {'allure': MASTER_HELPER}
class AllureAgregatingListener(object):
"""
Listens to pytest hooks to generate reports for common tests.
"""
def __init__(self, impl, config):
self.impl = impl
# module's nodeid => TestSuite object
self.suites = {}
def pytest_sessionfinish(self):
"""
We are done and have all the results in `self.suites`
Lets write em down.
But first we kinda-unify the test cases.
We expect cases to come from AllureTestListener -- and they have an .id field to manifest their identity.
Of all the test cases in suite.tests we keep only the LAST one with the same ID -- because logreport can be sent MORE THAN ONE TIME
(namely, if the test fails and then gets broken -- to cope with xdist's -x behavior we have to have tests even at CALL failures)
TODO: do it in a better, more efficient way
"""
for s in self.suites.values():
if s.tests: # nobody likes empty suites
s.stop = max(case.stop for case in s.tests)
known_ids = set()
refined_tests = []
for t in s.tests[::-1]:
if t.id not in known_ids:
known_ids.add(t.id)
refined_tests.append(t)
s.tests = refined_tests[::-1]
with self.impl._reportfile('%s-testsuite.xml' % uuid.uuid4()) as f:
self.impl._write_xml(f, s)
self.impl.store_environment()
def write_attach(self, attachment):
"""
Writes an attachment object from the `AllureTestListener` to the FS, fixing its fields
:param attachment: a :py:class:`allure.structure.Attach` object
"""
# OMG, that is bad
attachment.source = self.impl._save_attach(attachment.source, attachment.type)
attachment.type = attachment.type.mime_type
def pytest_runtest_logreport(self, report):
if hasattr(report, '_allure_result'):
module_id, module_name, module_doc, environment, testcase = pickle.loads(report._allure_result)
report._allure_result = None # so actual pickled data is garbage-collected, see https://github.com/allure-framework/allure-python/issues/98
self.impl.environment.update(environment)
for a in testcase.iter_attachments():
self.write_attach(a)
self.suites.setdefault(module_id, TestSuite(name=module_name,
description=module_doc,
tests=[],
labels=[],
start=testcase.start, # first case starts the suite!
stop=None)).tests.append(testcase)
CollectFail = namedtuple('CollectFail', 'name status message trace')
class AllureCollectionListener(object):
"""
Listens to pytest collection-related hooks
to generate reports for modules that failed to collect.
"""
def __init__(self, impl):
self.impl = impl
self.fails = []
def pytest_collectreport(self, report):
if not report.passed:
if report.failed:
status = Status.BROKEN
else:
status = Status.CANCELED
self.fails.append(CollectFail(name=mangle_testnames(report.nodeid.split("::"))[-1],
status=status,
message=get_exception_message(None, None, report),
trace=report.longrepr))
def pytest_sessionfinish(self):
"""
Creates a testsuite with collection failures if there were any.
"""
if self.fails:
self.impl.start_suite(name='test_collection_phase',
title='Collection phase',
description='This is the tests collection phase. Failures are modules that failed to collect.')
for fail in self.fails:
self.impl.start_case(name=fail.name.split(".")[-1])
self.impl.stop_case(status=fail.status, message=fail.message, trace=fail.trace)
self.impl.stop_suite()
|
[
"allure.common.AllureImpl",
"collections.namedtuple",
"allure.structure.TestLabel",
"allure.utils.get_exception_message",
"allure.structure.Attach",
"pickle.dumps",
"pickle.loads",
"allure.utils.parent_down_from_module",
"allure.utils.now",
"uuid.uuid4",
"allure.utils.labels_of",
"allure.structure.TestSuite",
"allure.utils.all_of",
"six.text_type",
"allure.utils.parent_module"
] |
[((21294, 21348), 'collections.namedtuple', 'namedtuple', (['"""CollectFail"""', '"""name status message trace"""'], {}), "('CollectFail', 'name status message trace')\n", (21304, 21348), False, 'from collections import namedtuple\n'), ((3821, 3842), 'allure.common.AllureImpl', 'AllureImpl', (['reportdir'], {}), '(reportdir)\n', (3831, 3842), False, 'from allure.common import AllureImpl, StepContext\n'), ((6157, 6211), 'allure.structure.Attach', 'Attach', ([], {'source': 'contents', 'title': 'title', 'type': 'attach_type'}), '(source=contents, title=title, type=attach_type)\n', (6163, 6211), False, 'from allure.structure import TestCase, TestStep, Attach, TestSuite, Failure, TestLabel\n'), ((7389, 7394), 'allure.utils.now', 'now', ([], {}), '()\n', (7392, 7394), False, 'from allure.utils import parent_module, parent_down_from_module, labels_of, all_of, get_exception_message, now, mangle_testnames\n'), ((7876, 7881), 'allure.utils.now', 'now', ([], {}), '()\n', (7879, 7881), False, 'from allure.utils import parent_module, parent_down_from_module, labels_of, all_of, get_exception_message, now, mangle_testnames\n'), ((8913, 8932), 'allure.utils.parent_module', 'parent_module', (['item'], {}), '(item)\n', (8926, 8932), False, 'from allure.utils import parent_module, parent_down_from_module, labels_of, all_of, get_exception_message, now, mangle_testnames\n'), ((984, 1000), 'allure.utils.all_of', 'all_of', (['Severity'], {}), '(Severity)\n', (990, 1000), False, 'from allure.utils import parent_module, parent_down_from_module, labels_of, all_of, get_exception_message, now, mangle_testnames\n'), ((20456, 20491), 'pickle.loads', 'pickle.loads', (['report._allure_result'], {}), '(report._allure_result)\n', (20468, 20491), False, 'import pickle\n'), ((5535, 5540), 'allure.utils.now', 'now', ([], {}), '()\n', (5538, 5540), False, 'from allure.utils import parent_module, parent_down_from_module, labels_of, all_of, get_exception_message, now, mangle_testnames\n'), ((5623, 5638), 'allure.utils.labels_of', 'labels_of', (['item'], {}), '(item)\n', (5632, 5638), False, 'from allure.utils import parent_module, parent_down_from_module, labels_of, all_of, get_exception_message, now, mangle_testnames\n'), ((7060, 7065), 'allure.utils.now', 'now', ([], {}), '()\n', (7063, 7065), False, 'from allure.utils import parent_module, parent_down_from_module, labels_of, all_of, get_exception_message, now, mangle_testnames\n'), ((9088, 9203), 'pickle.dumps', 'pickle.dumps', (["(parent.nodeid, parent.module.__name__, parent.module.__doc__ or '', self.\n environment, self.test)"], {}), "((parent.nodeid, parent.module.__name__, parent.module.__doc__ or\n '', self.environment, self.test))\n", (9100, 9203), False, 'import pickle\n'), ((12981, 12996), 'allure.utils.labels_of', 'labels_of', (['item'], {}), '(item)\n', (12990, 12996), False, 'from allure.utils import parent_module, parent_down_from_module, labels_of, all_of, get_exception_message, now, mangle_testnames\n'), ((5757, 5769), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (5767, 5769), False, 'import uuid\n'), ((6523, 6563), 'allure.structure.TestLabel', 'TestLabel', ([], {'name': 'Label.ISSUE', 'value': 'issue'}), '(name=Label.ISSUE, value=issue)\n', (6532, 6563), False, 'from allure.structure import TestCase, TestStep, Attach, TestSuite, Failure, TestLabel\n'), ((8003, 8060), 'allure.utils.get_exception_message', 'get_exception_message', (['call.excinfo', 'pyteststatus', 'report'], {}), '(call.excinfo, pyteststatus, report)\n', (8024, 8060), False, 'from 
allure.utils import parent_module, parent_down_from_module, labels_of, all_of, get_exception_message, now, mangle_testnames\n'), ((13336, 13348), 'six.text_type', 'text_type', (['l'], {}), '(l)\n', (13345, 13348), False, 'from six import text_type\n'), ((22002, 22043), 'allure.utils.get_exception_message', 'get_exception_message', (['None', 'None', 'report'], {}), '(None, None, report)\n', (22023, 22043), False, 'from allure.utils import parent_module, parent_down_from_module, labels_of, all_of, get_exception_message, now, mangle_testnames\n'), ((19787, 19799), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (19797, 19799), False, 'import uuid\n'), ((20836, 20945), 'allure.structure.TestSuite', 'TestSuite', ([], {'name': 'module_name', 'description': 'module_doc', 'tests': '[]', 'labels': '[]', 'start': 'testcase.start', 'stop': 'None'}), '(name=module_name, description=module_doc, tests=[], labels=[],\n start=testcase.start, stop=None)\n', (20845, 20945), False, 'from allure.structure import TestCase, TestStep, Attach, TestSuite, Failure, TestLabel\n'), ((5412, 5441), 'allure.utils.parent_down_from_module', 'parent_down_from_module', (['item'], {}), '(item)\n', (5435, 5441), False, 'from allure.utils import parent_module, parent_down_from_module, labels_of, all_of, get_exception_message, now, mangle_testnames\n')]
|
bl_info = {
"name": "STRING",
"blender": (2, 80, 0),
"category": "Object",
    "author": "<NAME>"
}
import bpy
import bmesh
class STRING(bpy.types.Operator):
    """Connect selected objects with string-like curves""" # Use this as a tooltip for menu items and buttons.
bl_idname = "object.stringtool_ot" # Unique identifier for buttons and menu items to reference.
bl_label = "String" # Display name in the interface.
bl_options = {'REGISTER', 'UNDO'} # Enable undo for the operator.
bdepth: bpy.props.FloatProperty(name = "String Thickness", min = 0.1, max = 5, precision = 2 )
def execute(self, context):
# The original script
####################
        # to create an edge between two given objects
def Edgify(ob1,ob2):
loc1 = ob1.location
loc2 = ob2.location
verts = [loc1,loc2]
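            # add a temporary plane, wipe its geometry, then rebuild the mesh from the two
            # endpoint vertices via bmesh and connect them with a single edge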
bpy.ops.mesh.primitive_plane_add(location = (0,0,0))
bpy.ops.object.editmode_toggle()
bpy.ops.mesh.select_all(action='SELECT')
bpy.ops.mesh.delete(type='VERT')
            # create the two vertices in a bmesh and write them into the current mesh data
pipe = bpy.context.object.data
bm = bmesh.new()
for v in verts:
bm.verts.new(v)
bpy.ops.object.editmode_toggle()
bm.to_mesh(pipe)
bm.free()
bpy.ops.object.editmode_toggle()
bpy.ops.mesh.select_all(action='SELECT')
bpy.ops.mesh.edge_face_add()
bpy.ops.object.editmode_toggle()
def string(olist):
edges = []
l = len(olist)
for x in range(l):
for y in range(l):
                    if x < y:
Edgify(olist[x], olist[y])
edges.append(bpy.context.active_object)
return edges
def piper(xlist):
bpy.ops.object.select_all(action='DESELECT')
for x in xlist:
x.select_set(True)
bpy.ops.object.join()
bpy.ops.object.convert(target='CURVE')
def check(olist):
if len(olist) == 0:
                self.report({'INFO'}, 'No objects selected')
return 0
else:
return 1
        oblist = bpy.context.selected_objects
        if not check(oblist):
            return {'CANCELLED'}
        Edgelist = string(oblist)
piper(Edgelist)
actob = bpy.context.active_object
actob.data.bevel_depth = self.bdepth
bpy.ops.object.shade_smooth()
########################
return {'FINISHED'} # Lets Blender know the operator finished successfully.
class STRING_PT(bpy.types.Panel):
bl_idname = "object_stringtool_pt"
bl_label = "String"
bl_space_type = 'VIEW_3D'
bl_region_type = 'UI'
bl_category = "newprop"
def draw(self, context):
# You can set the property values that should be used when the user
# presses the button in the UI.
layout = self.layout
props = layout.operator('object.stringtool_ot')
def register():
bpy.utils.register_class(STRING)
def unregister():
bpy.utils.unregister_class(STRING)
# This allows you to run the script directly from Blender's Text editor
# to test the add-on without having to install it.
if __name__ == "__main__":
register()
|
[
"bpy.utils.unregister_class",
"bpy.ops.object.editmode_toggle",
"bpy.ops.mesh.delete",
"bpy.ops.mesh.select_all",
"bpy.ops.object.select_all",
"bpy.props.FloatProperty",
"bmesh.new",
"bpy.ops.object.shade_smooth",
"bpy.ops.object.convert",
"bpy.ops.mesh.primitive_plane_add",
"bpy.ops.object.join",
"bpy.utils.register_class",
"bpy.ops.mesh.edge_face_add"
] |
[((548, 625), 'bpy.props.FloatProperty', 'bpy.props.FloatProperty', ([], {'name': '"""String Thickness"""', 'min': '(0.1)', 'max': '(5)', 'precision': '(2)'}), "(name='String Thickness', min=0.1, max=5, precision=2)\n", (571, 625), False, 'import bpy\n'), ((3407, 3439), 'bpy.utils.register_class', 'bpy.utils.register_class', (['STRING'], {}), '(STRING)\n', (3431, 3439), False, 'import bpy\n'), ((3468, 3502), 'bpy.utils.unregister_class', 'bpy.utils.unregister_class', (['STRING'], {}), '(STRING)\n', (3494, 3502), False, 'import bpy\n'), ((2747, 2776), 'bpy.ops.object.shade_smooth', 'bpy.ops.object.shade_smooth', ([], {}), '()\n', (2774, 2776), False, 'import bpy\n'), ((936, 988), 'bpy.ops.mesh.primitive_plane_add', 'bpy.ops.mesh.primitive_plane_add', ([], {'location': '(0, 0, 0)'}), '(location=(0, 0, 0))\n', (968, 988), False, 'import bpy\n'), ((1002, 1034), 'bpy.ops.object.editmode_toggle', 'bpy.ops.object.editmode_toggle', ([], {}), '()\n', (1032, 1034), False, 'import bpy\n'), ((1048, 1088), 'bpy.ops.mesh.select_all', 'bpy.ops.mesh.select_all', ([], {'action': '"""SELECT"""'}), "(action='SELECT')\n", (1071, 1088), False, 'import bpy\n'), ((1102, 1134), 'bpy.ops.mesh.delete', 'bpy.ops.mesh.delete', ([], {'type': '"""VERT"""'}), "(type='VERT')\n", (1121, 1134), False, 'import bpy\n'), ((1275, 1286), 'bmesh.new', 'bmesh.new', ([], {}), '()\n', (1284, 1286), False, 'import bmesh\n'), ((1394, 1426), 'bpy.ops.object.editmode_toggle', 'bpy.ops.object.editmode_toggle', ([], {}), '()\n', (1424, 1426), False, 'import bpy\n'), ((1494, 1526), 'bpy.ops.object.editmode_toggle', 'bpy.ops.object.editmode_toggle', ([], {}), '()\n', (1524, 1526), False, 'import bpy\n'), ((1540, 1580), 'bpy.ops.mesh.select_all', 'bpy.ops.mesh.select_all', ([], {'action': '"""SELECT"""'}), "(action='SELECT')\n", (1563, 1580), False, 'import bpy\n'), ((1594, 1622), 'bpy.ops.mesh.edge_face_add', 'bpy.ops.mesh.edge_face_add', ([], {}), '()\n', (1620, 1622), False, 'import bpy\n'), ((1636, 1668), 'bpy.ops.object.editmode_toggle', 'bpy.ops.object.editmode_toggle', ([], {}), '()\n', (1666, 1668), False, 'import bpy\n'), ((2063, 2107), 'bpy.ops.object.select_all', 'bpy.ops.object.select_all', ([], {'action': '"""DESELECT"""'}), "(action='DESELECT')\n", (2088, 2107), False, 'import bpy\n'), ((2186, 2207), 'bpy.ops.object.join', 'bpy.ops.object.join', ([], {}), '()\n', (2205, 2207), False, 'import bpy\n'), ((2221, 2259), 'bpy.ops.object.convert', 'bpy.ops.object.convert', ([], {'target': '"""CURVE"""'}), "(target='CURVE')\n", (2243, 2259), False, 'import bpy\n')]
|
import requests
import sys
import time
import os
def main():
trigger_url = sys.argv[1]
trigger_resp = requests.get(trigger_url)
if trigger_resp.ok:
trigger_json = trigger_resp.json().get("data", {})
test_runs = trigger_json.get("runs", [])
print ("Started {} test runs.".format(len(test_runs)))
results = {}
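        # poll once per second until every started run has reported a terminal
        # result ("pass" or "fail")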
while len(results.keys()) < len(test_runs):
time.sleep(1)
for run in test_runs:
test_run_id = run.get("test_run_id")
                if test_run_id not in results:
result = _get_result(run)
if result.get("result") in ["pass", "fail"]:
results[test_run_id] = result
pass_count = sum([r.get("result") == "pass" for r in results.values()])
fail_count = sum([r.get("result") == "fail" for r in results.values()])
if fail_count > 0:
print ("{} test runs passed. {} test runs failed.".format(pass_count, fail_count))
exit(1)
print ("All test runs passed.")
def _get_result(test_run):
# generate Personal Access Token at https://www.runscope.com/applications
    if "RUNSCOPE_ACCESS_TOKEN" not in os.environ:
print ("Please set the environment variable RUNSCOPE_ACCESS_TOKEN. You can get an access token by going to https://www.runscope.com/applications")
exit(1)
API_TOKEN = os.environ["RUNSCOPE_ACCESS_TOKEN"]
opts = {
"base_url": "https://api.runscope.com",
"bucket_key": test_run.get("bucket_key"),
"test_id": test_run.get("test_id"),
"test_run_id": test_run.get("test_run_id")
}
result_url = "{base_url}/buckets/{bucket_key}/tests/{test_id}/results/{test_run_id}".format(**opts)
print ("Getting result: {}".format(result_url))
headers = {
"Authorization": "Bearer {}".format(API_TOKEN),
"User-Agent": "python-trigger-sample"
}
result_resp = requests.get(result_url, headers=headers)
if result_resp.ok:
return result_resp.json().get("data")
return None
if __name__ == '__main__':
main()
|
[
"time.sleep",
"requests.get"
] |
[((112, 137), 'requests.get', 'requests.get', (['trigger_url'], {}), '(trigger_url)\n', (124, 137), False, 'import requests\n'), ((1978, 2019), 'requests.get', 'requests.get', (['result_url'], {'headers': 'headers'}), '(result_url, headers=headers)\n', (1990, 2019), False, 'import requests\n'), ((422, 435), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (432, 435), False, 'import time\n')]
|
from matplotlib.colors import ListedColormap
cm3 = ListedColormap(['#0000aa', '#ff2020', '#50ff50'])
cm2 = ListedColormap(['#0000aa', '#ff2020'])
|
[
"matplotlib.colors.ListedColormap"
] |
[((52, 101), 'matplotlib.colors.ListedColormap', 'ListedColormap', (["['#0000aa', '#ff2020', '#50ff50']"], {}), "(['#0000aa', '#ff2020', '#50ff50'])\n", (66, 101), False, 'from matplotlib.colors import ListedColormap\n'), ((108, 146), 'matplotlib.colors.ListedColormap', 'ListedColormap', (["['#0000aa', '#ff2020']"], {}), "(['#0000aa', '#ff2020'])\n", (122, 146), False, 'from matplotlib.colors import ListedColormap\n')]
|
from .._BlackJack import BlackJackCPP
import gym
import ctypes
import numpy as np
from gym import spaces
class BlackJack(gym.Env):
def __init__(self, natural=False):
self.env = BlackJackCPP(natural)
self.action_space = spaces.Discrete(2)
self.observation_space = spaces.Tuple((
spaces.Discrete(32),
spaces.Discrete(11),
spaces.Discrete(2)
))
self.state = None
self.natural = natural
def seed(self, seed=None):
if seed is None:
return [self.env.get_seed()]
else:
if not isinstance(seed, ctypes.c_uint32):
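                # coerce plain ints to an unsigned 32-bit value before handing the seed
                # to the wrapped C++ environment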
seed = ctypes.c_uint32(seed).value
self.env.set_seed(seed)
return [seed]
def step(self, action):
assert self.action_space.contains(action)
state, reward, done = self.env.step(action)
self.state = np.array(state)
return self.state, reward, done, {}
def render(self, mode='human'):
return None
def reset(self):
self.state = np.array(self.env.reset())
return self.state
|
[
"numpy.array",
"ctypes.c_uint32",
"gym.spaces.Discrete"
] |
[((242, 260), 'gym.spaces.Discrete', 'spaces.Discrete', (['(2)'], {}), '(2)\n', (257, 260), False, 'from gym import spaces\n'), ((905, 920), 'numpy.array', 'np.array', (['state'], {}), '(state)\n', (913, 920), True, 'import numpy as np\n'), ((321, 340), 'gym.spaces.Discrete', 'spaces.Discrete', (['(32)'], {}), '(32)\n', (336, 340), False, 'from gym import spaces\n'), ((354, 373), 'gym.spaces.Discrete', 'spaces.Discrete', (['(11)'], {}), '(11)\n', (369, 373), False, 'from gym import spaces\n'), ((387, 405), 'gym.spaces.Discrete', 'spaces.Discrete', (['(2)'], {}), '(2)\n', (402, 405), False, 'from gym import spaces\n'), ((663, 684), 'ctypes.c_uint32', 'ctypes.c_uint32', (['seed'], {}), '(seed)\n', (678, 684), False, 'import ctypes\n')]
|
import numpy as np
from . import _version
__version__ = _version.get_versions()['version']
HXR_COLORS = ("#000000", "#02004a", "#030069", "#04008f", "#0500b3", "#0700ff")
SXR_COLORS = ("#000000", "#330000", "#520000", "#850000", "#ad0000", "#ff0000")
HXR_AREAS = {
"GUN" : [2017.911, 2018.712],
"L0" : [2018.712, 2024.791],
"DL1_1": [2024.791, 2031.992],
"DL1_2": [2031.992, 2035.035],
"L1": [2035.035, 2044.167],
"BC1": [2044.167, 2059.733],
"L2": [2059.733, 2410.698],
"BC2": [2410.698, 2438.400],
"L3": [2438.400, 3042.005],
"CLTH_0": [3042.005, 3050.512],
"CLTH_1": [3050.512, 3058.457],
"CLTH_2": [3058.457, 3110.961],
"BSYH_1": [3110.961, 3117.409],
"BSYH_2": [3117.409, 3224.022],
"LTUH": [3224.022, 3562.739],
"UNDH": [3562.739, 3718.483],
"DMPH_1": [3718.483, 3734.407],
"DMPH_2": [3734.407, 3765.481]
}
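# invert the mapping: the midpoint of each [start, end] range becomes the key,
# e.g. HXR_AREAS[np.mean([2017.911, 2018.712])] == "GUN"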
HXR_AREAS = {np.mean(value): key for key, value in HXR_AREAS.items()}
SXR_AREAS = {
"GUN" : [2017.911, 2017.911],
"L0" : [2018.712, 2024.791],
"DL1_1": [2024.791, 2031.992],
"DL1_2": [2031.992, 2035.035],
"L1": [2035.035, 2044.167],
"BC1": [2044.167, 2059.733],
"L2": [2059.733, 2410.698],
"BC2": [2410.698, 2438.400],
"L3": [2438.400, 3042.005],
"CLTH_0": [3042.005, 3050.512],
"CLTH_1": [3050.512, 3058.457],
"CLTS": [3177.650, 3224.022],
"BSYS": [3224.022, 3565.656],
"LTUS": [3565.656, 3718.483],
"UNDS": [3718.483, 3734.407],
"DMPS_1": [3734.407, 3734.407],
"DMPS_2": [3734.407, 3765.481]
}
SXR_AREAS = {np.mean(value): key for key, value in SXR_AREAS.items()}
|
[
"numpy.mean"
] |
[((903, 917), 'numpy.mean', 'np.mean', (['value'], {}), '(value)\n', (910, 917), True, 'import numpy as np\n'), ((1569, 1583), 'numpy.mean', 'np.mean', (['value'], {}), '(value)\n', (1576, 1583), True, 'import numpy as np\n')]
|
import os
import tempfile
import numpy as np
import tensorflow as tf
from time import time
from termcolor import cprint
from unittest import TestCase
from .. import K
from .. import Input, Dense, GRU, Bidirectional, Embedding
from .. import Model, load_model
from .. import l2
from .. import maxnorm
from .. import Adam, Nadam, SGD
from .. import AdamW, NadamW, SGDW
from .. import get_weight_decays, fill_dict_in_order, reset_seeds, K_eval
print("TF version: %s" % tf.__version__)
tf_eager = bool(os.environ["TF_EAGER"] == "True")
if tf_eager:
print("TF running eagerly")
else:
tf.compat.v1.disable_eager_execution()
print("TF running in graph mode")
class TestOptimizers(TestCase):
def test_all(self): # Save/Load, Warm Restarts (w/ cosine annealing)
for optimizer_name in ['AdamW', 'NadamW', 'SGDW']:
cprint("<< TESTING {} OPTIMIZER >>".format(optimizer_name), 'blue')
reset_seeds()
num_batches, num_epochs = 25, 4
batch_size, timesteps, num_channels = 16, 8, 4
batch_shape = (batch_size, timesteps, num_channels)
total_iterations = num_batches # due to warm restarts
self.model = self._make_model(batch_shape, total_iterations)
optimizer = self._make_optimizer(optimizer_name, self.model,
total_iterations)
self.model.compile(optimizer, loss='binary_crossentropy')
self.assertTrue(self._valid_weight_decays(self.model))
self.model._make_train_function() # else K.eval before train may fail
X, Y = self._make_data(num_batches, *batch_shape)
self.eta_history = [] # for stop-introspection
self.t_cur_history = [] # for stop-introspection
for epoch in range(num_epochs):
for batch_num in range(num_batches):
self.t_cur_history += [K_eval(self.model.optimizer.t_cur, K)]
self.eta_history += [K_eval(self.model.optimizer.eta_t, K)]
self.model.train_on_batch(X[batch_num], Y[batch_num])
self.eta_history += [K_eval(self.model.optimizer.eta_t, K)]
self.eta_history.pop(-(1 + int(tf_eager)))
K.set_value(self.model.optimizer.t_cur, 0)
self.assertTrue(self._valid_cosine_annealing(self.eta_history,
total_iterations, num_epochs))
self._test_save_load(self.model, X, optimizer_name, optimizer)
# cleanup
del self.model, optimizer
reset_seeds(reset_graph_with_backend=K)
cprint("\n<< {} MAIN TEST PASSED >>\n".format(optimizer_name), 'green')
cprint("\n<< ALL MAIN TESTS PASSED >>\n", 'green')
def test_misc(self): # tests of non-main features to improve coverage
for optimizer_name in ['AdamW', 'NadamW', 'SGDW']:
cprint("<< TESTING {} OPTIMIZER >>".format(optimizer_name), 'blue')
reset_seeds()
optimizer_kw = {'total_iterations': 0, 'decay': 1e-3,
'amsgrad': optimizer_name == 'AdamW',
'nesterov': optimizer_name == 'SGDW'}
num_batches = 4
batch_size, timesteps = 16, 8
batch_shape = (batch_size, timesteps)
embed_input_dim = 5
total_iterations = 0
self.model = self._make_model(batch_shape, total_iterations,
embed_input_dim=embed_input_dim,
dense_constraint=1, l2_reg=1e-4,
bidirectional=False, sparse=True)
optimizer = self._make_optimizer(optimizer_name, self.model,
**optimizer_kw)
self.model.compile(optimizer, loss='sparse_categorical_crossentropy')
X, Y = self._make_data(num_batches, *batch_shape,
embed_input_dim=embed_input_dim, sparse=True)
for batch_num in range(num_batches):
self.model.train_on_batch(X[batch_num], Y[batch_num])
self._test_save_load(self.model, X, optimizer_name, optimizer)
# cleanup
del self.model, optimizer
reset_seeds(reset_graph_with_backend=K)
cprint("\n<< {} MISC TEST PASSED >>\n".format(optimizer_name), 'green')
cprint("\n<< ALL MISC TESTS PASSED >>\n", 'green')
def test_control(self): # tests losses against original optimizers'
for optimizer_name in ['AdamW', 'NadamW', 'SGDW']:
cprint("<< TESTING {} OPTIMIZER >>".format(optimizer_name), 'blue')
pass_txt = "Control Test Passed"
if optimizer_name == 'AdamW':
for amsgrad in [True, False]:
self._test_control(optimizer_name, amsgrad=amsgrad)
print("\n>> AdamW amsgrad={} {}".format(amsgrad, pass_txt))
elif optimizer_name == 'NadamW':
self._test_control(optimizer_name)
elif optimizer_name == 'SGDW':
for nesterov in [True, False]:
self._test_control(optimizer_name, nesterov=nesterov)
print("\n>> SGDW nesterov={} {}".format(nesterov, pass_txt))
o_name = optimizer_name
cprint("\n<< {} {} >>\n".format(o_name, pass_txt.upper()), 'green')
cprint("\n<< ALL CONTROL TESTS PASSED >>\n", 'green')
def _test_control(self, optimizer_name, amsgrad=False, nesterov=False):
optimizer_kw = dict(total_iterations=0, decay=1e-3,
amsgrad=amsgrad, nesterov=nesterov,
control_mode=True)
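        # the "control" run uses the stock optimizer (same name minus the trailing "W");
        # its per-batch losses are expected to match the custom optimizer's within atol=1e-3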
num_batches = 100
batch_size, timesteps = 16, 32
batch_shape = (batch_size, timesteps)
embed_input_dim = 5
total_iterations = 0
model_kw = dict(batch_shape=batch_shape, dense_constraint=1,
total_iterations=total_iterations,
embed_input_dim=embed_input_dim, l2_reg=0,
bidirectional=False, sparse=True)
loss_name = 'sparse_categorical_crossentropy'
reset_seeds(verbose=0)
X, Y = self._make_data(num_batches, *batch_shape,
embed_input_dim=embed_input_dim, sparse=True)
reset_seeds(reset_graph_with_backend=K, verbose=0)
self.model_custom = self._make_model(**model_kw)
optimizer_custom = self._make_optimizer(optimizer_name,
self.model_custom,
**optimizer_kw)
self.model_custom.compile(optimizer_custom, loss=loss_name)
self.loss_custom = [] # for introspection
t0 = time()
for batch_num in range(num_batches):
self.loss_custom += [self.model_custom.train_on_batch(
X[batch_num], Y[batch_num])]
print("model_custom -- %s batches -- time: %.2f sec" % (num_batches,
time() - t0))
reset_seeds(reset_graph_with_backend=K, verbose=0)
self.model_control = self._make_model(**model_kw)
optimizer_control = self._make_optimizer(optimizer_name[:-1],
self.model_control,
**optimizer_kw)
self.model_control.compile(optimizer_control, loss=loss_name)
self.loss_control = [] # for introspection
t0 = time()
for batch_num in range(num_batches):
self.loss_control += [self.model_control.train_on_batch(
X[batch_num], Y[batch_num])]
print("model_control -- %s batches -- time: %.2f sec" % (num_batches,
time() - t0))
loss_diff = np.abs(np.array(self.loss_custom) -
np.array(self.loss_control))
print("%s max loss diff: %e" % (optimizer_name, np.max(loss_diff)))
self.assertTrue(np.allclose(self.loss_custom, self.loss_control,
rtol=0, atol=1e-3))
# cleanup
del self.model_custom, self.model_control
del optimizer_custom, optimizer_control
reset_seeds(reset_graph_with_backend=K, verbose=0)
def _test_save_load(self, model, X, optimizer_name, optimizer):
saved_model_preds = model.predict(X[0])
saved_model_weights = K.batch_get_value(model.trainable_weights)
saved_optim_weights = K.batch_get_value(model.optimizer.weights)
test_name = 'test__%f{}.h5'.format(np.random.random())
modelpath = os.path.join(tempfile.gettempdir(), test_name)
model.save(modelpath)
del model
model = load_model(modelpath, custom_objects={optimizer_name: optimizer})
loaded_model_preds = model.predict(X[0])
loaded_model_weights = K.batch_get_value(model.trainable_weights)
loaded_optim_weights = K.batch_get_value(model.optimizer.weights)
self.assertTrue(np.allclose(saved_model_preds, loaded_model_preds,
rtol=0, atol=1e-8))
for smw, lmw in zip(saved_model_weights, loaded_model_weights):
self.assertTrue(np.allclose(smw, lmw, rtol=0, atol=1e-8))
for sow, low in zip(saved_optim_weights, loaded_optim_weights):
self.assertTrue(np.allclose(sow, low, rtol=0, atol=1e-8))
@staticmethod
def _make_data(num_batches, batch_size, timesteps, num_channels=None,
embed_input_dim=None, sparse=False):
if sparse:
X = np.random.randint(0, embed_input_dim,
(num_batches, batch_size, timesteps))
else:
X = np.random.randn(num_batches, batch_size, timesteps, num_channels)
Y = np.random.randint(0, 2, (num_batches, batch_size))
return X, Y
@staticmethod
def _make_model(batch_shape, total_iterations, l2_reg=0, bidirectional=True,
dense_constraint=None, embed_input_dim=None, sparse=False):
if dense_constraint is not None:
dense_constraint = maxnorm(dense_constraint)
ipt = Input(batch_shape=batch_shape)
if sparse:
x = Embedding(embed_input_dim, embed_input_dim*3 + 1,
mask_zero=True)(ipt)
else:
x = ipt
gru = GRU(4, recurrent_regularizer=l2(l2_reg), bias_regularizer=l2(l2_reg))
if bidirectional:
x = Bidirectional(gru)(x)
else:
x = gru(x)
x = Dense(2, kernel_regularizer=l2(l2_reg),
kernel_constraint=dense_constraint)(x)
if sparse:
out = Dense(2, activation='softmax')(x)
else:
out = Dense(1, activation='sigmoid')(x)
return Model(ipt, out)
@staticmethod
def _make_optimizer(optimizer_name, model, total_iterations, decay=0,
amsgrad=False, nesterov=False, control_mode=False):
optimizer_dict = {'AdamW': AdamW, 'NadamW': NadamW, 'SGDW': SGDW,
'Adam': Adam, 'Nadam': Nadam, 'SGD': SGD}
optimizer = optimizer_dict[optimizer_name]
optimizer_kw = {}
if 'Adam' in optimizer_name:
optimizer_kw = {'amsgrad': amsgrad}
elif 'SGD' in optimizer_name:
optimizer_kw = {'nesterov': nesterov, 'momentum': .9}
if 'Nadam' not in optimizer_name:
optimizer_kw.update({'decay': decay})
if not control_mode:
wd_dict = get_weight_decays(model)
l2_extra = [2e-5]*(len(wd_dict) - 3)
wd = fill_dict_in_order(wd_dict, [1e-5, 1e-5, 1e-6] + l2_extra)
lr_m = {'gru': 0.5}
use_cosine_annealing = True
else:
wd, lr_m = None, None
use_cosine_annealing = False
if not any([optimizer_name == name for name in ('Adam', 'Nadam', 'SGD')]):
return optimizer(lr=1e-4, weight_decays=wd, lr_multipliers=lr_m,
use_cosine_annealing=use_cosine_annealing, t_cur=0,
total_iterations=total_iterations, **optimizer_kw)
else:
return optimizer(lr=1e-4, **optimizer_kw)
@staticmethod
def _valid_weight_decays(model):
weight_decays = get_weight_decays(model)
trues = 0
for wd in weight_decays.values():
trues += (wd != 0)
return (trues == 0)
@staticmethod
def _valid_cosine_annealing(eta_history, total_iterations, num_epochs):
eta_history_simul = []
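        # expected schedule: eta_t = 0.5 * (1 + cos(pi * t_cur / total_iterations)),
        # with t_cur reset to 0 at the start of every epoch (warm restart)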
for epoch in range(num_epochs):
for iteration in range(0, total_iterations):
eta_history_simul.append(0.5 * (
1 + np.cos(np.pi*iteration / total_iterations)))
return np.allclose(eta_history, eta_history_simul, rtol=0, atol=2e-7)
|
[
"numpy.allclose",
"numpy.random.random",
"numpy.max",
"tensorflow.compat.v1.disable_eager_execution",
"numpy.random.randint",
"numpy.random.randn",
"tempfile.gettempdir",
"numpy.array",
"numpy.cos",
"time.time",
"termcolor.cprint"
] |
[((591, 629), 'tensorflow.compat.v1.disable_eager_execution', 'tf.compat.v1.disable_eager_execution', ([], {}), '()\n', (627, 629), True, 'import tensorflow as tf\n'), ((2744, 2796), 'termcolor.cprint', 'cprint', (['"""\n<< ALL MAIN TESTS PASSED >>\n"""', '"""green"""'], {}), '("""\n<< ALL MAIN TESTS PASSED >>\n""", \'green\')\n', (2750, 2796), False, 'from termcolor import cprint\n'), ((4481, 4533), 'termcolor.cprint', 'cprint', (['"""\n<< ALL MISC TESTS PASSED >>\n"""', '"""green"""'], {}), '("""\n<< ALL MISC TESTS PASSED >>\n""", \'green\')\n', (4487, 4533), False, 'from termcolor import cprint\n'), ((5499, 5554), 'termcolor.cprint', 'cprint', (['"""\n<< ALL CONTROL TESTS PASSED >>\n"""', '"""green"""'], {}), '("""\n<< ALL CONTROL TESTS PASSED >>\n""", \'green\')\n', (5505, 5554), False, 'from termcolor import cprint\n'), ((6887, 6893), 'time.time', 'time', ([], {}), '()\n', (6891, 6893), False, 'from time import time\n'), ((7667, 7673), 'time.time', 'time', ([], {}), '()\n', (7671, 7673), False, 'from time import time\n'), ((10028, 10078), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', '(num_batches, batch_size)'], {}), '(0, 2, (num_batches, batch_size))\n', (10045, 10078), True, 'import numpy as np\n'), ((13060, 13123), 'numpy.allclose', 'np.allclose', (['eta_history', 'eta_history_simul'], {'rtol': '(0)', 'atol': '(2e-07)'}), '(eta_history, eta_history_simul, rtol=0, atol=2e-07)\n', (13071, 13123), True, 'import numpy as np\n'), ((8208, 8276), 'numpy.allclose', 'np.allclose', (['self.loss_custom', 'self.loss_control'], {'rtol': '(0)', 'atol': '(0.001)'}), '(self.loss_custom, self.loss_control, rtol=0, atol=0.001)\n', (8219, 8276), True, 'import numpy as np\n'), ((8795, 8813), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (8811, 8813), True, 'import numpy as np\n'), ((8848, 8869), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (8867, 8869), False, 'import tempfile\n'), ((9235, 9305), 'numpy.allclose', 'np.allclose', (['saved_model_preds', 'loaded_model_preds'], {'rtol': '(0)', 'atol': '(1e-08)'}), '(saved_model_preds, loaded_model_preds, rtol=0, atol=1e-08)\n', (9246, 9305), True, 'import numpy as np\n'), ((9810, 9885), 'numpy.random.randint', 'np.random.randint', (['(0)', 'embed_input_dim', '(num_batches, batch_size, timesteps)'], {}), '(0, embed_input_dim, (num_batches, batch_size, timesteps))\n', (9827, 9885), True, 'import numpy as np\n'), ((9950, 10015), 'numpy.random.randn', 'np.random.randn', (['num_batches', 'batch_size', 'timesteps', 'num_channels'], {}), '(num_batches, batch_size, timesteps, num_channels)\n', (9965, 10015), True, 'import numpy as np\n'), ((8022, 8048), 'numpy.array', 'np.array', (['self.loss_custom'], {}), '(self.loss_custom)\n', (8030, 8048), True, 'import numpy as np\n'), ((8078, 8105), 'numpy.array', 'np.array', (['self.loss_control'], {}), '(self.loss_control)\n', (8086, 8105), True, 'import numpy as np\n'), ((9442, 9483), 'numpy.allclose', 'np.allclose', (['smw', 'lmw'], {'rtol': '(0)', 'atol': '(1e-08)'}), '(smw, lmw, rtol=0, atol=1e-08)\n', (9453, 9483), True, 'import numpy as np\n'), ((9584, 9625), 'numpy.allclose', 'np.allclose', (['sow', 'low'], {'rtol': '(0)', 'atol': '(1e-08)'}), '(sow, low, rtol=0, atol=1e-08)\n', (9595, 9625), True, 'import numpy as np\n'), ((8163, 8180), 'numpy.max', 'np.max', (['loss_diff'], {}), '(loss_diff)\n', (8169, 8180), True, 'import numpy as np\n'), ((7196, 7202), 'time.time', 'time', ([], {}), '()\n', (7200, 7202), False, 'from time import time\n'), ((7980, 7986), 
'time.time', 'time', ([], {}), '()\n', (7984, 7986), False, 'from time import time\n'), ((13000, 13044), 'numpy.cos', 'np.cos', (['(np.pi * iteration / total_iterations)'], {}), '(np.pi * iteration / total_iterations)\n', (13006, 13044), True, 'import numpy as np\n')]
|
# Generated by Django 2.2.10 on 2020-02-18 12:51
import django.contrib.postgres.indexes
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [("reporting", "0098_auto_20200221_2034")]
operations = [
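        # Drop the OCP-all materialized views up front; they are recreated (and refreshed)
        # by the RunSQL operations at the end of this migration.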
migrations.RunSQL(
"""
drop materialized view if exists reporting_ocpallcostlineitem_daily_summary;
drop materialized view if exists reporting_ocpallcostlineitem_project_daily_summary;
"""
),
migrations.RemoveIndex(model_name="ocpawscostlineitemdailysummary", name="cost_summary_node_idx"),
migrations.RemoveIndex(
model_name="ocpawscostlineitemprojectdailysummary", name="cost__proj_sum_namespace_idx"
),
migrations.RemoveIndex(model_name="ocpawscostlineitemprojectdailysummary", name="cost_proj_sum_node_idx"),
migrations.RemoveIndex(model_name="ocpazurecostlineitemdailysummary", name="ocpazure_node_idx"),
migrations.RemoveIndex(
model_name="ocpazurecostlineitemprojectdailysummary", name="ocpazure_proj_namespace_idx"
),
migrations.RemoveIndex(model_name="ocpazurecostlineitemprojectdailysummary", name="ocpazure_proj_node_idx"),
migrations.RemoveIndex(model_name="ocpusagelineitemdaily", name="namespace_idx"),
migrations.RemoveIndex(model_name="ocpusagelineitemdaily", name="node_idx"),
migrations.RemoveIndex(model_name="ocpusagelineitemdailysummary", name="summary_namespace_idx"),
migrations.RemoveIndex(model_name="ocpusagelineitemdailysummary", name="summary_node_idx"),
migrations.AlterField(
model_name="ocpawscostlineitemprojectdailysummary", name="usage_end", field=models.DateField()
),
migrations.AlterField(
model_name="ocpawscostlineitemprojectdailysummary", name="usage_start", field=models.DateField()
),
migrations.AlterField(
model_name="ocpazurecostlineitemdailysummary", name="usage_end", field=models.DateField()
),
migrations.AlterField(
model_name="ocpazurecostlineitemdailysummary", name="usage_start", field=models.DateField()
),
migrations.AlterField(
model_name="ocpazurecostlineitemprojectdailysummary", name="usage_end", field=models.DateField()
),
migrations.AlterField(
model_name="ocpazurecostlineitemprojectdailysummary", name="usage_start", field=models.DateField()
),
migrations.AlterField(model_name="ocpstoragelineitemdaily", name="usage_end", field=models.DateField()),
migrations.AlterField(model_name="ocpstoragelineitemdaily", name="usage_start", field=models.DateField()),
migrations.AlterField(model_name="ocpusagelineitemdaily", name="total_seconds", field=models.IntegerField()),
migrations.AlterField(model_name="ocpusagelineitemdaily", name="usage_end", field=models.DateField()),
migrations.AlterField(model_name="ocpusagelineitemdaily", name="usage_start", field=models.DateField()),
migrations.AlterField(model_name="ocpusagelineitemdailysummary", name="usage_end", field=models.DateField()),
migrations.AlterField(model_name="ocpusagelineitemdailysummary", name="usage_start", field=models.DateField()),
migrations.AddIndex(
model_name="ocpawscostlineitemdailysummary",
index=models.Index(fields=["node"], name="cost_summary_node_idx", opclasses=["varchar_pattern_ops"]),
),
migrations.AddIndex(
model_name="ocpawscostlineitemprojectdailysummary",
index=models.Index(
fields=["namespace"], name="cost__proj_sum_namespace_idx", opclasses=["varchar_pattern_ops"]
),
),
migrations.AddIndex(
model_name="ocpawscostlineitemprojectdailysummary",
index=models.Index(fields=["node"], name="cost_proj_sum_node_idx", opclasses=["varchar_pattern_ops"]),
),
migrations.AddIndex(
model_name="ocpazurecostlineitemdailysummary",
index=models.Index(fields=["node"], name="ocpazure_node_idx", opclasses=["varchar_pattern_ops"]),
),
migrations.AddIndex(
model_name="ocpazurecostlineitemprojectdailysummary",
index=models.Index(
fields=["namespace"], name="ocpazure_proj_namespace_idx", opclasses=["varchar_pattern_ops"]
),
),
migrations.AddIndex(
model_name="ocpazurecostlineitemprojectdailysummary",
index=models.Index(fields=["node"], name="ocpazure_proj_node_idx", opclasses=["varchar_pattern_ops"]),
),
migrations.AddIndex(
model_name="ocpstoragelineitemdaily",
index=models.Index(
fields=["namespace"], name="ocp_storage_li_namespace_idx", opclasses=["varchar_pattern_ops"]
),
),
migrations.AddIndex(
model_name="ocpstoragelineitemdaily",
index=models.Index(fields=["node"], name="ocp_storage_li_node_idx", opclasses=["varchar_pattern_ops"]),
),
migrations.AddIndex(
model_name="ocpusagelineitemdaily",
index=models.Index(fields=["namespace"], name="namespace_idx", opclasses=["varchar_pattern_ops"]),
),
migrations.AddIndex(
model_name="ocpusagelineitemdaily",
index=models.Index(fields=["node"], name="node_idx", opclasses=["varchar_pattern_ops"]),
),
migrations.AddIndex(
model_name="ocpusagelineitemdailysummary",
index=models.Index(fields=["namespace"], name="summary_namespace_idx", opclasses=["varchar_pattern_ops"]),
),
migrations.AddIndex(
model_name="ocpusagelineitemdailysummary",
index=models.Index(fields=["node"], name="summary_node_idx", opclasses=["varchar_pattern_ops"]),
),
migrations.AlterField(model_name="costsummary", name="usage_end", field=models.DateField()),
migrations.AlterField(model_name="costsummary", name="usage_start", field=models.DateField()),
migrations.AddIndex(
model_name="costsummary", index=models.Index(fields=["usage_start"], name="ocpcostsum_usage_start_idx")
),
migrations.AddIndex(
model_name="costsummary",
index=models.Index(
fields=["namespace"], name="ocpcostsum_namespace_idx", opclasses=["varchar_pattern_ops"]
),
),
migrations.AddIndex(
model_name="costsummary",
index=models.Index(fields=["node"], name="ocpcostsum_node_idx", opclasses=["varchar_pattern_ops"]),
),
migrations.AddIndex(
model_name="costsummary",
index=django.contrib.postgres.indexes.GinIndex(fields=["pod_labels"], name="ocpcostsum_pod_labels_idx"),
),
# This extension will help specifically with "col LIKE %val%"
# operations. (As long as val is at least 3 characters)
migrations.RunSQL(
"""
create extension if not exists pg_trgm schema public;
"""
),
# Create indexes to aid with text searching.
# These cases will specifically help with case-insensitive
# and contains (vs startswith) searches
# ocp usage line item daily
migrations.RunSQL(
"""
/* add namespace index for like trigram ops */
create index if not exists ocp_namespace_idx
on reporting_ocpusagelineitem_daily using gin (UPPER(namespace) gin_trgm_ops);
/* add node index for like trigram ops */
create index if not exists ocp_node_idx
on reporting_ocpusagelineitem_daily using gin (UPPER(node) gin_trgm_ops);
"""
),
# ocp usage line item daily summary
migrations.RunSQL(
"""
/* add namespace index for like trigram ops */
create index if not exists ocp_summary_namespace_like_idx
on reporting_ocpusagelineitem_daily_summary using gin (UPPER(namespace) gin_trgm_ops);
/* add node index for like trigram ops */
create index if not exists ocp_summary_node_like_idx
on reporting_ocpusagelineitem_daily_summary using gin (UPPER(node) gin_trgm_ops);
"""
),
# reporting_ocpstoragelineitem_daily
migrations.RunSQL(
"""
/* add namespace index for like trigram ops */
create index if not exists ocp_storage_li_namespace_like_idx
on reporting_ocpstoragelineitem_daily using gin (UPPER(namespace) gin_trgm_ops);
/* add node index for like trigram ops */
create index if not exists ocp_storage_li_node_like_idx
on reporting_ocpstoragelineitem_daily using gin (UPPER(node) gin_trgm_ops);
"""
),
# ocp azure cost
migrations.RunSQL(
"""
/* add node index for like trigram ops */
create index if not exists ocpazure_node_like_idx
on reporting_ocpazurecostlineitem_daily_summary using gin (UPPER(node) gin_trgm_ops);
"""
),
# ocp azure project cost
migrations.RunSQL(
"""
/* add namespace index for like trigram ops */
create index if not exists ocpazure_proj_namespace_like_idx
on reporting_ocpazurecostlineitem_project_daily_summary using gin (UPPER(namespace) gin_trgm_ops);
/* add node index for like trigram ops */
create index if not exists ocpazure_proj_node_like_idx
on reporting_ocpazurecostlineitem_project_daily_summary using gin (UPPER(node) gin_trgm_ops);
"""
),
# reporting_ocpawscostlineitem_daily_summary
migrations.RunSQL(
"""
/* add node index for like trigram ops */
create index if not exists cost_summary_node_like_idx
on reporting_ocpawscostlineitem_daily_summary using gin (UPPER(node) gin_trgm_ops);
"""
),
# reporting_ocpawscostlineitem_project_daily_summary
migrations.RunSQL(
"""
/* add namespace index for like trigram ops */
create index if not exists cost__proj_sum_namespace_like_idx
on reporting_ocpawscostlineitem_project_daily_summary using gin (UPPER(namespace) gin_trgm_ops);
/* add node index for like trigram ops */
create index if not exists cost__proj_sum_node_like_idx
on reporting_ocpawscostlineitem_project_daily_summary using gin (UPPER(node) gin_trgm_ops);
"""
),
# reporting_ocpcosts_summary
migrations.RunSQL(
"""
/* add namespace index for like trigram ops */
create index if not exists ocpcostsum_namespace_like_idx
on reporting_ocpcosts_summary using gin (UPPER(namespace) gin_trgm_ops);
/* add node index for like trigram ops */
create index if not exists ocpcostsum_node_like_idx
on reporting_ocpcosts_summary using gin (UPPER(node) gin_trgm_ops);
"""
),
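        # Recreate both OCP-all materialized views as a UNION of the AWS and Azure summary
        # tables (rows since the start of the previous month), then add their indexes.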
migrations.RunSQL(
"""
drop materialized view if exists reporting_ocpallcostlineitem_daily_summary;
create materialized view if not exists reporting_ocpallcostlineitem_daily_summary as
SELECT row_number() OVER () AS id,
lids.source_type,
lids.cluster_id,
lids.cluster_alias,
lids.namespace,
lids.node,
lids.resource_id,
lids.usage_start,
lids.usage_end,
lids.usage_account_id,
lids.account_alias_id,
lids.product_code,
lids.product_family,
lids.instance_type,
lids.region,
lids.availability_zone,
lids.tags,
lids.usage_amount,
lids.unit,
lids.unblended_cost,
lids.markup_cost,
lids.currency_code,
lids.shared_projects,
lids.project_costs
FROM ( SELECT 'AWS'::text AS source_type,
reporting_ocpawscostlineitem_daily_summary.cluster_id,
reporting_ocpawscostlineitem_daily_summary.cluster_alias,
reporting_ocpawscostlineitem_daily_summary.namespace,
reporting_ocpawscostlineitem_daily_summary.node,
reporting_ocpawscostlineitem_daily_summary.resource_id,
reporting_ocpawscostlineitem_daily_summary.usage_start::date,
reporting_ocpawscostlineitem_daily_summary.usage_end::date,
reporting_ocpawscostlineitem_daily_summary.usage_account_id,
reporting_ocpawscostlineitem_daily_summary.account_alias_id,
reporting_ocpawscostlineitem_daily_summary.product_code,
reporting_ocpawscostlineitem_daily_summary.product_family,
reporting_ocpawscostlineitem_daily_summary.instance_type,
reporting_ocpawscostlineitem_daily_summary.region,
reporting_ocpawscostlineitem_daily_summary.availability_zone,
reporting_ocpawscostlineitem_daily_summary.tags,
reporting_ocpawscostlineitem_daily_summary.usage_amount,
reporting_ocpawscostlineitem_daily_summary.unit,
reporting_ocpawscostlineitem_daily_summary.unblended_cost,
reporting_ocpawscostlineitem_daily_summary.markup_cost,
reporting_ocpawscostlineitem_daily_summary.currency_code,
reporting_ocpawscostlineitem_daily_summary.shared_projects,
reporting_ocpawscostlineitem_daily_summary.project_costs
FROM reporting_ocpawscostlineitem_daily_summary
WHERE reporting_ocpawscostlineitem_daily_summary.usage_start >= date_trunc('month'::text, date_trunc('month'::text, now()) - '1 day'::interval day)
UNION
SELECT 'Azure'::text AS source_type,
reporting_ocpazurecostlineitem_daily_summary.cluster_id,
reporting_ocpazurecostlineitem_daily_summary.cluster_alias,
reporting_ocpazurecostlineitem_daily_summary.namespace,
reporting_ocpazurecostlineitem_daily_summary.node,
reporting_ocpazurecostlineitem_daily_summary.resource_id,
reporting_ocpazurecostlineitem_daily_summary.usage_start::date,
reporting_ocpazurecostlineitem_daily_summary.usage_end::date,
reporting_ocpazurecostlineitem_daily_summary.subscription_guid AS usage_account_id,
NULL::integer AS account_alias_id,
reporting_ocpazurecostlineitem_daily_summary.service_name AS product_code,
NULL::character varying AS product_family,
reporting_ocpazurecostlineitem_daily_summary.instance_type,
reporting_ocpazurecostlineitem_daily_summary.resource_location AS region,
NULL::character varying AS availability_zone,
reporting_ocpazurecostlineitem_daily_summary.tags,
reporting_ocpazurecostlineitem_daily_summary.usage_quantity AS usage_amount,
reporting_ocpazurecostlineitem_daily_summary.unit_of_measure AS unit,
reporting_ocpazurecostlineitem_daily_summary.pretax_cost AS unblended_cost,
reporting_ocpazurecostlineitem_daily_summary.markup_cost,
reporting_ocpazurecostlineitem_daily_summary.currency AS currency_code,
reporting_ocpazurecostlineitem_daily_summary.shared_projects,
reporting_ocpazurecostlineitem_daily_summary.project_costs
FROM reporting_ocpazurecostlineitem_daily_summary
WHERE reporting_ocpazurecostlineitem_daily_summary.usage_start >= date_trunc('month'::text, date_trunc('month'::text, now()) - '1 day'::interval day)) lids
with no data;
create index mv_reporting_ocpallcostlineitem_daily_summary_namespace_ix
on reporting_ocpallcostlineitem_daily_summary using gin (namespace);
create index mv_reporting_ocpallcostlineitem_daily_summary_node_ix
on reporting_ocpallcostlineitem_daily_summary (node varchar_pattern_ops);
create index mv_reporting_ocpallcostlineitem_daily_summary_usage_ix
on reporting_ocpallcostlineitem_daily_summary (usage_start);
drop materialized view if exists reporting_ocpallcostlineitem_project_daily_summary;
create materialized view if not exists reporting_ocpallcostlineitem_project_daily_summary as
SELECT row_number() OVER () AS id,
lids.source_type,
lids.cluster_id,
lids.cluster_alias,
lids.data_source,
lids.namespace,
lids.node,
lids.pod_labels,
lids.resource_id,
lids.usage_start,
lids.usage_end,
lids.usage_account_id,
lids.account_alias_id,
lids.product_code,
lids.product_family,
lids.instance_type,
lids.region,
lids.availability_zone,
lids.usage_amount,
lids.unit,
lids.unblended_cost,
lids.project_markup_cost,
lids.pod_cost,
lids.currency_code
FROM ( SELECT 'AWS'::text AS source_type,
reporting_ocpawscostlineitem_project_daily_summary.cluster_id,
reporting_ocpawscostlineitem_project_daily_summary.cluster_alias,
reporting_ocpawscostlineitem_project_daily_summary.data_source,
reporting_ocpawscostlineitem_project_daily_summary.namespace,
reporting_ocpawscostlineitem_project_daily_summary.node,
reporting_ocpawscostlineitem_project_daily_summary.pod_labels,
reporting_ocpawscostlineitem_project_daily_summary.resource_id,
reporting_ocpawscostlineitem_project_daily_summary.usage_start::date,
reporting_ocpawscostlineitem_project_daily_summary.usage_end::date,
reporting_ocpawscostlineitem_project_daily_summary.usage_account_id,
reporting_ocpawscostlineitem_project_daily_summary.account_alias_id,
reporting_ocpawscostlineitem_project_daily_summary.product_code,
reporting_ocpawscostlineitem_project_daily_summary.product_family,
reporting_ocpawscostlineitem_project_daily_summary.instance_type,
reporting_ocpawscostlineitem_project_daily_summary.region,
reporting_ocpawscostlineitem_project_daily_summary.availability_zone,
reporting_ocpawscostlineitem_project_daily_summary.usage_amount,
reporting_ocpawscostlineitem_project_daily_summary.unit,
reporting_ocpawscostlineitem_project_daily_summary.unblended_cost,
reporting_ocpawscostlineitem_project_daily_summary.project_markup_cost,
reporting_ocpawscostlineitem_project_daily_summary.pod_cost,
reporting_ocpawscostlineitem_project_daily_summary.currency_code
FROM reporting_ocpawscostlineitem_project_daily_summary
WHERE reporting_ocpawscostlineitem_project_daily_summary.usage_start >= date_trunc('month'::text, date_trunc('month'::text, now()) - '1 day'::interval day)
UNION
SELECT 'Azure'::text AS source_type,
reporting_ocpazurecostlineitem_project_daily_summary.cluster_id,
reporting_ocpazurecostlineitem_project_daily_summary.cluster_alias,
reporting_ocpazurecostlineitem_project_daily_summary.data_source,
reporting_ocpazurecostlineitem_project_daily_summary.namespace,
reporting_ocpazurecostlineitem_project_daily_summary.node,
reporting_ocpazurecostlineitem_project_daily_summary.pod_labels,
reporting_ocpazurecostlineitem_project_daily_summary.resource_id,
reporting_ocpazurecostlineitem_project_daily_summary.usage_start::date,
reporting_ocpazurecostlineitem_project_daily_summary.usage_end::date,
reporting_ocpazurecostlineitem_project_daily_summary.subscription_guid AS usage_account_id,
NULL::integer AS account_alias_id,
reporting_ocpazurecostlineitem_project_daily_summary.service_name AS product_code,
NULL::character varying AS product_family,
reporting_ocpazurecostlineitem_project_daily_summary.instance_type,
reporting_ocpazurecostlineitem_project_daily_summary.resource_location AS region,
NULL::character varying AS availability_zone,
reporting_ocpazurecostlineitem_project_daily_summary.usage_quantity AS usage_amount,
reporting_ocpazurecostlineitem_project_daily_summary.unit_of_measure AS unit,
reporting_ocpazurecostlineitem_project_daily_summary.pretax_cost AS unblended_cost,
reporting_ocpazurecostlineitem_project_daily_summary.project_markup_cost,
reporting_ocpazurecostlineitem_project_daily_summary.pod_cost,
reporting_ocpazurecostlineitem_project_daily_summary.currency AS currency_code
FROM reporting_ocpazurecostlineitem_project_daily_summary
WHERE reporting_ocpazurecostlineitem_project_daily_summary.usage_start >= date_trunc('month'::text, date_trunc('month'::text, now()) - '1 day'::interval day)) lids
with no data;
create index mv_reporting_ocpallcostlineitem_prj_daily_summary_namespace_ix
on reporting_ocpallcostlineitem_project_daily_summary (namespace varchar_pattern_ops);
create index mv_reporting_ocpallcostlineitem_prj_daily_summary_node_ix
on reporting_ocpallcostlineitem_project_daily_summary (node varchar_pattern_ops);
create index mv_reporting_ocpallcostlineitem_prj_daily_summary_namespace_like_ix
on reporting_ocpallcostlineitem_project_daily_summary using gin (namespace gin_trgm_ops);
create index mv_reporting_ocpallcostlineitem_prj_daily_summary_node_like_ix
on reporting_ocpallcostlineitem_project_daily_summary using gin (node gin_trgm_ops);
create index mv_reporting_ocpallcostlineitem_prj_daily_summary_usage_ix
on reporting_ocpallcostlineitem_project_daily_summary (usage_start);
"""
),
migrations.RunSQL(
"""
refresh materialized view reporting_ocpallcostlineitem_daily_summary;
refresh materialized view reporting_ocpallcostlineitem_project_daily_summary;
"""
),
]
|
[
"django.db.models.Index",
"django.db.models.DateField",
"django.db.models.IntegerField",
"django.db.migrations.RemoveIndex",
"django.db.migrations.RunSQL"
] |
[((282, 492), 'django.db.migrations.RunSQL', 'migrations.RunSQL', (['"""\ndrop materialized view if exists reporting_ocpallcostlineitem_daily_summary;\ndrop materialized view if exists reporting_ocpallcostlineitem_project_daily_summary;\n """'], {}), '(\n """\ndrop materialized view if exists reporting_ocpallcostlineitem_daily_summary;\ndrop materialized view if exists reporting_ocpallcostlineitem_project_daily_summary;\n """\n )\n', (299, 492), False, 'from django.db import migrations\n'), ((514, 616), 'django.db.migrations.RemoveIndex', 'migrations.RemoveIndex', ([], {'model_name': '"""ocpawscostlineitemdailysummary"""', 'name': '"""cost_summary_node_idx"""'}), "(model_name='ocpawscostlineitemdailysummary', name=\n 'cost_summary_node_idx')\n", (536, 616), False, 'from django.db import migrations\n'), ((621, 736), 'django.db.migrations.RemoveIndex', 'migrations.RemoveIndex', ([], {'model_name': '"""ocpawscostlineitemprojectdailysummary"""', 'name': '"""cost__proj_sum_namespace_idx"""'}), "(model_name='ocpawscostlineitemprojectdailysummary',\n name='cost__proj_sum_namespace_idx')\n", (643, 736), False, 'from django.db import migrations\n'), ((764, 873), 'django.db.migrations.RemoveIndex', 'migrations.RemoveIndex', ([], {'model_name': '"""ocpawscostlineitemprojectdailysummary"""', 'name': '"""cost_proj_sum_node_idx"""'}), "(model_name='ocpawscostlineitemprojectdailysummary',\n name='cost_proj_sum_node_idx')\n", (786, 873), False, 'from django.db import migrations\n'), ((879, 979), 'django.db.migrations.RemoveIndex', 'migrations.RemoveIndex', ([], {'model_name': '"""ocpazurecostlineitemdailysummary"""', 'name': '"""ocpazure_node_idx"""'}), "(model_name='ocpazurecostlineitemdailysummary', name=\n 'ocpazure_node_idx')\n", (901, 979), False, 'from django.db import migrations\n'), ((984, 1100), 'django.db.migrations.RemoveIndex', 'migrations.RemoveIndex', ([], {'model_name': '"""ocpazurecostlineitemprojectdailysummary"""', 'name': '"""ocpazure_proj_namespace_idx"""'}), "(model_name='ocpazurecostlineitemprojectdailysummary',\n name='ocpazure_proj_namespace_idx')\n", (1006, 1100), False, 'from django.db import migrations\n'), ((1128, 1239), 'django.db.migrations.RemoveIndex', 'migrations.RemoveIndex', ([], {'model_name': '"""ocpazurecostlineitemprojectdailysummary"""', 'name': '"""ocpazure_proj_node_idx"""'}), "(model_name='ocpazurecostlineitemprojectdailysummary',\n name='ocpazure_proj_node_idx')\n", (1150, 1239), False, 'from django.db import migrations\n'), ((1245, 1330), 'django.db.migrations.RemoveIndex', 'migrations.RemoveIndex', ([], {'model_name': '"""ocpusagelineitemdaily"""', 'name': '"""namespace_idx"""'}), "(model_name='ocpusagelineitemdaily', name='namespace_idx'\n )\n", (1267, 1330), False, 'from django.db import migrations\n'), ((1335, 1410), 'django.db.migrations.RemoveIndex', 'migrations.RemoveIndex', ([], {'model_name': '"""ocpusagelineitemdaily"""', 'name': '"""node_idx"""'}), "(model_name='ocpusagelineitemdaily', name='node_idx')\n", (1357, 1410), False, 'from django.db import migrations\n'), ((1420, 1520), 'django.db.migrations.RemoveIndex', 'migrations.RemoveIndex', ([], {'model_name': '"""ocpusagelineitemdailysummary"""', 'name': '"""summary_namespace_idx"""'}), "(model_name='ocpusagelineitemdailysummary', name=\n 'summary_namespace_idx')\n", (1442, 1520), False, 'from django.db import migrations\n'), ((1525, 1620), 'django.db.migrations.RemoveIndex', 'migrations.RemoveIndex', ([], {'model_name': '"""ocpusagelineitemdailysummary"""', 'name': '"""summary_node_idx"""'}), 
"(model_name='ocpusagelineitemdailysummary', name=\n 'summary_node_idx')\n", (1547, 1620), False, 'from django.db import migrations\n'), ((7075, 7172), 'django.db.migrations.RunSQL', 'migrations.RunSQL', (['"""\ncreate extension if not exists pg_trgm schema public;\n """'], {}), '(\n """\ncreate extension if not exists pg_trgm schema public;\n """)\n', (7092, 7172), False, 'from django.db import migrations\n'), ((7403, 7788), 'django.db.migrations.RunSQL', 'migrations.RunSQL', (['"""\n/* add namespace index for like trigram ops */\ncreate index if not exists ocp_namespace_idx\n on reporting_ocpusagelineitem_daily using gin (UPPER(namespace) gin_trgm_ops);\n\n/* add node index for like trigram ops */\ncreate index if not exists ocp_node_idx\n on reporting_ocpusagelineitem_daily using gin (UPPER(node) gin_trgm_ops);\n """'], {}), '(\n """\n/* add namespace index for like trigram ops */\ncreate index if not exists ocp_namespace_idx\n on reporting_ocpusagelineitem_daily using gin (UPPER(namespace) gin_trgm_ops);\n\n/* add node index for like trigram ops */\ncreate index if not exists ocp_node_idx\n on reporting_ocpusagelineitem_daily using gin (UPPER(node) gin_trgm_ops);\n """\n )\n', (7420, 7788), False, 'from django.db import migrations\n'), ((7854, 8280), 'django.db.migrations.RunSQL', 'migrations.RunSQL', (['"""\n/* add namespace index for like trigram ops */\ncreate index if not exists ocp_summary_namespace_like_idx\n on reporting_ocpusagelineitem_daily_summary using gin (UPPER(namespace) gin_trgm_ops);\n\n/* add node index for like trigram ops */\ncreate index if not exists ocp_summary_node_like_idx\n on reporting_ocpusagelineitem_daily_summary using gin (UPPER(node) gin_trgm_ops);\n """'], {}), '(\n """\n/* add namespace index for like trigram ops */\ncreate index if not exists ocp_summary_namespace_like_idx\n on reporting_ocpusagelineitem_daily_summary using gin (UPPER(namespace) gin_trgm_ops);\n\n/* add node index for like trigram ops */\ncreate index if not exists ocp_summary_node_like_idx\n on reporting_ocpusagelineitem_daily_summary using gin (UPPER(node) gin_trgm_ops);\n """\n )\n', (7871, 8280), False, 'from django.db import migrations\n'), ((8347, 8767), 'django.db.migrations.RunSQL', 'migrations.RunSQL', (['"""\n/* add namespace index for like trigram ops */\ncreate index if not exists ocp_storage_li_namespace_like_idx\n on reporting_ocpstoragelineitem_daily using gin (UPPER(namespace) gin_trgm_ops);\n\n/* add node index for like trigram ops */\ncreate index if not exists ocp_storage_li_node_like_idx\n on reporting_ocpstoragelineitem_daily using gin (UPPER(node) gin_trgm_ops);\n """'], {}), '(\n """\n/* add namespace index for like trigram ops */\ncreate index if not exists ocp_storage_li_namespace_like_idx\n on reporting_ocpstoragelineitem_daily using gin (UPPER(namespace) gin_trgm_ops);\n\n/* add node index for like trigram ops */\ncreate index if not exists ocp_storage_li_node_like_idx\n on reporting_ocpstoragelineitem_daily using gin (UPPER(node) gin_trgm_ops);\n """\n )\n', (8364, 8767), False, 'from django.db import migrations\n'), ((8814, 9044), 'django.db.migrations.RunSQL', 'migrations.RunSQL', (['"""\n/* add node index for like trigram ops */\ncreate index if not exists ocpazure_node_like_idx\n on reporting_ocpazurecostlineitem_daily_summary using gin (UPPER(node) gin_trgm_ops);\n """'], {}), '(\n """\n/* add node index for like trigram ops */\ncreate index if not exists ocpazure_node_like_idx\n on reporting_ocpazurecostlineitem_daily_summary using gin (UPPER(node) 
gin_trgm_ops);\n """\n )\n', (8831, 9044), False, 'from django.db import migrations\n'), ((9099, 9553), 'django.db.migrations.RunSQL', 'migrations.RunSQL', (['"""\n/* add namespace index for like trigram ops */\ncreate index if not exists ocpazure_proj_namespace_like_idx\n on reporting_ocpazurecostlineitem_project_daily_summary using gin (UPPER(namespace) gin_trgm_ops);\n\n/* add node index for like trigram ops */\ncreate index if not exists ocpazure_proj_node_like_idx\n on reporting_ocpazurecostlineitem_project_daily_summary using gin (UPPER(node) gin_trgm_ops);\n """'], {}), '(\n """\n/* add namespace index for like trigram ops */\ncreate index if not exists ocpazure_proj_namespace_like_idx\n on reporting_ocpazurecostlineitem_project_daily_summary using gin (UPPER(namespace) gin_trgm_ops);\n\n/* add node index for like trigram ops */\ncreate index if not exists ocpazure_proj_node_like_idx\n on reporting_ocpazurecostlineitem_project_daily_summary using gin (UPPER(node) gin_trgm_ops);\n """\n )\n', (9116, 9553), False, 'from django.db import migrations\n'), ((9628, 9860), 'django.db.migrations.RunSQL', 'migrations.RunSQL', (['"""\n/* add node index for like trigram ops */\ncreate index if not exists cost_summary_node_like_idx\n on reporting_ocpawscostlineitem_daily_summary using gin (UPPER(node) gin_trgm_ops);\n """'], {}), '(\n """\n/* add node index for like trigram ops */\ncreate index if not exists cost_summary_node_like_idx\n on reporting_ocpawscostlineitem_daily_summary using gin (UPPER(node) gin_trgm_ops);\n """\n )\n', (9645, 9860), False, 'from django.db import migrations\n'), ((9943, 10395), 'django.db.migrations.RunSQL', 'migrations.RunSQL', (['"""\n/* add namespace index for like trigram ops */\ncreate index if not exists cost__proj_sum_namespace_like_idx\n on reporting_ocpawscostlineitem_project_daily_summary using gin (UPPER(namespace) gin_trgm_ops);\n\n/* add node index for like trigram ops */\ncreate index if not exists cost__proj_sum_node_like_idx\n on reporting_ocpawscostlineitem_project_daily_summary using gin (UPPER(node) gin_trgm_ops);\n """'], {}), '(\n """\n/* add namespace index for like trigram ops */\ncreate index if not exists cost__proj_sum_namespace_like_idx\n on reporting_ocpawscostlineitem_project_daily_summary using gin (UPPER(namespace) gin_trgm_ops);\n\n/* add node index for like trigram ops */\ncreate index if not exists cost__proj_sum_node_like_idx\n on reporting_ocpawscostlineitem_project_daily_summary using gin (UPPER(node) gin_trgm_ops);\n """\n )\n', (9960, 10395), False, 'from django.db import migrations\n'), ((10454, 10850), 'django.db.migrations.RunSQL', 'migrations.RunSQL', (['"""\n/* add namespace index for like trigram ops */\ncreate index if not exists ocpcostsum_namespace_like_idx\n on reporting_ocpcosts_summary using gin (UPPER(namespace) gin_trgm_ops);\n\n/* add node index for like trigram ops */\ncreate index if not exists ocpcostsum_node_like_idx\n on reporting_ocpcosts_summary using gin (UPPER(node) gin_trgm_ops);\n """'], {}), '(\n """\n/* add namespace index for like trigram ops */\ncreate index if not exists ocpcostsum_namespace_like_idx\n on reporting_ocpcosts_summary using gin (UPPER(namespace) gin_trgm_ops);\n\n/* add node index for like trigram ops */\ncreate index if not exists ocpcostsum_node_like_idx\n on reporting_ocpcosts_summary using gin (UPPER(node) gin_trgm_ops);\n """\n )\n', (10471, 10850), False, 'from django.db import migrations\n'), ((10872, 21363), 'django.db.migrations.RunSQL', 'migrations.RunSQL', (['"""\ndrop 
materialized view if exists reporting_ocpallcostlineitem_daily_summary;\n\ncreate materialized view if not exists reporting_ocpallcostlineitem_daily_summary as\n SELECT row_number() OVER () AS id,\n lids.source_type,\n lids.cluster_id,\n lids.cluster_alias,\n lids.namespace,\n lids.node,\n lids.resource_id,\n lids.usage_start,\n lids.usage_end,\n lids.usage_account_id,\n lids.account_alias_id,\n lids.product_code,\n lids.product_family,\n lids.instance_type,\n lids.region,\n lids.availability_zone,\n lids.tags,\n lids.usage_amount,\n lids.unit,\n lids.unblended_cost,\n lids.markup_cost,\n lids.currency_code,\n lids.shared_projects,\n lids.project_costs\n FROM ( SELECT \'AWS\'::text AS source_type,\n reporting_ocpawscostlineitem_daily_summary.cluster_id,\n reporting_ocpawscostlineitem_daily_summary.cluster_alias,\n reporting_ocpawscostlineitem_daily_summary.namespace,\n reporting_ocpawscostlineitem_daily_summary.node,\n reporting_ocpawscostlineitem_daily_summary.resource_id,\n reporting_ocpawscostlineitem_daily_summary.usage_start::date,\n reporting_ocpawscostlineitem_daily_summary.usage_end::date,\n reporting_ocpawscostlineitem_daily_summary.usage_account_id,\n reporting_ocpawscostlineitem_daily_summary.account_alias_id,\n reporting_ocpawscostlineitem_daily_summary.product_code,\n reporting_ocpawscostlineitem_daily_summary.product_family,\n reporting_ocpawscostlineitem_daily_summary.instance_type,\n reporting_ocpawscostlineitem_daily_summary.region,\n reporting_ocpawscostlineitem_daily_summary.availability_zone,\n reporting_ocpawscostlineitem_daily_summary.tags,\n reporting_ocpawscostlineitem_daily_summary.usage_amount,\n reporting_ocpawscostlineitem_daily_summary.unit,\n reporting_ocpawscostlineitem_daily_summary.unblended_cost,\n reporting_ocpawscostlineitem_daily_summary.markup_cost,\n reporting_ocpawscostlineitem_daily_summary.currency_code,\n reporting_ocpawscostlineitem_daily_summary.shared_projects,\n reporting_ocpawscostlineitem_daily_summary.project_costs\n FROM reporting_ocpawscostlineitem_daily_summary\n WHERE reporting_ocpawscostlineitem_daily_summary.usage_start >= date_trunc(\'month\'::text, date_trunc(\'month\'::text, now()) - \'1 day\'::interval day)\n UNION\n SELECT \'Azure\'::text AS source_type,\n reporting_ocpazurecostlineitem_daily_summary.cluster_id,\n reporting_ocpazurecostlineitem_daily_summary.cluster_alias,\n reporting_ocpazurecostlineitem_daily_summary.namespace,\n reporting_ocpazurecostlineitem_daily_summary.node,\n reporting_ocpazurecostlineitem_daily_summary.resource_id,\n reporting_ocpazurecostlineitem_daily_summary.usage_start::date,\n reporting_ocpazurecostlineitem_daily_summary.usage_end::date,\n reporting_ocpazurecostlineitem_daily_summary.subscription_guid AS usage_account_id,\n NULL::integer AS account_alias_id,\n reporting_ocpazurecostlineitem_daily_summary.service_name AS product_code,\n NULL::character varying AS product_family,\n reporting_ocpazurecostlineitem_daily_summary.instance_type,\n reporting_ocpazurecostlineitem_daily_summary.resource_location AS region,\n NULL::character varying AS availability_zone,\n reporting_ocpazurecostlineitem_daily_summary.tags,\n reporting_ocpazurecostlineitem_daily_summary.usage_quantity AS usage_amount,\n reporting_ocpazurecostlineitem_daily_summary.unit_of_measure AS unit,\n reporting_ocpazurecostlineitem_daily_summary.pretax_cost AS unblended_cost,\n reporting_ocpazurecostlineitem_daily_summary.markup_cost,\n reporting_ocpazurecostlineitem_daily_summary.currency AS currency_code,\n 
reporting_ocpazurecostlineitem_daily_summary.shared_projects,\n reporting_ocpazurecostlineitem_daily_summary.project_costs\n FROM reporting_ocpazurecostlineitem_daily_summary\n WHERE reporting_ocpazurecostlineitem_daily_summary.usage_start >= date_trunc(\'month\'::text, date_trunc(\'month\'::text, now()) - \'1 day\'::interval day)) lids\n with no data;\n\ncreate index mv_reporting_ocpallcostlineitem_daily_summary_namespace_ix\n on reporting_ocpallcostlineitem_daily_summary using gin (namespace);\ncreate index mv_reporting_ocpallcostlineitem_daily_summary_node_ix\n on reporting_ocpallcostlineitem_daily_summary (node varchar_pattern_ops);\ncreate index mv_reporting_ocpallcostlineitem_daily_summary_usage_ix\n on reporting_ocpallcostlineitem_daily_summary (usage_start);\n\n\ndrop materialized view if exists reporting_ocpallcostlineitem_project_daily_summary;\n\ncreate materialized view if not exists reporting_ocpallcostlineitem_project_daily_summary as\n SELECT row_number() OVER () AS id,\n lids.source_type,\n lids.cluster_id,\n lids.cluster_alias,\n lids.data_source,\n lids.namespace,\n lids.node,\n lids.pod_labels,\n lids.resource_id,\n lids.usage_start,\n lids.usage_end,\n lids.usage_account_id,\n lids.account_alias_id,\n lids.product_code,\n lids.product_family,\n lids.instance_type,\n lids.region,\n lids.availability_zone,\n lids.usage_amount,\n lids.unit,\n lids.unblended_cost,\n lids.project_markup_cost,\n lids.pod_cost,\n lids.currency_code\n FROM ( SELECT \'AWS\'::text AS source_type,\n reporting_ocpawscostlineitem_project_daily_summary.cluster_id,\n reporting_ocpawscostlineitem_project_daily_summary.cluster_alias,\n reporting_ocpawscostlineitem_project_daily_summary.data_source,\n reporting_ocpawscostlineitem_project_daily_summary.namespace,\n reporting_ocpawscostlineitem_project_daily_summary.node,\n reporting_ocpawscostlineitem_project_daily_summary.pod_labels,\n reporting_ocpawscostlineitem_project_daily_summary.resource_id,\n reporting_ocpawscostlineitem_project_daily_summary.usage_start::date,\n reporting_ocpawscostlineitem_project_daily_summary.usage_end::date,\n reporting_ocpawscostlineitem_project_daily_summary.usage_account_id,\n reporting_ocpawscostlineitem_project_daily_summary.account_alias_id,\n reporting_ocpawscostlineitem_project_daily_summary.product_code,\n reporting_ocpawscostlineitem_project_daily_summary.product_family,\n reporting_ocpawscostlineitem_project_daily_summary.instance_type,\n reporting_ocpawscostlineitem_project_daily_summary.region,\n reporting_ocpawscostlineitem_project_daily_summary.availability_zone,\n reporting_ocpawscostlineitem_project_daily_summary.usage_amount,\n reporting_ocpawscostlineitem_project_daily_summary.unit,\n reporting_ocpawscostlineitem_project_daily_summary.unblended_cost,\n reporting_ocpawscostlineitem_project_daily_summary.project_markup_cost,\n reporting_ocpawscostlineitem_project_daily_summary.pod_cost,\n reporting_ocpawscostlineitem_project_daily_summary.currency_code\n FROM reporting_ocpawscostlineitem_project_daily_summary\n WHERE reporting_ocpawscostlineitem_project_daily_summary.usage_start >= date_trunc(\'month\'::text, date_trunc(\'month\'::text, now()) - \'1 day\'::interval day)\n UNION\n SELECT \'Azure\'::text AS source_type,\n reporting_ocpazurecostlineitem_project_daily_summary.cluster_id,\n reporting_ocpazurecostlineitem_project_daily_summary.cluster_alias,\n reporting_ocpazurecostlineitem_project_daily_summary.data_source,\n reporting_ocpazurecostlineitem_project_daily_summary.namespace,\n 
reporting_ocpazurecostlineitem_project_daily_summary.node,\n reporting_ocpazurecostlineitem_project_daily_summary.pod_labels,\n reporting_ocpazurecostlineitem_project_daily_summary.resource_id,\n reporting_ocpazurecostlineitem_project_daily_summary.usage_start::date,\n reporting_ocpazurecostlineitem_project_daily_summary.usage_end::date,\n reporting_ocpazurecostlineitem_project_daily_summary.subscription_guid AS usage_account_id,\n NULL::integer AS account_alias_id,\n reporting_ocpazurecostlineitem_project_daily_summary.service_name AS product_code,\n NULL::character varying AS product_family,\n reporting_ocpazurecostlineitem_project_daily_summary.instance_type,\n reporting_ocpazurecostlineitem_project_daily_summary.resource_location AS region,\n NULL::character varying AS availability_zone,\n reporting_ocpazurecostlineitem_project_daily_summary.usage_quantity AS usage_amount,\n reporting_ocpazurecostlineitem_project_daily_summary.unit_of_measure AS unit,\n reporting_ocpazurecostlineitem_project_daily_summary.pretax_cost AS unblended_cost,\n reporting_ocpazurecostlineitem_project_daily_summary.project_markup_cost,\n reporting_ocpazurecostlineitem_project_daily_summary.pod_cost,\n reporting_ocpazurecostlineitem_project_daily_summary.currency AS currency_code\n FROM reporting_ocpazurecostlineitem_project_daily_summary\n WHERE reporting_ocpazurecostlineitem_project_daily_summary.usage_start >= date_trunc(\'month\'::text, date_trunc(\'month\'::text, now()) - \'1 day\'::interval day)) lids\n with no data;\n\ncreate index mv_reporting_ocpallcostlineitem_prj_daily_summary_namespace_ix\n on reporting_ocpallcostlineitem_project_daily_summary (namespace varchar_pattern_ops);\ncreate index mv_reporting_ocpallcostlineitem_prj_daily_summary_node_ix\n on reporting_ocpallcostlineitem_project_daily_summary (node varchar_pattern_ops);\ncreate index mv_reporting_ocpallcostlineitem_prj_daily_summary_namespace_like_ix\n on reporting_ocpallcostlineitem_project_daily_summary using gin (namespace gin_trgm_ops);\ncreate index mv_reporting_ocpallcostlineitem_prj_daily_summary_node_like_ix\n on reporting_ocpallcostlineitem_project_daily_summary using gin (node gin_trgm_ops);\ncreate index mv_reporting_ocpallcostlineitem_prj_daily_summary_usage_ix\n on reporting_ocpallcostlineitem_project_daily_summary (usage_start);\n """'], {}), '(\n """\ndrop materialized view if exists reporting_ocpallcostlineitem_daily_summary;\n\ncreate materialized view if not exists reporting_ocpallcostlineitem_daily_summary as\n SELECT row_number() OVER () AS id,\n lids.source_type,\n lids.cluster_id,\n lids.cluster_alias,\n lids.namespace,\n lids.node,\n lids.resource_id,\n lids.usage_start,\n lids.usage_end,\n lids.usage_account_id,\n lids.account_alias_id,\n lids.product_code,\n lids.product_family,\n lids.instance_type,\n lids.region,\n lids.availability_zone,\n lids.tags,\n lids.usage_amount,\n lids.unit,\n lids.unblended_cost,\n lids.markup_cost,\n lids.currency_code,\n lids.shared_projects,\n lids.project_costs\n FROM ( SELECT \'AWS\'::text AS source_type,\n reporting_ocpawscostlineitem_daily_summary.cluster_id,\n reporting_ocpawscostlineitem_daily_summary.cluster_alias,\n reporting_ocpawscostlineitem_daily_summary.namespace,\n reporting_ocpawscostlineitem_daily_summary.node,\n reporting_ocpawscostlineitem_daily_summary.resource_id,\n reporting_ocpawscostlineitem_daily_summary.usage_start::date,\n reporting_ocpawscostlineitem_daily_summary.usage_end::date,\n reporting_ocpawscostlineitem_daily_summary.usage_account_id,\n 
reporting_ocpawscostlineitem_daily_summary.account_alias_id,\n reporting_ocpawscostlineitem_daily_summary.product_code,\n reporting_ocpawscostlineitem_daily_summary.product_family,\n reporting_ocpawscostlineitem_daily_summary.instance_type,\n reporting_ocpawscostlineitem_daily_summary.region,\n reporting_ocpawscostlineitem_daily_summary.availability_zone,\n reporting_ocpawscostlineitem_daily_summary.tags,\n reporting_ocpawscostlineitem_daily_summary.usage_amount,\n reporting_ocpawscostlineitem_daily_summary.unit,\n reporting_ocpawscostlineitem_daily_summary.unblended_cost,\n reporting_ocpawscostlineitem_daily_summary.markup_cost,\n reporting_ocpawscostlineitem_daily_summary.currency_code,\n reporting_ocpawscostlineitem_daily_summary.shared_projects,\n reporting_ocpawscostlineitem_daily_summary.project_costs\n FROM reporting_ocpawscostlineitem_daily_summary\n WHERE reporting_ocpawscostlineitem_daily_summary.usage_start >= date_trunc(\'month\'::text, date_trunc(\'month\'::text, now()) - \'1 day\'::interval day)\n UNION\n SELECT \'Azure\'::text AS source_type,\n reporting_ocpazurecostlineitem_daily_summary.cluster_id,\n reporting_ocpazurecostlineitem_daily_summary.cluster_alias,\n reporting_ocpazurecostlineitem_daily_summary.namespace,\n reporting_ocpazurecostlineitem_daily_summary.node,\n reporting_ocpazurecostlineitem_daily_summary.resource_id,\n reporting_ocpazurecostlineitem_daily_summary.usage_start::date,\n reporting_ocpazurecostlineitem_daily_summary.usage_end::date,\n reporting_ocpazurecostlineitem_daily_summary.subscription_guid AS usage_account_id,\n NULL::integer AS account_alias_id,\n reporting_ocpazurecostlineitem_daily_summary.service_name AS product_code,\n NULL::character varying AS product_family,\n reporting_ocpazurecostlineitem_daily_summary.instance_type,\n reporting_ocpazurecostlineitem_daily_summary.resource_location AS region,\n NULL::character varying AS availability_zone,\n reporting_ocpazurecostlineitem_daily_summary.tags,\n reporting_ocpazurecostlineitem_daily_summary.usage_quantity AS usage_amount,\n reporting_ocpazurecostlineitem_daily_summary.unit_of_measure AS unit,\n reporting_ocpazurecostlineitem_daily_summary.pretax_cost AS unblended_cost,\n reporting_ocpazurecostlineitem_daily_summary.markup_cost,\n reporting_ocpazurecostlineitem_daily_summary.currency AS currency_code,\n reporting_ocpazurecostlineitem_daily_summary.shared_projects,\n reporting_ocpazurecostlineitem_daily_summary.project_costs\n FROM reporting_ocpazurecostlineitem_daily_summary\n WHERE reporting_ocpazurecostlineitem_daily_summary.usage_start >= date_trunc(\'month\'::text, date_trunc(\'month\'::text, now()) - \'1 day\'::interval day)) lids\n with no data;\n\ncreate index mv_reporting_ocpallcostlineitem_daily_summary_namespace_ix\n on reporting_ocpallcostlineitem_daily_summary using gin (namespace);\ncreate index mv_reporting_ocpallcostlineitem_daily_summary_node_ix\n on reporting_ocpallcostlineitem_daily_summary (node varchar_pattern_ops);\ncreate index mv_reporting_ocpallcostlineitem_daily_summary_usage_ix\n on reporting_ocpallcostlineitem_daily_summary (usage_start);\n\n\ndrop materialized view if exists reporting_ocpallcostlineitem_project_daily_summary;\n\ncreate materialized view if not exists reporting_ocpallcostlineitem_project_daily_summary as\n SELECT row_number() OVER () AS id,\n lids.source_type,\n lids.cluster_id,\n lids.cluster_alias,\n lids.data_source,\n lids.namespace,\n lids.node,\n lids.pod_labels,\n lids.resource_id,\n lids.usage_start,\n lids.usage_end,\n 
lids.usage_account_id,\n lids.account_alias_id,\n lids.product_code,\n lids.product_family,\n lids.instance_type,\n lids.region,\n lids.availability_zone,\n lids.usage_amount,\n lids.unit,\n lids.unblended_cost,\n lids.project_markup_cost,\n lids.pod_cost,\n lids.currency_code\n FROM ( SELECT \'AWS\'::text AS source_type,\n reporting_ocpawscostlineitem_project_daily_summary.cluster_id,\n reporting_ocpawscostlineitem_project_daily_summary.cluster_alias,\n reporting_ocpawscostlineitem_project_daily_summary.data_source,\n reporting_ocpawscostlineitem_project_daily_summary.namespace,\n reporting_ocpawscostlineitem_project_daily_summary.node,\n reporting_ocpawscostlineitem_project_daily_summary.pod_labels,\n reporting_ocpawscostlineitem_project_daily_summary.resource_id,\n reporting_ocpawscostlineitem_project_daily_summary.usage_start::date,\n reporting_ocpawscostlineitem_project_daily_summary.usage_end::date,\n reporting_ocpawscostlineitem_project_daily_summary.usage_account_id,\n reporting_ocpawscostlineitem_project_daily_summary.account_alias_id,\n reporting_ocpawscostlineitem_project_daily_summary.product_code,\n reporting_ocpawscostlineitem_project_daily_summary.product_family,\n reporting_ocpawscostlineitem_project_daily_summary.instance_type,\n reporting_ocpawscostlineitem_project_daily_summary.region,\n reporting_ocpawscostlineitem_project_daily_summary.availability_zone,\n reporting_ocpawscostlineitem_project_daily_summary.usage_amount,\n reporting_ocpawscostlineitem_project_daily_summary.unit,\n reporting_ocpawscostlineitem_project_daily_summary.unblended_cost,\n reporting_ocpawscostlineitem_project_daily_summary.project_markup_cost,\n reporting_ocpawscostlineitem_project_daily_summary.pod_cost,\n reporting_ocpawscostlineitem_project_daily_summary.currency_code\n FROM reporting_ocpawscostlineitem_project_daily_summary\n WHERE reporting_ocpawscostlineitem_project_daily_summary.usage_start >= date_trunc(\'month\'::text, date_trunc(\'month\'::text, now()) - \'1 day\'::interval day)\n UNION\n SELECT \'Azure\'::text AS source_type,\n reporting_ocpazurecostlineitem_project_daily_summary.cluster_id,\n reporting_ocpazurecostlineitem_project_daily_summary.cluster_alias,\n reporting_ocpazurecostlineitem_project_daily_summary.data_source,\n reporting_ocpazurecostlineitem_project_daily_summary.namespace,\n reporting_ocpazurecostlineitem_project_daily_summary.node,\n reporting_ocpazurecostlineitem_project_daily_summary.pod_labels,\n reporting_ocpazurecostlineitem_project_daily_summary.resource_id,\n reporting_ocpazurecostlineitem_project_daily_summary.usage_start::date,\n reporting_ocpazurecostlineitem_project_daily_summary.usage_end::date,\n reporting_ocpazurecostlineitem_project_daily_summary.subscription_guid AS usage_account_id,\n NULL::integer AS account_alias_id,\n reporting_ocpazurecostlineitem_project_daily_summary.service_name AS product_code,\n NULL::character varying AS product_family,\n reporting_ocpazurecostlineitem_project_daily_summary.instance_type,\n reporting_ocpazurecostlineitem_project_daily_summary.resource_location AS region,\n NULL::character varying AS availability_zone,\n reporting_ocpazurecostlineitem_project_daily_summary.usage_quantity AS usage_amount,\n reporting_ocpazurecostlineitem_project_daily_summary.unit_of_measure AS unit,\n reporting_ocpazurecostlineitem_project_daily_summary.pretax_cost AS unblended_cost,\n reporting_ocpazurecostlineitem_project_daily_summary.project_markup_cost,\n reporting_ocpazurecostlineitem_project_daily_summary.pod_cost,\n 
reporting_ocpazurecostlineitem_project_daily_summary.currency AS currency_code\n FROM reporting_ocpazurecostlineitem_project_daily_summary\n WHERE reporting_ocpazurecostlineitem_project_daily_summary.usage_start >= date_trunc(\'month\'::text, date_trunc(\'month\'::text, now()) - \'1 day\'::interval day)) lids\n with no data;\n\ncreate index mv_reporting_ocpallcostlineitem_prj_daily_summary_namespace_ix\n on reporting_ocpallcostlineitem_project_daily_summary (namespace varchar_pattern_ops);\ncreate index mv_reporting_ocpallcostlineitem_prj_daily_summary_node_ix\n on reporting_ocpallcostlineitem_project_daily_summary (node varchar_pattern_ops);\ncreate index mv_reporting_ocpallcostlineitem_prj_daily_summary_namespace_like_ix\n on reporting_ocpallcostlineitem_project_daily_summary using gin (namespace gin_trgm_ops);\ncreate index mv_reporting_ocpallcostlineitem_prj_daily_summary_node_like_ix\n on reporting_ocpallcostlineitem_project_daily_summary using gin (node gin_trgm_ops);\ncreate index mv_reporting_ocpallcostlineitem_prj_daily_summary_usage_ix\n on reporting_ocpallcostlineitem_project_daily_summary (usage_start);\n """\n )\n', (10889, 21363), False, 'from django.db import migrations\n'), ((21385, 21581), 'django.db.migrations.RunSQL', 'migrations.RunSQL', (['"""\nrefresh materialized view reporting_ocpallcostlineitem_daily_summary;\nrefresh materialized view reporting_ocpallcostlineitem_project_daily_summary;\n """'], {}), '(\n """\nrefresh materialized view reporting_ocpallcostlineitem_daily_summary;\nrefresh materialized view reporting_ocpallcostlineitem_project_daily_summary;\n """\n )\n', (21402, 21581), False, 'from django.db import migrations\n'), ((1736, 1754), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (1752, 1754), False, 'from django.db import models\n'), ((1887, 1905), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (1903, 1905), False, 'from django.db import models\n'), ((2031, 2049), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (2047, 2049), False, 'from django.db import models\n'), ((2177, 2195), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (2193, 2195), False, 'from django.db import models\n'), ((2328, 2346), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (2344, 2346), False, 'from django.db import models\n'), ((2481, 2499), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (2497, 2499), False, 'from django.db import models\n'), ((2603, 2621), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (2619, 2621), False, 'from django.db import models\n'), ((2718, 2736), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (2734, 2736), False, 'from django.db import models\n'), ((2833, 2854), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (2852, 2854), False, 'from django.db import models\n'), ((2947, 2965), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (2963, 2965), False, 'from django.db import models\n'), ((3060, 3078), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (3076, 3078), False, 'from django.db import models\n'), ((3178, 3196), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (3194, 3196), False, 'from django.db import models\n'), ((3298, 3316), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (3314, 3316), False, 'from django.db import models\n'), ((3423, 3522), 'django.db.models.Index', 'models.Index', 
([], {'fields': "['node']", 'name': '"""cost_summary_node_idx"""', 'opclasses': "['varchar_pattern_ops']"}), "(fields=['node'], name='cost_summary_node_idx', opclasses=[\n 'varchar_pattern_ops'])\n", (3435, 3522), False, 'from django.db import models\n'), ((3641, 3751), 'django.db.models.Index', 'models.Index', ([], {'fields': "['namespace']", 'name': '"""cost__proj_sum_namespace_idx"""', 'opclasses': "['varchar_pattern_ops']"}), "(fields=['namespace'], name='cost__proj_sum_namespace_idx',\n opclasses=['varchar_pattern_ops'])\n", (3653, 3751), False, 'from django.db import models\n'), ((3901, 4001), 'django.db.models.Index', 'models.Index', ([], {'fields': "['node']", 'name': '"""cost_proj_sum_node_idx"""', 'opclasses': "['varchar_pattern_ops']"}), "(fields=['node'], name='cost_proj_sum_node_idx', opclasses=[\n 'varchar_pattern_ops'])\n", (3913, 4001), False, 'from django.db import models\n'), ((4115, 4210), 'django.db.models.Index', 'models.Index', ([], {'fields': "['node']", 'name': '"""ocpazure_node_idx"""', 'opclasses': "['varchar_pattern_ops']"}), "(fields=['node'], name='ocpazure_node_idx', opclasses=[\n 'varchar_pattern_ops'])\n", (4127, 4210), False, 'from django.db import models\n'), ((4331, 4440), 'django.db.models.Index', 'models.Index', ([], {'fields': "['namespace']", 'name': '"""ocpazure_proj_namespace_idx"""', 'opclasses': "['varchar_pattern_ops']"}), "(fields=['namespace'], name='ocpazure_proj_namespace_idx',\n opclasses=['varchar_pattern_ops'])\n", (4343, 4440), False, 'from django.db import models\n'), ((4592, 4692), 'django.db.models.Index', 'models.Index', ([], {'fields': "['node']", 'name': '"""ocpazure_proj_node_idx"""', 'opclasses': "['varchar_pattern_ops']"}), "(fields=['node'], name='ocpazure_proj_node_idx', opclasses=[\n 'varchar_pattern_ops'])\n", (4604, 4692), False, 'from django.db import models\n'), ((4797, 4907), 'django.db.models.Index', 'models.Index', ([], {'fields': "['namespace']", 'name': '"""ocp_storage_li_namespace_idx"""', 'opclasses': "['varchar_pattern_ops']"}), "(fields=['namespace'], name='ocp_storage_li_namespace_idx',\n opclasses=['varchar_pattern_ops'])\n", (4809, 4907), False, 'from django.db import models\n'), ((5043, 5144), 'django.db.models.Index', 'models.Index', ([], {'fields': "['node']", 'name': '"""ocp_storage_li_node_idx"""', 'opclasses': "['varchar_pattern_ops']"}), "(fields=['node'], name='ocp_storage_li_node_idx', opclasses=[\n 'varchar_pattern_ops'])\n", (5055, 5144), False, 'from django.db import models\n'), ((5247, 5343), 'django.db.models.Index', 'models.Index', ([], {'fields': "['namespace']", 'name': '"""namespace_idx"""', 'opclasses': "['varchar_pattern_ops']"}), "(fields=['namespace'], name='namespace_idx', opclasses=[\n 'varchar_pattern_ops'])\n", (5259, 5343), False, 'from django.db import models\n'), ((5446, 5532), 'django.db.models.Index', 'models.Index', ([], {'fields': "['node']", 'name': '"""node_idx"""', 'opclasses': "['varchar_pattern_ops']"}), "(fields=['node'], name='node_idx', opclasses=[\n 'varchar_pattern_ops'])\n", (5458, 5532), False, 'from django.db import models\n'), ((5642, 5746), 'django.db.models.Index', 'models.Index', ([], {'fields': "['namespace']", 'name': '"""summary_namespace_idx"""', 'opclasses': "['varchar_pattern_ops']"}), "(fields=['namespace'], name='summary_namespace_idx', opclasses=\n ['varchar_pattern_ops'])\n", (5654, 5746), False, 'from django.db import models\n'), ((5856, 5950), 'django.db.models.Index', 'models.Index', ([], {'fields': "['node']", 'name': '"""summary_node_idx"""', 
'opclasses': "['varchar_pattern_ops']"}), "(fields=['node'], name='summary_node_idx', opclasses=[\n 'varchar_pattern_ops'])\n", (5868, 5950), False, 'from django.db import models\n'), ((6038, 6056), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (6054, 6056), False, 'from django.db import models\n'), ((6141, 6159), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (6157, 6159), False, 'from django.db import models\n'), ((6235, 6306), 'django.db.models.Index', 'models.Index', ([], {'fields': "['usage_start']", 'name': '"""ocpcostsum_usage_start_idx"""'}), "(fields=['usage_start'], name='ocpcostsum_usage_start_idx')\n", (6247, 6306), False, 'from django.db import models\n'), ((6403, 6509), 'django.db.models.Index', 'models.Index', ([], {'fields': "['namespace']", 'name': '"""ocpcostsum_namespace_idx"""', 'opclasses': "['varchar_pattern_ops']"}), "(fields=['namespace'], name='ocpcostsum_namespace_idx',\n opclasses=['varchar_pattern_ops'])\n", (6415, 6509), False, 'from django.db import models\n'), ((6633, 6730), 'django.db.models.Index', 'models.Index', ([], {'fields': "['node']", 'name': '"""ocpcostsum_node_idx"""', 'opclasses': "['varchar_pattern_ops']"}), "(fields=['node'], name='ocpcostsum_node_idx', opclasses=[\n 'varchar_pattern_ops'])\n", (6645, 6730), False, 'from django.db import models\n')]
|
import os
import sys
import time
import random
import string
import argparse
import torch
import torch.backends.cudnn as cudnn
import torch.nn.init as init
import torch.optim as optim
import torch.utils.data
import numpy as np
from utils import CTCLabelConverter, CTCLabelConverterForBaiduWarpctc, AttnLabelConverter, Averager
from simclr_dataset import hierarchical_dataset, AlignCollate, Batch_Balanced_Dataset
from simclr_model import FeaturesModel as Model
from test import validation
from byol_pytorch import BYOL
from imgaug import augmenters as iaa
import imgaug as ia
from tqdm import tqdm
import matplotlib.pyplot as plt
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def train(opt):
""" dataset preparation """
if not opt.data_filtering_off:
print('Filtering the images containing characters which are not in opt.character')
print('Filtering the images whose label is longer than opt.batch_max_length')
# see https://github.com/clovaai/deep-text-recognition-benchmark/blob/6593928855fb7abb999a99f428b3e4477d4ae356/dataset.py#L130
opt.select_data = opt.select_data.split('-')
opt.batch_ratio = opt.batch_ratio.split('-')
train_dataset = Batch_Balanced_Dataset(opt)
log = open(f'./saved_models/{opt.exp_name}/log_dataset.txt', 'a')
ia.seed(1)
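    # imgaug pipeline: apply 1-5 of the transforms below to each image, in random order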
image_transforms = iaa.Sequential([iaa.SomeOf((1, 5),
[iaa.LinearContrast((0.5, 1.0)),
iaa.GaussianBlur((0.5, 1.5)),
iaa.Crop(percent=((0, 0.4),(0, 0),(0, 0.4),(0, 0.0)), keep_size=True),
iaa.Crop(percent=((0, 0.0),(0, 0.02),(0, 0),(0, 0.02)), keep_size=True),
iaa.Sharpen(alpha=(0.0, 0.5), lightness=(0.0, 0.5)),
iaa.PiecewiseAffine(scale=(0.02, 0.03), mode='edge'),
iaa.PerspectiveTransform(scale=(0.01, 0.02))],
random_order=True)])
AlignCollate_valid = AlignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio_with_pad=opt.PAD, image_transforms=image_transforms)
valid_dataset, valid_dataset_log = hierarchical_dataset(root=opt.valid_data, opt=opt)
valid_loader = torch.utils.data.DataLoader(
valid_dataset, batch_size=opt.batch_size,
shuffle=True, # 'True' to check training progress with validation function.
num_workers=int(opt.workers),
collate_fn=AlignCollate_valid, pin_memory=True)
log.write(valid_dataset_log)
print('-' * 80)
log.write('-' * 80 + '\n')
log.close()
if opt.rgb:
opt.input_channel = 3
model = Model(opt)
print('model input parameters', opt.imgH, opt.imgW, opt.num_fiducial, opt.input_channel, opt.output_channel,
opt.hidden_size, opt.batch_max_length, opt.Transformation, opt.FeatureExtraction,
opt.SequenceModeling)
# weight initialization
for name, param in model.named_parameters():
if 'localization_fc2' in name:
print(f'Skip {name} as it is already initialized')
continue
try:
if 'bias' in name:
init.constant_(param, 0.0)
elif 'weight' in name:
init.kaiming_normal_(param)
except Exception as e: # for batchnorm.
if 'weight' in name:
param.data.fill_(1)
continue
# data parallel for multi-GPU
model = torch.nn.DataParallel(model).to(device)
model.train()
if opt.saved_model != '':
print(f'loading pretrained model from {opt.saved_model}')
if opt.FT:
model.load_state_dict(torch.load(opt.saved_model), strict=False)
else:
model.load_state_dict(torch.load(opt.saved_model))
print("Model:")
print(model)
image_transforms = iaa.Sequential([iaa.SomeOf((1, 5),
[iaa.LinearContrast((0.5, 1.0)),
iaa.GaussianBlur((0.5, 1.5)),
iaa.Crop(percent=((0, 0.4),(0, 0),(0, 0.4),(0, 0.0)), keep_size=True),
iaa.Crop(percent=((0, 0.0),(0, 0.02),(0, 0),(0, 0.02)), keep_size=True),
iaa.Sharpen(alpha=(0.0, 0.5), lightness=(0.0, 0.5)),
iaa.PiecewiseAffine(scale=(0.02, 0.03), mode='edge'),
iaa.PerspectiveTransform(scale=(0.01, 0.02))],
random_order=True)])
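    # wrap the model in a BYOL self-supervised learner that uses the imgaug pipeline above as its augmentation function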
byol_learner = BYOL(
model,
image_size=(32,100),
hidden_layer=-1,
channels=1,
augment_fn=image_transforms,
augmented=True)
print(byol_learner)
# filter that only require gradient decent
filtered_parameters = []
params_num = []
for p in filter(lambda p: p.requires_grad, byol_learner.parameters()):
filtered_parameters.append(p)
params_num.append(np.prod(p.size()))
print('Trainable params num : ', sum(params_num))
# setup optimizer
if opt.optimizer == 'adam':
optimizer = optim.Adam(filtered_parameters, lr=opt.lr, betas=(opt.beta1, 0.999))
elif opt.optimizer == 'adadelta':
optimizer = optim.Adadelta(filtered_parameters, lr=opt.lr, rho=opt.rho, eps=opt.eps, weight_decay=opt.weight_decay)
elif opt.optimizer == 'sgd':
optimizer = optim.SGD(filtered_parameters, lr=opt.lr, momentum=opt.momentum, weight_decay=opt.weight_decay, nesterov=opt.nesterov)
else:
raise Exception('Unknown optimizer')
print("Optimizer:")
print(optimizer)
""" final options """
# print(opt)
with open(f'./saved_models/{opt.exp_name}/opt.txt', 'a') as opt_file:
opt_log = '------------ Options -------------\n'
args = vars(opt)
for k, v in args.items():
opt_log += f'{str(k)}: {str(v)}\n'
opt_log += '---------------------------------------\n'
print(opt_log)
opt_file.write(opt_log)
""" start training """
start_iter = 0
if opt.saved_model != '':
try:
start_iter = int(opt.saved_model.split('_')[-1].split('.')[0])
print(f'continue to train, start_iter: {start_iter}')
except:
pass
#LR Scheduler:
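    # drop the learning rate by 10x at 60% and 80% of the total number of iterations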
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[int(0.6*opt.num_iter), int(0.8*opt.num_iter)], last_epoch=start_iter-1, gamma=0.1)
best_loss = None
iteration = start_iter
print(device)
loss_avg = Averager()
valid_loss_avg = Averager()
# kl_loss_avg = Averager()
# kl_loss = torch.nn.KLDivLoss()
epoch = 0
    while True:
# train part
for i in tqdm(range(opt.valInterval)):
image_tensors, _ = train_dataset.get_batch()
image = image_tensors.to(device)
optimizer.zero_grad()
loss = byol_learner(image)
loss.backward()
if opt.grad_clip:
torch.nn.utils.clip_grad_norm_(byol_learner.parameters(), opt.grad_clip)
optimizer.step()
scheduler.step()
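            # BYOL: update the exponential-moving-average target network after each optimizer step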
byol_learner.update_moving_average()
loss_avg.add(loss)
            if iteration == 0:
                print("Iteration {:06d} Loss: {:.04f}".format(iteration, loss_avg.val()))
iteration += 1
byol_learner.eval()
model.eval()
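        # evaluate the BYOL loss on the validation set without tracking gradients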
with torch.no_grad():
for image_tensors, _ in valid_loader:
image = image_tensors.to(device)
val_loss = byol_learner(image)
valid_loss_avg.add(val_loss)
# features = model(image)
# features = features.view(-1, 26, features.shape[1])
# kl_div = kl_loss(features[:int(features.shape[0]/2)], features[int(features.shape[0]/2):])
# kl_loss_avg.add(kl_div)
model.train()
byol_learner.train()
with open(f'./saved_models/{opt.exp_name}/log_train.txt', 'a') as log:
            log.write("Iteration {:06d} Loss: {:.06f} Val loss: {:.06f}".format(iteration, loss_avg.val(), valid_loss_avg.val()) + '\n')
        print("Iteration {:06d} Loss: {:.06f} Val loss: {:.06f}".format(iteration, loss_avg.val(), valid_loss_avg.val()))
if best_loss is None:
best_loss = valid_loss_avg.val()
torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth')
elif best_loss > valid_loss_avg.val():
best_loss = valid_loss_avg.val()
torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth')
scheduler.step()
loss_avg.reset()
valid_loss_avg.reset()
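        # save a periodic checkpoint every 5 epochs, in addition to the best-validation-loss checkpoints above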
if epoch % 5 == 0:
torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth')
if (iteration + 1) >= opt.num_iter:
print('end the training')
torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth')
sys.exit()
epoch +=1
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--exp_name', help='Where to store logs and models')
parser.add_argument('--train_data', required=True, help='path to training dataset')
parser.add_argument('--valid_data', required=True, help='path to validation dataset')
parser.add_argument('--manualSeed', type=int, default=1111, help='for random seed setting')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=4)
parser.add_argument('--batch_size', type=int, default=192, help='input batch size')
parser.add_argument('--num_iter', type=int, default=300000, help='number of iterations to train for')
parser.add_argument('--valInterval', type=int, default=2000, help='Interval between each validation')
parser.add_argument('--saved_model', default='', help="path to model to continue training")
parser.add_argument('--FT', action='store_true', help='whether to do fine-tuning')
parser.add_argument('--optimizer', type=str, choices=['adam', 'adadelta', 'sgd'], help="Optimizer")
parser.add_argument('--lr', type=float, default=1, help='learning rate, default=1.0 for Adadelta')
parser.add_argument('--beta1', type=float, default=0.9, help='beta1 for adam. default=0.9')
parser.add_argument('--rho', type=float, default=0.95, help='decay rate rho for Adadelta. default=0.95')
parser.add_argument('--eps', type=float, default=1e-8, help='eps for Adadelta. default=1e-8')
parser.add_argument('--nesterov', action='store_true', help='Use Nesterov momentum for SGD')
parser.add_argument('--momentum', type=float, default=0.9, help='Momentum for SGD')
parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping value. default=5')
    parser.add_argument('--baiduCTC', action='store_true', help='use Baidu warp-ctc for the CTC loss')
""" Data processing """
parser.add_argument('--select_data', type=str, default='MJ-ST',
help='select training data (default is MJ-ST, which means MJ and ST used as training data)')
parser.add_argument('--batch_ratio', type=str, default='0.5-0.5',
help='assign ratio for each selected data in the batch')
parser.add_argument('--total_data_usage_ratio', type=str, default='1.0',
help='total data usage ratio, this ratio is multiplied to total number of data.')
parser.add_argument('--batch_max_length', type=int, default=25, help='maximum-label-length')
parser.add_argument('--imgH', type=int, default=32, help='the height of the input image')
parser.add_argument('--imgW', type=int, default=100, help='the width of the input image')
parser.add_argument('--rgb', action='store_true', help='use rgb input')
parser.add_argument('--character', type=str,
default='0123456789abcdefghijklmnopqrstuvwxyz', help='character label')
parser.add_argument('--sensitive', action='store_true', help='for sensitive character mode')
parser.add_argument('--PAD', action='store_true', help='whether to keep ratio then pad for image resize')
parser.add_argument('--data_filtering_off', action='store_true', help='for data_filtering_off mode')
""" Model Architecture """
parser.add_argument('--Transformation', type=str, required=True, help='Transformation stage. None|TPS')
parser.add_argument('--FeatureExtraction', type=str, required=True,
help='FeatureExtraction stage. VGG|RCNN|ResNet')
parser.add_argument('--SequenceModeling', type=str, required=True, help='SequenceModeling stage. None|BiLSTM')
parser.add_argument('--num_fiducial', type=int, default=20, help='number of fiducial points of TPS-STN')
parser.add_argument('--input_channel', type=int, default=1,
help='the number of input channel of Feature extractor')
parser.add_argument('--output_channel', type=int, default=512,
help='the number of output channel of Feature extractor')
parser.add_argument('--hidden_size', type=int, default=256, help='the size of the LSTM hidden state')
parser.add_argument('--weight_decay', type=float, default=10e-4, help='Weight decay')
parser.add_argument('--FinalLayer', action='store_true', help='Use a nonlinear projection head during training')
parser.add_argument('--final_feature', type=int, default=256, help='the size of the output of the final layer')
opt = parser.parse_args()
if not opt.exp_name:
opt.exp_name = f'{opt.Transformation}-{opt.FeatureExtraction}-{opt.SequenceModeling}-BYOL'
opt.exp_name += f'-Seed{opt.manualSeed}'
# print(opt.exp_name)
os.makedirs(f'./saved_models/{opt.exp_name}', exist_ok=True)
""" vocab / character number configuration """
if opt.sensitive:
# opt.character += 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
opt.character = string.printable[:-6] # same with ASTER setting (use 94 char).
""" Seed and GPU setting """
# print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
np.random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
torch.cuda.manual_seed(opt.manualSeed)
cudnn.benchmark = True
cudnn.deterministic = True
opt.num_gpu = torch.cuda.device_count()
# print('device count', opt.num_gpu)
if opt.num_gpu > 1:
print('------ Use multi-GPU setting ------')
print('if you stuck too long time with multi-GPU setting, try to set --workers 0')
# check multi-GPU issue https://github.com/clovaai/deep-text-recognition-benchmark/issues/1
opt.workers = opt.workers * opt.num_gpu
opt.batch_size = opt.batch_size * opt.num_gpu
""" previous version
        print('To equalize batch stats to the 1-GPU setting, the batch_size is multiplied with num_gpu and the multiplied batch_size is ', opt.batch_size)
        opt.batch_size = opt.batch_size * opt.num_gpu
        print('To equalize the number of epochs to the 1-GPU setting, num_iter is divided by num_gpu by default.')
        If you don't care about it, just comment out these lines.
opt.num_iter = int(opt.num_iter / opt.num_gpu)
"""
train(opt)
|
[
"imgaug.augmenters.PiecewiseAffine",
"torch.nn.init.constant_",
"imgaug.augmenters.GaussianBlur",
"simclr_dataset.AlignCollate",
"torch.cuda.device_count",
"simclr_dataset.Batch_Balanced_Dataset",
"torch.cuda.is_available",
"sys.exit",
"argparse.ArgumentParser",
"imgaug.augmenters.Crop",
"torch.nn.init.kaiming_normal_",
"numpy.random.seed",
"imgaug.augmenters.LinearContrast",
"imgaug.augmenters.Sharpen",
"simclr_dataset.hierarchical_dataset",
"torch.optim.Adadelta",
"torch.optim.SGD",
"simclr_model.FeaturesModel",
"imgaug.augmenters.PerspectiveTransform",
"utils.Averager",
"torch.manual_seed",
"torch.optim.Adam",
"os.makedirs",
"torch.load",
"torch.nn.DataParallel",
"random.seed",
"imgaug.seed",
"torch.no_grad",
"torch.cuda.manual_seed",
"byol_pytorch.BYOL"
] |
[((1220, 1247), 'simclr_dataset.Batch_Balanced_Dataset', 'Batch_Balanced_Dataset', (['opt'], {}), '(opt)\n', (1242, 1247), False, 'from simclr_dataset import hierarchical_dataset, AlignCollate, Batch_Balanced_Dataset\n'), ((1328, 1338), 'imgaug.seed', 'ia.seed', (['(1)'], {}), '(1)\n', (1335, 1338), True, 'import imgaug as ia\n'), ((2014, 2124), 'simclr_dataset.AlignCollate', 'AlignCollate', ([], {'imgH': 'opt.imgH', 'imgW': 'opt.imgW', 'keep_ratio_with_pad': 'opt.PAD', 'image_transforms': 'image_transforms'}), '(imgH=opt.imgH, imgW=opt.imgW, keep_ratio_with_pad=opt.PAD,\n image_transforms=image_transforms)\n', (2026, 2124), False, 'from simclr_dataset import hierarchical_dataset, AlignCollate, Batch_Balanced_Dataset\n'), ((2160, 2210), 'simclr_dataset.hierarchical_dataset', 'hierarchical_dataset', ([], {'root': 'opt.valid_data', 'opt': 'opt'}), '(root=opt.valid_data, opt=opt)\n', (2180, 2210), False, 'from simclr_dataset import hierarchical_dataset, AlignCollate, Batch_Balanced_Dataset\n'), ((2651, 2661), 'simclr_model.FeaturesModel', 'Model', (['opt'], {}), '(opt)\n', (2656, 2661), True, 'from simclr_model import FeaturesModel as Model\n'), ((4486, 4598), 'byol_pytorch.BYOL', 'BYOL', (['model'], {'image_size': '(32, 100)', 'hidden_layer': '(-1)', 'channels': '(1)', 'augment_fn': 'image_transforms', 'augmented': '(True)'}), '(model, image_size=(32, 100), hidden_layer=-1, channels=1, augment_fn=\n image_transforms, augmented=True)\n', (4490, 4598), False, 'from byol_pytorch import BYOL\n'), ((6485, 6495), 'utils.Averager', 'Averager', ([], {}), '()\n', (6493, 6495), False, 'from utils import CTCLabelConverter, CTCLabelConverterForBaiduWarpctc, AttnLabelConverter, Averager\n'), ((6517, 6527), 'utils.Averager', 'Averager', ([], {}), '()\n', (6525, 6527), False, 'from utils import CTCLabelConverter, CTCLabelConverterForBaiduWarpctc, AttnLabelConverter, Averager\n'), ((9091, 9116), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (9114, 9116), False, 'import argparse\n'), ((13778, 13838), 'os.makedirs', 'os.makedirs', (['f"""./saved_models/{opt.exp_name}"""'], {'exist_ok': '(True)'}), "(f'./saved_models/{opt.exp_name}', exist_ok=True)\n", (13789, 13838), False, 'import os\n'), ((14140, 14167), 'random.seed', 'random.seed', (['opt.manualSeed'], {}), '(opt.manualSeed)\n', (14151, 14167), False, 'import random\n'), ((14172, 14202), 'numpy.random.seed', 'np.random.seed', (['opt.manualSeed'], {}), '(opt.manualSeed)\n', (14186, 14202), True, 'import numpy as np\n'), ((14207, 14240), 'torch.manual_seed', 'torch.manual_seed', (['opt.manualSeed'], {}), '(opt.manualSeed)\n', (14224, 14240), False, 'import torch\n'), ((14245, 14283), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['opt.manualSeed'], {}), '(opt.manualSeed)\n', (14267, 14283), False, 'import torch\n'), ((14361, 14386), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (14384, 14386), False, 'import torch\n'), ((666, 691), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (689, 691), False, 'import torch\n'), ((5052, 5120), 'torch.optim.Adam', 'optim.Adam', (['filtered_parameters'], {'lr': 'opt.lr', 'betas': '(opt.beta1, 0.999)'}), '(filtered_parameters, lr=opt.lr, betas=(opt.beta1, 0.999))\n', (5062, 5120), True, 'import torch.optim as optim\n'), ((3452, 3480), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (3473, 3480), False, 'import torch\n'), ((5179, 5286), 'torch.optim.Adadelta', 'optim.Adadelta', (['filtered_parameters'], {'lr': 
'opt.lr', 'rho': 'opt.rho', 'eps': 'opt.eps', 'weight_decay': 'opt.weight_decay'}), '(filtered_parameters, lr=opt.lr, rho=opt.rho, eps=opt.eps,\n weight_decay=opt.weight_decay)\n', (5193, 5286), True, 'import torch.optim as optim\n'), ((7377, 7392), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7390, 7392), False, 'import torch\n'), ((9011, 9021), 'sys.exit', 'sys.exit', ([], {}), '()\n', (9019, 9021), False, 'import sys\n'), ((3160, 3186), 'torch.nn.init.constant_', 'init.constant_', (['param', '(0.0)'], {}), '(param, 0.0)\n', (3174, 3186), True, 'import torch.nn.init as init\n'), ((3659, 3686), 'torch.load', 'torch.load', (['opt.saved_model'], {}), '(opt.saved_model)\n', (3669, 3686), False, 'import torch\n'), ((3750, 3777), 'torch.load', 'torch.load', (['opt.saved_model'], {}), '(opt.saved_model)\n', (3760, 3777), False, 'import torch\n'), ((5336, 5458), 'torch.optim.SGD', 'optim.SGD', (['filtered_parameters'], {'lr': 'opt.lr', 'momentum': 'opt.momentum', 'weight_decay': 'opt.weight_decay', 'nesterov': 'opt.nesterov'}), '(filtered_parameters, lr=opt.lr, momentum=opt.momentum,\n weight_decay=opt.weight_decay, nesterov=opt.nesterov)\n', (5345, 5458), True, 'import torch.optim as optim\n'), ((1425, 1455), 'imgaug.augmenters.LinearContrast', 'iaa.LinearContrast', (['(0.5, 1.0)'], {}), '((0.5, 1.0))\n', (1443, 1455), True, 'from imgaug import augmenters as iaa\n'), ((1483, 1511), 'imgaug.augmenters.GaussianBlur', 'iaa.GaussianBlur', (['(0.5, 1.5)'], {}), '((0.5, 1.5))\n', (1499, 1511), True, 'from imgaug import augmenters as iaa\n'), ((1539, 1611), 'imgaug.augmenters.Crop', 'iaa.Crop', ([], {'percent': '((0, 0.4), (0, 0), (0, 0.4), (0, 0.0))', 'keep_size': '(True)'}), '(percent=((0, 0.4), (0, 0), (0, 0.4), (0, 0.0)), keep_size=True)\n', (1547, 1611), True, 'from imgaug import augmenters as iaa\n'), ((1636, 1710), 'imgaug.augmenters.Crop', 'iaa.Crop', ([], {'percent': '((0, 0.0), (0, 0.02), (0, 0), (0, 0.02))', 'keep_size': '(True)'}), '(percent=((0, 0.0), (0, 0.02), (0, 0), (0, 0.02)), keep_size=True)\n', (1644, 1710), True, 'from imgaug import augmenters as iaa\n'), ((1735, 1786), 'imgaug.augmenters.Sharpen', 'iaa.Sharpen', ([], {'alpha': '(0.0, 0.5)', 'lightness': '(0.0, 0.5)'}), '(alpha=(0.0, 0.5), lightness=(0.0, 0.5))\n', (1746, 1786), True, 'from imgaug import augmenters as iaa\n'), ((1814, 1866), 'imgaug.augmenters.PiecewiseAffine', 'iaa.PiecewiseAffine', ([], {'scale': '(0.02, 0.03)', 'mode': '"""edge"""'}), "(scale=(0.02, 0.03), mode='edge')\n", (1833, 1866), True, 'from imgaug import augmenters as iaa\n'), ((1894, 1938), 'imgaug.augmenters.PerspectiveTransform', 'iaa.PerspectiveTransform', ([], {'scale': '(0.01, 0.02)'}), '(scale=(0.01, 0.02))\n', (1918, 1938), True, 'from imgaug import augmenters as iaa\n'), ((3238, 3265), 'torch.nn.init.kaiming_normal_', 'init.kaiming_normal_', (['param'], {}), '(param)\n', (3258, 3265), True, 'import torch.nn.init as init\n'), ((3903, 3933), 'imgaug.augmenters.LinearContrast', 'iaa.LinearContrast', (['(0.5, 1.0)'], {}), '((0.5, 1.0))\n', (3921, 3933), True, 'from imgaug import augmenters as iaa\n'), ((3961, 3989), 'imgaug.augmenters.GaussianBlur', 'iaa.GaussianBlur', (['(0.5, 1.5)'], {}), '((0.5, 1.5))\n', (3977, 3989), True, 'from imgaug import augmenters as iaa\n'), ((4017, 4089), 'imgaug.augmenters.Crop', 'iaa.Crop', ([], {'percent': '((0, 0.4), (0, 0), (0, 0.4), (0, 0.0))', 'keep_size': '(True)'}), '(percent=((0, 0.4), (0, 0), (0, 0.4), (0, 0.0)), keep_size=True)\n', (4025, 4089), True, 'from imgaug import augmenters as iaa\n'), 
((4114, 4188), 'imgaug.augmenters.Crop', 'iaa.Crop', ([], {'percent': '((0, 0.0), (0, 0.02), (0, 0), (0, 0.02))', 'keep_size': '(True)'}), '(percent=((0, 0.0), (0, 0.02), (0, 0), (0, 0.02)), keep_size=True)\n', (4122, 4188), True, 'from imgaug import augmenters as iaa\n'), ((4213, 4264), 'imgaug.augmenters.Sharpen', 'iaa.Sharpen', ([], {'alpha': '(0.0, 0.5)', 'lightness': '(0.0, 0.5)'}), '(alpha=(0.0, 0.5), lightness=(0.0, 0.5))\n', (4224, 4264), True, 'from imgaug import augmenters as iaa\n'), ((4292, 4344), 'imgaug.augmenters.PiecewiseAffine', 'iaa.PiecewiseAffine', ([], {'scale': '(0.02, 0.03)', 'mode': '"""edge"""'}), "(scale=(0.02, 0.03), mode='edge')\n", (4311, 4344), True, 'from imgaug import augmenters as iaa\n'), ((4372, 4416), 'imgaug.augmenters.PerspectiveTransform', 'iaa.PerspectiveTransform', ([], {'scale': '(0.01, 0.02)'}), '(scale=(0.01, 0.02))\n', (4396, 4416), True, 'from imgaug import augmenters as iaa\n')]
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: protobufs/services/feature/actions/get_flags.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='protobufs/services/feature/actions/get_flags.proto',
package='services.feature.actions.get_flags',
syntax='proto3',
serialized_pb=b'\n2protobufs/services/feature/actions/get_flags.proto\x12\"services.feature.actions.get_flags\"\x0b\n\tRequestV1\"\x84\x01\n\nResponseV1\x12H\n\x05\x66lags\x18\x01 \x03(\x0b\x32\x39.services.feature.actions.get_flags.ResponseV1.FlagsEntry\x1a,\n\nFlagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x08:\x02\x38\x01\x62\x06proto3'
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_REQUESTV1 = _descriptor.Descriptor(
name='RequestV1',
full_name='services.feature.actions.get_flags.RequestV1',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=90,
serialized_end=101,
)
_RESPONSEV1_FLAGSENTRY = _descriptor.Descriptor(
name='FlagsEntry',
full_name='services.feature.actions.get_flags.ResponseV1.FlagsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='services.feature.actions.get_flags.ResponseV1.FlagsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='services.feature.actions.get_flags.ResponseV1.FlagsEntry.value', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), b'8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=192,
serialized_end=236,
)
_RESPONSEV1 = _descriptor.Descriptor(
name='ResponseV1',
full_name='services.feature.actions.get_flags.ResponseV1',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='flags', full_name='services.feature.actions.get_flags.ResponseV1.flags', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_RESPONSEV1_FLAGSENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=104,
serialized_end=236,
)
_RESPONSEV1_FLAGSENTRY.containing_type = _RESPONSEV1
_RESPONSEV1.fields_by_name['flags'].message_type = _RESPONSEV1_FLAGSENTRY
DESCRIPTOR.message_types_by_name['RequestV1'] = _REQUESTV1
DESCRIPTOR.message_types_by_name['ResponseV1'] = _RESPONSEV1
RequestV1 = _reflection.GeneratedProtocolMessageType('RequestV1', (_message.Message,), dict(
DESCRIPTOR = _REQUESTV1,
__module__ = 'protobufs.services.feature.actions.get_flags_pb2'
# @@protoc_insertion_point(class_scope:services.feature.actions.get_flags.RequestV1)
))
_sym_db.RegisterMessage(RequestV1)
ResponseV1 = _reflection.GeneratedProtocolMessageType('ResponseV1', (_message.Message,), dict(
FlagsEntry = _reflection.GeneratedProtocolMessageType('FlagsEntry', (_message.Message,), dict(
DESCRIPTOR = _RESPONSEV1_FLAGSENTRY,
__module__ = 'protobufs.services.feature.actions.get_flags_pb2'
# @@protoc_insertion_point(class_scope:services.feature.actions.get_flags.ResponseV1.FlagsEntry)
))
,
DESCRIPTOR = _RESPONSEV1,
__module__ = 'protobufs.services.feature.actions.get_flags_pb2'
# @@protoc_insertion_point(class_scope:services.feature.actions.get_flags.ResponseV1)
))
_sym_db.RegisterMessage(ResponseV1)
_sym_db.RegisterMessage(ResponseV1.FlagsEntry)
_RESPONSEV1_FLAGSENTRY.has_options = True
_RESPONSEV1_FLAGSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), b'8\001')
# @@protoc_insertion_point(module_scope)
|
[
"google.protobuf.descriptor_pb2.MessageOptions",
"google.protobuf.symbol_database.Default",
"google.protobuf.descriptor.FieldDescriptor",
"google.protobuf.descriptor.FileDescriptor",
"google.protobuf.descriptor.Descriptor"
] |
[((431, 457), 'google.protobuf.symbol_database.Default', '_symbol_database.Default', ([], {}), '()\n', (455, 457), True, 'from google.protobuf import symbol_database as _symbol_database\n'), ((475, 996), 'google.protobuf.descriptor.FileDescriptor', '_descriptor.FileDescriptor', ([], {'name': '"""protobufs/services/feature/actions/get_flags.proto"""', 'package': '"""services.feature.actions.get_flags"""', 'syntax': '"""proto3"""', 'serialized_pb': 'b\'\\n2protobufs/services/feature/actions/get_flags.proto\\x12"services.feature.actions.get_flags"\\x0b\\n\\tRequestV1"\\x84\\x01\\n\\nResponseV1\\x12H\\n\\x05flags\\x18\\x01 \\x03(\\x0b29.services.feature.actions.get_flags.ResponseV1.FlagsEntry\\x1a,\\n\\nFlagsEntry\\x12\\x0b\\n\\x03key\\x18\\x01 \\x01(\\t\\x12\\r\\n\\x05value\\x18\\x02 \\x01(\\x08:\\x028\\x01b\\x06proto3\''}), '(name=\n \'protobufs/services/feature/actions/get_flags.proto\', package=\n \'services.feature.actions.get_flags\', syntax=\'proto3\', serialized_pb=\n b\'\\n2protobufs/services/feature/actions/get_flags.proto\\x12"services.feature.actions.get_flags"\\x0b\\n\\tRequestV1"\\x84\\x01\\n\\nResponseV1\\x12H\\n\\x05flags\\x18\\x01 \\x03(\\x0b29.services.feature.actions.get_flags.ResponseV1.FlagsEntry\\x1a,\\n\\nFlagsEntry\\x12\\x0b\\n\\x03key\\x18\\x01 \\x01(\\t\\x12\\r\\n\\x05value\\x18\\x02 \\x01(\\x08:\\x028\\x01b\\x06proto3\'\n )\n', (501, 996), True, 'from google.protobuf import descriptor as _descriptor\n'), ((1065, 1422), 'google.protobuf.descriptor.Descriptor', '_descriptor.Descriptor', ([], {'name': '"""RequestV1"""', 'full_name': '"""services.feature.actions.get_flags.RequestV1"""', 'filename': 'None', 'file': 'DESCRIPTOR', 'containing_type': 'None', 'fields': '[]', 'extensions': '[]', 'nested_types': '[]', 'enum_types': '[]', 'options': 'None', 'is_extendable': '(False)', 'syntax': '"""proto3"""', 'extension_ranges': '[]', 'oneofs': '[]', 'serialized_start': '(90)', 'serialized_end': '(101)'}), "(name='RequestV1', full_name=\n 'services.feature.actions.get_flags.RequestV1', filename=None, file=\n DESCRIPTOR, containing_type=None, fields=[], extensions=[],\n nested_types=[], enum_types=[], options=None, is_extendable=False,\n syntax='proto3', extension_ranges=[], oneofs=[], serialized_start=90,\n serialized_end=101)\n", (1087, 1422), True, 'from google.protobuf import descriptor as _descriptor\n'), ((4811, 4842), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ([], {}), '()\n', (4840, 4842), False, 'from google.protobuf import descriptor_pb2\n'), ((2044, 2387), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""value"""', 'full_name': '"""services.feature.actions.get_flags.ResponseV1.FlagsEntry.value"""', 'index': '(1)', 'number': '(2)', 'type': '(8)', 'cpp_type': '(7)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': '(False)', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'options': 'None'}), "(name='value', full_name=\n 'services.feature.actions.get_flags.ResponseV1.FlagsEntry.value', index\n =1, number=2, type=8, cpp_type=7, label=1, has_default_value=False,\n default_value=False, message_type=None, enum_type=None, containing_type\n =None, is_extension=False, extension_scope=None, options=None)\n", (2071, 2387), True, 'from google.protobuf import descriptor as _descriptor\n'), ((2507, 2538), 'google.protobuf.descriptor_pb2.MessageOptions', 'descriptor_pb2.MessageOptions', ([], {}), '()\n', 
(2536, 2538), False, 'from google.protobuf import descriptor_pb2\n'), ((2876, 3207), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""flags"""', 'full_name': '"""services.feature.actions.get_flags.ResponseV1.flags"""', 'index': '(0)', 'number': '(1)', 'type': '(11)', 'cpp_type': '(10)', 'label': '(3)', 'has_default_value': '(False)', 'default_value': '[]', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'options': 'None'}), "(name='flags', full_name=\n 'services.feature.actions.get_flags.ResponseV1.flags', index=0, number=\n 1, type=11, cpp_type=10, label=3, has_default_value=False,\n default_value=[], message_type=None, enum_type=None, containing_type=\n None, is_extension=False, extension_scope=None, options=None)\n", (2903, 3207), True, 'from google.protobuf import descriptor as _descriptor\n')]
|
import sys
sys.path.append('../../')
import constants as cnst
import os
os.environ['PYTHONHASHSEED'] = '2'
import tqdm
from model.stg2_generator import StyledGenerator
import numpy as np
from my_utils.visualize_flame_overlay import OverLayViz
from my_utils.flm_dynamic_fit_overlay import camera_ringnetpp
from my_utils.generate_gif import generate_from_flame_sequence
from my_utils.generic_utils import save_set_of_images
from my_utils import compute_fid
import constants
from dataset_loaders import fast_image_reshape
import torch
from my_utils import generic_utils
from my_utils.eye_centering import position_to_given_location
def ge_gen_in(flm_params, textured_rndr, norm_map, normal_map_cond, texture_cond):
if normal_map_cond and texture_cond:
return torch.cat((textured_rndr, norm_map), dim=1)
elif normal_map_cond:
return norm_map
elif texture_cond:
return textured_rndr
else:
return flm_params
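# ge_gen_in selects the generator conditioning input: textured render + normal map when both
# flags are set, only one of them when a single flag is set, and the raw FLAME parameter
# vector when neither rendering-based condition is enabled.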
# General settings
save_images = True
code_size = 236
use_inst_norm = True
core_tensor_res = 4
resolution = 256
alpha = 1
step_max = int(np.log2(resolution) - 2)
root_out_dir = f'{cnst.output_root}sample/'
num_smpl_to_eval_on = 1000
use_styled_conv_stylegan2 = True
flength = 5000
cam_t = np.array([0., 0., 0])
camera_params = camera_ringnetpp((512, 512), trans=cam_t, focal=flength)
run_ids_1 = [29, ] # with sqrt(2)
# run_ids_1 = [7, 24, 8, 3]
# run_ids_1 = [7, 8, 3]
settings_for_runs = \
{24: {'name': 'vector_cond', 'model_idx': '216000_1', 'normal_maps_as_cond': False,
'rendered_flame_as_condition': False, 'apply_sqrt2_fac_in_eq_lin': False},
29: {'name': 'full_model', 'model_idx': '294000_1', 'normal_maps_as_cond': True,
'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': True},
7: {'name': 'flm_rndr_tex_interp', 'model_idx': '051000_1', 'normal_maps_as_cond': False,
'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': False},
3: {'name': 'norm_mp_tex_interp', 'model_idx': '203000_1', 'normal_maps_as_cond': True,
'rendered_flame_as_condition': False, 'apply_sqrt2_fac_in_eq_lin': False},
8: {'name': 'norm_map_rend_flm_no_tex_interp', 'model_idx': '009000_1', 'normal_maps_as_cond': True,
'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': False},}
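# Each run id maps to a checkpoint ('model_idx') plus its conditioning configuration:
# whether normal maps and/or textured FLAME renders are fed to the generator, and whether
# the sqrt(2) factor is applied in the equalized linear layers.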
overlay_visualizer = OverLayViz()
# overlay_visualizer.setup_renderer(mesh_file=None)
flm_params = np.zeros((num_smpl_to_eval_on, code_size)).astype('float32')
fl_param_dict = np.load(cnst.all_flame_params_file, allow_pickle=True).item()
for i, key in enumerate(fl_param_dict):
flame_param = fl_param_dict[key]
flame_param = np.hstack((flame_param['shape'], flame_param['exp'], flame_param['pose'], flame_param['cam'],
flame_param['tex'], flame_param['lit'].flatten()))
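    # The concatenated vector is expected to have code_size = 236 entries; assuming the usual
    # DECA/FLAME parameter sizes this is 100 (shape) + 50 (exp) + 6 (pose) + 3 (cam) +
    # 50 (tex) + 27 (lit, 9x3 spherical harmonics) = 236.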
# tz = camera_params['f'][0] / (camera_params['c'][0] * flame_param[:, 156:157])
# flame_param[:, 156:159] = np.concatenate((flame_param[:, 157:], tz), axis=1)
# import ipdb; ipdb.set_trace()
flm_params[i, :] = flame_param.astype('float32')
if i == num_smpl_to_eval_on - 1:
break
batch_size = 64
flame_decoder = overlay_visualizer.deca.flame.eval()
for run_idx in run_ids_1:
# import ipdb; ipdb.set_trace()
generator_1 = torch.nn.DataParallel(
StyledGenerator(embedding_vocab_size=69158,
rendered_flame_ascondition=settings_for_runs[run_idx]['rendered_flame_as_condition'],
normal_maps_as_cond=settings_for_runs[run_idx]['normal_maps_as_cond'],
core_tensor_res=core_tensor_res,
w_truncation_factor=1.0,
apply_sqrt2_fac_in_eq_lin=settings_for_runs[run_idx]['apply_sqrt2_fac_in_eq_lin'],
n_mlp=8)).cuda()
model_idx = settings_for_runs[run_idx]['model_idx']
ckpt1 = torch.load(f'{cnst.output_root}checkpoint/{run_idx}/{model_idx}.model')
generator_1.load_state_dict(ckpt1['generator_running'])
generator_1 = generator_1.eval()
# images = np.zeros((num_smpl_to_eval_on, 3, resolution, resolution)).astype('float32')
pbar = tqdm.tqdm(range(0, num_smpl_to_eval_on, batch_size))
pbar.set_description('Generating_images')
flame_mesh_imgs = None
mdl_id = 'mdl2_'
if settings_for_runs[run_idx]['name'] == 'full_model':
mdl_id = 'mdl1_'
for batch_idx in pbar:
flm_batch = flm_params[batch_idx:batch_idx+batch_size, :]
flm_batch = torch.from_numpy(flm_batch).cuda()
flm_batch = position_to_given_location(flame_decoder, flm_batch)
batch_size_true = flm_batch.shape[0]
if settings_for_runs[run_idx]['normal_maps_as_cond'] or \
settings_for_runs[run_idx]['rendered_flame_as_condition']:
cam = flm_batch[:, constants.DECA_IDX['cam'][0]:constants.DECA_IDX['cam'][1]:]
shape = flm_batch[:, constants.INDICES['SHAPE'][0]:constants.INDICES['SHAPE'][1]]
exp = flm_batch[:, constants.INDICES['EXP'][0]:constants.INDICES['EXP'][1]]
pose = flm_batch[:, constants.INDICES['POSE'][0]:constants.INDICES['POSE'][1]]
# import ipdb; ipdb.set_trace()
light_code = \
flm_batch[:, constants.DECA_IDX['lit'][0]:constants.DECA_IDX['lit'][1]:].view((batch_size_true, 9, 3))
texture_code = flm_batch[:, constants.DECA_IDX['tex'][0]:constants.DECA_IDX['tex'][1]:]
norma_map_img, _, _, _, rend_flm = \
overlay_visualizer.get_rendered_mesh(flame_params=(shape, exp, pose, light_code, texture_code),
camera_params=cam)
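            # Map the rendered images from [0, 1] to [-1, 1] (the generator's expected range)
            # and resize them to the 256x256 working resolution.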
rend_flm = torch.clamp(rend_flm, 0, 1) * 2 - 1
norma_map_img = torch.clamp(norma_map_img, 0, 1) * 2 - 1
rend_flm = fast_image_reshape(rend_flm, height_out=256, width_out=256, mode='bilinear')
norma_map_img = fast_image_reshape(norma_map_img, height_out=256, width_out=256, mode='bilinear')
else:
rend_flm = None
norma_map_img = None
gen_1_in = ge_gen_in(flm_batch, rend_flm, norma_map_img, settings_for_runs[run_idx]['normal_maps_as_cond'],
settings_for_runs[run_idx]['rendered_flame_as_condition'])
# torch.manual_seed(2)
identity_embeddings = torch.randint(low=0, high=69158, size=(gen_1_in.shape[0], ), dtype=torch.long,
device='cuda')
mdl_1_gen_images = generic_utils.get_images_from_flame_params(
flame_params=gen_1_in.cpu().numpy(), pose=None,
model=generator_1,
step=step_max, alpha=alpha,
input_indices=identity_embeddings.cpu().numpy())
# import ipdb; ipdb.set_trace()
images = torch.clamp(mdl_1_gen_images, -1, 1).cpu().numpy()
flame_mesh_imgs = torch.clamp(rend_flm, -1, 1).cpu().numpy()
save_path_current_id = os.path.join(root_out_dir, 'inter_model_comparison', settings_for_runs[run_idx]['name'])
save_set_of_images(path=save_path_current_id, prefix=f'{mdl_id}_{batch_idx}',
images=(images + 1) / 2, show_prog_bar=True)
#save flam rndr
save_path_current_id_flm_rndr = os.path.join(root_out_dir, 'inter_model_comparison',
settings_for_runs[run_idx]['name'])
save_set_of_images(path=save_path_current_id_flm_rndr, prefix=f'mesh_{batch_idx}',
images=(flame_mesh_imgs + 1) / 2, show_prog_bar=True)
# save_set_of_images(path=save_path_this_expt, prefix='mesh_', images=((norma_map_img + 1) / 2).cpu().numpy())
# save_set_of_images(path=save_path_this_expt, prefix='mdl1_', images=((mdl_1_gen_images + 1) / 2).cpu().numpy())
# save_set_of_images(path=save_path_this_expt, prefix='mdl2_', images=((mdl_2_gen_images + 1) / 2).cpu().numpy())
|
[
"my_utils.visualize_flame_overlay.OverLayViz",
"torch.load",
"os.path.join",
"my_utils.generic_utils.save_set_of_images",
"my_utils.flm_dynamic_fit_overlay.camera_ringnetpp",
"dataset_loaders.fast_image_reshape",
"numpy.array",
"numpy.zeros",
"torch.randint",
"my_utils.eye_centering.position_to_given_location",
"model.stg2_generator.StyledGenerator",
"torch.from_numpy",
"numpy.log2",
"numpy.load",
"sys.path.append",
"torch.clamp",
"torch.cat"
] |
[((11, 36), 'sys.path.append', 'sys.path.append', (['"""../../"""'], {}), "('../../')\n", (26, 36), False, 'import sys\n'), ((1244, 1267), 'numpy.array', 'np.array', (['[0.0, 0.0, 0]'], {}), '([0.0, 0.0, 0])\n', (1252, 1267), True, 'import numpy as np\n'), ((1282, 1338), 'my_utils.flm_dynamic_fit_overlay.camera_ringnetpp', 'camera_ringnetpp', (['(512, 512)'], {'trans': 'cam_t', 'focal': 'flength'}), '((512, 512), trans=cam_t, focal=flength)\n', (1298, 1338), False, 'from my_utils.flm_dynamic_fit_overlay import camera_ringnetpp\n'), ((2360, 2372), 'my_utils.visualize_flame_overlay.OverLayViz', 'OverLayViz', ([], {}), '()\n', (2370, 2372), False, 'from my_utils.visualize_flame_overlay import OverLayViz\n'), ((3909, 3980), 'torch.load', 'torch.load', (['f"""{cnst.output_root}checkpoint/{run_idx}/{model_idx}.model"""'], {}), "(f'{cnst.output_root}checkpoint/{run_idx}/{model_idx}.model')\n", (3919, 3980), False, 'import torch\n'), ((770, 813), 'torch.cat', 'torch.cat', (['(textured_rndr, norm_map)'], {'dim': '(1)'}), '((textured_rndr, norm_map), dim=1)\n', (779, 813), False, 'import torch\n'), ((1091, 1110), 'numpy.log2', 'np.log2', (['resolution'], {}), '(resolution)\n', (1098, 1110), True, 'import numpy as np\n'), ((2439, 2481), 'numpy.zeros', 'np.zeros', (['(num_smpl_to_eval_on, code_size)'], {}), '((num_smpl_to_eval_on, code_size))\n', (2447, 2481), True, 'import numpy as np\n'), ((2516, 2570), 'numpy.load', 'np.load', (['cnst.all_flame_params_file'], {'allow_pickle': '(True)'}), '(cnst.all_flame_params_file, allow_pickle=True)\n', (2523, 2570), True, 'import numpy as np\n'), ((4582, 4634), 'my_utils.eye_centering.position_to_given_location', 'position_to_given_location', (['flame_decoder', 'flm_batch'], {}), '(flame_decoder, flm_batch)\n', (4608, 4634), False, 'from my_utils.eye_centering import position_to_given_location\n'), ((6390, 6487), 'torch.randint', 'torch.randint', ([], {'low': '(0)', 'high': '(69158)', 'size': '(gen_1_in.shape[0],)', 'dtype': 'torch.long', 'device': '"""cuda"""'}), "(low=0, high=69158, size=(gen_1_in.shape[0],), dtype=torch.\n long, device='cuda')\n", (6403, 6487), False, 'import torch\n'), ((7002, 7095), 'os.path.join', 'os.path.join', (['root_out_dir', '"""inter_model_comparison"""', "settings_for_runs[run_idx]['name']"], {}), "(root_out_dir, 'inter_model_comparison', settings_for_runs[\n run_idx]['name'])\n", (7014, 7095), False, 'import os\n'), ((7099, 7226), 'my_utils.generic_utils.save_set_of_images', 'save_set_of_images', ([], {'path': 'save_path_current_id', 'prefix': 'f"""{mdl_id}_{batch_idx}"""', 'images': '((images + 1) / 2)', 'show_prog_bar': '(True)'}), "(path=save_path_current_id, prefix=\n f'{mdl_id}_{batch_idx}', images=(images + 1) / 2, show_prog_bar=True)\n", (7117, 7226), False, 'from my_utils.generic_utils import save_set_of_images\n'), ((7314, 7407), 'os.path.join', 'os.path.join', (['root_out_dir', '"""inter_model_comparison"""', "settings_for_runs[run_idx]['name']"], {}), "(root_out_dir, 'inter_model_comparison', settings_for_runs[\n run_idx]['name'])\n", (7326, 7407), False, 'import os\n'), ((7464, 7605), 'my_utils.generic_utils.save_set_of_images', 'save_set_of_images', ([], {'path': 'save_path_current_id_flm_rndr', 'prefix': 'f"""mesh_{batch_idx}"""', 'images': '((flame_mesh_imgs + 1) / 2)', 'show_prog_bar': '(True)'}), "(path=save_path_current_id_flm_rndr, prefix=\n f'mesh_{batch_idx}', images=(flame_mesh_imgs + 1) / 2, show_prog_bar=True)\n", (7482, 7605), False, 'from my_utils.generic_utils import save_set_of_images\n'), ((5860, 
5936), 'dataset_loaders.fast_image_reshape', 'fast_image_reshape', (['rend_flm'], {'height_out': '(256)', 'width_out': '(256)', 'mode': '"""bilinear"""'}), "(rend_flm, height_out=256, width_out=256, mode='bilinear')\n", (5878, 5936), False, 'from dataset_loaders import fast_image_reshape\n'), ((5965, 6051), 'dataset_loaders.fast_image_reshape', 'fast_image_reshape', (['norma_map_img'], {'height_out': '(256)', 'width_out': '(256)', 'mode': '"""bilinear"""'}), "(norma_map_img, height_out=256, width_out=256, mode=\n 'bilinear')\n", (5983, 6051), False, 'from dataset_loaders import fast_image_reshape\n'), ((3338, 3710), 'model.stg2_generator.StyledGenerator', 'StyledGenerator', ([], {'embedding_vocab_size': '(69158)', 'rendered_flame_ascondition': "settings_for_runs[run_idx]['rendered_flame_as_condition']", 'normal_maps_as_cond': "settings_for_runs[run_idx]['normal_maps_as_cond']", 'core_tensor_res': 'core_tensor_res', 'w_truncation_factor': '(1.0)', 'apply_sqrt2_fac_in_eq_lin': "settings_for_runs[run_idx]['apply_sqrt2_fac_in_eq_lin']", 'n_mlp': '(8)'}), "(embedding_vocab_size=69158, rendered_flame_ascondition=\n settings_for_runs[run_idx]['rendered_flame_as_condition'],\n normal_maps_as_cond=settings_for_runs[run_idx]['normal_maps_as_cond'],\n core_tensor_res=core_tensor_res, w_truncation_factor=1.0,\n apply_sqrt2_fac_in_eq_lin=settings_for_runs[run_idx][\n 'apply_sqrt2_fac_in_eq_lin'], n_mlp=8)\n", (3353, 3710), False, 'from model.stg2_generator import StyledGenerator\n'), ((4527, 4554), 'torch.from_numpy', 'torch.from_numpy', (['flm_batch'], {}), '(flm_batch)\n', (4543, 4554), False, 'import torch\n'), ((5732, 5759), 'torch.clamp', 'torch.clamp', (['rend_flm', '(0)', '(1)'], {}), '(rend_flm, 0, 1)\n', (5743, 5759), False, 'import torch\n'), ((5796, 5828), 'torch.clamp', 'torch.clamp', (['norma_map_img', '(0)', '(1)'], {}), '(norma_map_img, 0, 1)\n', (5807, 5828), False, 'import torch\n'), ((6849, 6885), 'torch.clamp', 'torch.clamp', (['mdl_1_gen_images', '(-1)', '(1)'], {}), '(mdl_1_gen_images, -1, 1)\n', (6860, 6885), False, 'import torch\n'), ((6926, 6954), 'torch.clamp', 'torch.clamp', (['rend_flm', '(-1)', '(1)'], {}), '(rend_flm, -1, 1)\n', (6937, 6954), False, 'import torch\n')]
|
import demistomock as demisto
from CommonServerPython import *
""" IMPORTS """
import json
import urllib3
import dateparser
import traceback
from typing import Any, Dict, List, Union
import logging
from argus_api import session as argus_session
from argus_api.api.currentuser.v1.user import get_current_user
from argus_api.api.cases.v2.case import (
add_case_tag,
add_comment,
advanced_case_search,
close_case,
create_case,
delete_case,
delete_comment,
download_attachment,
edit_comment,
get_attachment,
get_case_metadata_by_id,
list_case_attachments,
list_case_tags,
list_case_comments,
remove_case_tag_by_id,
remove_case_tag_by_key_value,
update_case,
)
from argus_api.api.events.v1 import get_event_by_path
from argus_api.api.events.v1.case.case import get_events_for_case
from argus_api.api.events.v1.aggregated import (
find_aggregated_events,
list_aggregated_events,
)
from argus_api.api.events.v1.payload import get_payload
from argus_api.api.events.v1.pcap import get_pcap
from argus_api.api.events.v1.nids import find_n_i_d_s_events, list_n_i_d_s_events
from argus_api.api.pdns.v3.search import search_records
from argus_api.api.reputation.v1.observation import (
fetch_observations_for_domain,
fetch_observations_for_i_p,
)
# Disable insecure warnings
urllib3.disable_warnings()
""" CONSTANTS """
DATE_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
PRETTY_DATE_FORMAT = "%b %d, %Y, %H:%M:%S"
FETCH_TAG = demisto.params().get("fetch_tag")
""" HELPER FUNCTIONS """
def set_argus_settings(
api_key: str, base_url: str = None, proxies: dict = None, verify: bool = None
):
argus_session.api_key = api_key
argus_session.base_url = base_url
argus_session.proxies = proxies
argus_session.verify = verify
def argus_priority_to_demisto_severity(priority: str) -> int:
mapping = {"low": 1, "medium": 2, "high": 3, "critical": 4}
return mapping.get(priority, 0)
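# argus_priority_to_demisto_severity maps Argus case priorities onto XSOAR severity values
# (1=Low, 2=Medium, 3=High, 4=Critical); unknown priorities fall back to 0 (Unknown).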
def argus_status_to_demisto_status(status: str) -> int:
mapping = {
"pendingCustomer": 0,
"pendingSoc": 0,
"pendingVendor": 0,
"pendingClose": 0,
"workingSoc": 1,
"workingCustomer": 1,
"closed": 2,
}
return mapping.get(status, 0)
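# argus_status_to_demisto_status maps Argus case states onto XSOAR incident statuses
# (0=Pending, 1=Active, 2=Done); unrecognized states default to Pending.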
def build_argus_priority_from_min_severity(min_severity: str) -> List[str]:
severities = ["low", "medium", "high", "critical"]
min_severity_list = []
for severity in severities:
if argus_priority_to_demisto_severity(
min_severity.lower()
) <= argus_priority_to_demisto_severity(severity):
min_severity_list.append(severity)
return min_severity_list
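# Example: build_argus_priority_from_min_severity("high") returns ["high", "critical"],
# i.e. every Argus priority at or above the configured minimum severity.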
def parse_first_fetch(first_fetch: Any) -> Any:
if isinstance(first_fetch, str):
        if first_fetch and not first_fetch.startswith("-"):
first_fetch = f"-{first_fetch}"
return first_fetch
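# parse_first_fetch normalizes the relative first-fetch window to a negative offset,
# e.g. "1 day" becomes "-1 day" while "-1 day" is returned unchanged.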
def build_tags_from_list(lst: list) -> List[Dict]:
if not lst:
return []
if len(lst) % 2 != 0:
return []
tags = []
for i in range(0, len(lst), 2):
tags.append({"key": lst[i], "value": lst[i + 1]})
return tags
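# Example: build_tags_from_list(["env", "prod", "team", "soc"]) returns
# [{"key": "env", "value": "prod"}, {"key": "team", "value": "soc"}]; empty or odd-length
# lists yield [].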
def str_to_dict(string: str) -> dict:
if not string:
return {}
lst = argToList(string)
if len(lst) % 2 != 0:
return {}
return {lst[i]: lst[i + 1] for i in range(0, len(lst), 2)}
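# Example: str_to_dict("os,windows,version,10") returns {"os": "windows", "version": "10"};
# malformed (odd-length) input yields {}.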
def date_time_to_epoch_milliseconds(date_time: Union[datetime, str] = None) -> int:
if isinstance(date_time, datetime):
return int(date_time.timestamp() * 1000)
if isinstance(date_time, str):
return date_time_to_epoch_milliseconds(dateparser.parse(date_time))
return int(datetime.now().timestamp() * 1000)
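# date_time_to_epoch_milliseconds accepts a datetime, a parseable date string, or None
# (meaning "now") and returns the timestamp in epoch milliseconds.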
def pretty_print_date(date_time: Union[datetime, str] = None) -> str:
if isinstance(date_time, datetime):
return date_time.strftime(PRETTY_DATE_FORMAT)
if isinstance(date_time, str):
return pretty_print_date(dateparser.parse(date_time))
return datetime.now().strftime(PRETTY_DATE_FORMAT)
def pretty_print_case_metadata(result: dict, title: str = None) -> str:
data = result["data"]
string = title if title else f"# #{data['id']}: {data['subject']}\n"
string += "_Priority: {}, status: {}, last updated: {}_\n".format(
data["priority"], data["status"], pretty_print_date(data["lastUpdatedTime"])
)
string += "Reported by {} at {}\n\n".format(
data["publishedByUser"]["name"], pretty_print_date(data["publishedTime"])
)
string += data["description"]
return string
def pretty_print_comment(comment: dict, title: str = None) -> str:
string = title if title else ""
string += f"#### *{comment['addedByUser']['userName']} - {pretty_print_date(comment['addedTime'])}*\n"
string += (
f"_Last updated {pretty_print_date(comment['lastUpdatedTime'])}_\n"
if comment["lastUpdatedTime"]
else ""
)
string += f"{comment['comment']}\n\n"
string += f"_id: {comment['id']}_\n"
string += f"_Flags: {str(comment['flags'])}_\n" if comment["flags"] else ""
string += "* * *\n"
return string
def pretty_print_comments(comments: list, title: str = None) -> str:
string = title if title else ""
for comment in comments:
string += pretty_print_comment(comment)
return string
def pretty_print_events(result: dict, title: str = None) -> str:
string = title if title else ""
string += "_Count: {}, showing {} events, from {} to {}_\n".format(
result["count"], result["size"], result["offset"], result["limit"]
)
string += tableToMarkdown("Events", result["data"])
return string
""" COMMAND FUNCTIONS """
def test_module_command() -> str:
response = get_current_user()
if response["responseCode"] == 200:
return "ok"
return (
f"Unable to communicate with Argus API {response['responseCode']}, {response}"
)
def fetch_incidents(
last_run: dict, first_fetch_period: str, limit: int = 25, min_severity: str = "low"
):
start_timestamp = last_run.get("start_time", None) if last_run else None
# noinspection PyTypeChecker
result = advanced_case_search(
startTimestamp=start_timestamp if start_timestamp else first_fetch_period,
endTimestamp="now",
limit=limit,
sortBy=["createdTimestamp"],
priority=build_argus_priority_from_min_severity(min_severity),
subCriteria=[
{"exclude": True, "status": ["closed"]},
],
timeFieldStrategy=["createdTimestamp"],
)
incidents = []
for case in result["data"]:
incidents.append(
{
"name": f"#{case['id']}: {case['subject']}",
"occurred": case["createdTime"],
"severity": argus_priority_to_demisto_severity(case["priority"]),
"status": argus_status_to_demisto_status(case["status"]),
"details": case["description"],
"customFields": {
"argus_id": str(case["id"]),
"type": case["type"],
"category": case["category"]["name"] if case["category"] else None,
"service": case["service"]["name"],
"lastUpdatedTime": case["lastUpdatedTime"],
"createdTimestamp": case["createdTimestamp"],
"customer": case["customer"]["shortName"],
},
"rawJSON": json.dumps(case),
}
)
if result["data"]:
last_run["start_time"] = str(result["data"][-1]["createdTimestamp"] + 1)
return last_run, incidents
def add_case_tag_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
key = args.get("key", None)
value = args.get("value", None)
if not case_id:
raise ValueError("case_id not specified")
if not key:
raise ValueError("key not specified")
if not value:
raise ValueError("value not specified")
tag = {"key": key, "value": value}
result = add_case_tag(caseID=case_id, tags=tag)
headers = ["key", "value", "addedTime"]
readable_output = tableToMarkdown(
f"#{case_id}: Tags", result["data"], headers=headers
)
return CommandResults(
readable_output=readable_output,
outputs_prefix="Argus.Tags",
outputs=result,
raw_response=result,
)
def add_comment_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
comment = args.get("comment", None)
if not case_id:
raise ValueError("case_id not specified")
if not comment:
raise ValueError("comment not specified")
result = add_comment(
caseID=case_id,
comment=comment,
asReplyTo=args.get("as_reply_to", None),
internal=args.get("internal", None),
originEmailAddress=args.get("origin_email_address", None),
associatedAttachmentID=args.get("associated_attachment_id", None),
)
return CommandResults(
readable_output=pretty_print_comment(
result["data"], f"# #{case_id}: Added comment\n"
),
outputs_prefix="Argus.Comment",
outputs=result,
raw_response=result,
)
def advanced_case_search_command(args: Dict[str, Any]) -> CommandResults:
# noinspection PyTypeChecker
result = advanced_case_search(
startTimestamp=args.get("start_timestamp", None),
endTimestamp=args.get("end_timestamp", None),
limit=args.get("limit", None),
offset=args.get("offset", None),
includeDeleted=args.get("include_deleted", None),
subCriteria=argToList(args.get("sub_criteria", None)),
exclude=args.get("exclude", None),
required=args.get("required", None),
customerID=argToList(args.get("customer_id", None)),
caseID=argToList(args.get("case_id", None)),
customer=argToList(args.get("customer", None)),
type=argToList(args.get("case_type", None)),
service=argToList(args.get("service", None)),
category=argToList(args.get("category", None)),
status=argToList(args.get("status", None)),
priority=argToList(args.get("priority", None)),
assetID=argToList(args.get("asset_id", None)),
tag=argToList(args.get("tag", None)),
workflow=argToList(args.get("workflow", None)),
field=argToList(args.get("field", None)),
keywords=argToList(args.get("keywords", None)),
timeFieldStrategy=argToList(args.get("time_field_strategy", None)),
timeMatchStrategy=args.get("time_match_strategy", None),
keywordFieldStrategy=argToList(args.get("keyword_field_strategy", None)),
keywordMatchStrategy=args.get("keyword_match_strategy", None),
user=argToList(args.get("user", None)),
userFieldStrategy=argToList(args.get("user_field_strategy", None)),
userAssigned=args.get("user_assigned", None),
techAssigned=args.get("tech_assigned", None),
includeWorkflows=args.get("include_workflows", None),
includeDescription=args.get("include_description", None),
accessMode=argToList(args.get("access_mode", None)),
explicitAccess=argToList(args.get("explicit_access", None)),
sortBy=argToList(args.get("sort_by", None)),
includeFlags=argToList(args.get("include_flags", None)),
excludeFlags=argToList(args.get("exclude_flags", None)),
)
readable_output = f"Advanced Case Search: {result['count']} result(s)\n"
readable_output += tableToMarkdown(
"Output not suitable for playground", result["data"]
)
return CommandResults(
readable_output=readable_output,
outputs_prefix="Argus.Cases",
outputs=result,
raw_response=result,
)
def close_case_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
if not case_id:
raise ValueError("case_id not specified")
result = close_case(
caseID=case_id,
comment=args.get("comment", None),
)
readable_output = f"# #{case_id}: close case\n"
readable_output += (
f"_Status: {result['data']['status']}, at: {result['data']['closedTime']}_"
)
return CommandResults(
readable_output=readable_output,
outputs_prefix="Argus.Case",
outputs=result,
raw_response=result,
)
def create_case_command(args: Dict[str, Any]) -> CommandResults:
subject = args.get("subject", None)
description = args.get("description", None)
service = args.get("service", None)
case_type = args.get("type", None)
tags = args.get("tags", None)
if not subject:
raise ValueError("subject not specified")
if not description:
raise ValueError("description not specified")
if not service:
raise ValueError("service not specified")
if not case_type:
raise ValueError("case_type not specified")
if tags:
tags = str(tags).split(",")
if len(tags) % 2 != 0:
raise ValueError("tags list must be of even number", tags)
tags = build_tags_from_list(tags)
result = create_case(
customer=args.get("customer", None),
service=service,
category=args.get("category", None),
type=case_type,
status=args.get("status", None),
tags=tags,
subject=subject,
description=description,
customerReference=args.get("customer_reference", None),
priority=args.get("priority", None),
accessMode=args.get("access_mode", None),
originEmailAddress=args.get("origin_email_address", None),
publish=args.get("publish", None),
defaultWatchers=args.get("default_watchers", None),
)
return CommandResults(
readable_output=pretty_print_case_metadata(result),
outputs_prefix="Argus.Case",
outputs=result,
raw_response=result,
)
def delete_case_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
if not case_id:
raise ValueError("case id not specified")
result = delete_case(caseID=case_id)
return CommandResults(
readable_output=pretty_print_case_metadata(result, "Case deleted"),
outputs_prefix="Argus.Case",
outputs=result,
raw_response=result,
)
def delete_comment_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
comment_id = args.get("comment_id", None)
if not case_id:
raise ValueError("case id not specified")
if not comment_id:
raise ValueError("comment id not specified")
result = delete_comment(caseID=case_id, commentID=comment_id)
return CommandResults(
readable_output=pretty_print_comment(
result["data"], f"# #{case_id}: Deleted comment\n"
),
outputs_prefix="Argus.Comment",
outputs=result,
raw_response=result,
)
def download_attachment_command(args: Dict[str, Any]) -> Any:
case_id = args.get("case_id", None)
attachment_id = args.get("attachment_id", None)
if not case_id:
raise ValueError("case id not specified")
if not attachment_id:
raise ValueError("attachment id not specified")
result = download_attachment(caseID=case_id, attachmentID=attachment_id)
return fileResult(attachment_id, result.content)
def edit_comment_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
comment_id = args.get("comment_id", None)
comment = args.get("comment", None)
if not case_id:
raise ValueError("case id not specified")
if not comment_id:
raise ValueError("comment id not specified")
if not comment:
raise ValueError("comment not specified")
result = edit_comment(caseID=case_id, commentID=comment_id, comment=comment)
return CommandResults(
readable_output=pretty_print_comment(
result["data"], f"# #{case_id}: Updated comment\n"
),
outputs_prefix="Argus.Comment",
outputs=result,
raw_response=result,
)
def get_attachment_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
attachment_id = args.get("attachment_id", None)
if not case_id:
raise ValueError("case id not specified")
if not attachment_id:
raise ValueError("attachment id not specified")
result = get_attachment(caseID=case_id, attachmentID=attachment_id)
readable_output = f"# #{case_id}: attachment metadata\n"
readable_output += f"#### *{result['data']['addedByUser']['userName']} - {result['data']['addedTime']}*\n"
readable_output += f"{result['data']['name']} ({result['data']['mimeType']}, {result['data']['size']} bytes)\n\n"
readable_output += f"_id: {result['data']['id']}_\n"
return CommandResults(
readable_output=readable_output,
outputs_prefix="Argus.Attachments",
outputs=result,
raw_response=result,
)
def get_case_metadata_by_id_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
if not case_id:
raise ValueError("case id not specified")
result = get_case_metadata_by_id(
id=case_id, skipRedirect=args.get("skip_redirect", None)
)
return CommandResults(
readable_output=pretty_print_case_metadata(result),
outputs_prefix="Argus.Case",
outputs=result,
raw_response=result,
)
def list_case_attachments_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
if not case_id:
raise ValueError("case_id not specified")
result = list_case_attachments(
caseID=case_id, limit=args.get("limit", None), offset=args.get("offset", None)
)
readable_output = f"# #{case_id}: Case attachments\n"
for attachment in result["data"]:
readable_output += f"#### *{attachment['addedByUser']['userName']} - {attachment['addedTime']}*\n"
readable_output += f"{attachment['name']} ({attachment['mimeType']}, {attachment['size']} kb)\n\n"
readable_output += f"_id: {attachment['id']}_\n"
readable_output += "* * *\n"
return CommandResults(
readable_output=readable_output,
outputs_prefix="Argus.Attachments",
outputs=result,
raw_response=result,
)
def list_case_tags_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
if not case_id:
raise ValueError("case_id not specified")
result = list_case_tags(
caseID=case_id, limit=args.get("limit", None), offset=args.get("offset", None)
)
headers = ["key", "value", "addedTime", "id"]
readable_output = tableToMarkdown(
f"#{case_id}: Tags", result["data"], headers=headers
)
return CommandResults(
readable_output=readable_output,
outputs_prefix="Argus.Tags",
outputs=result,
raw_response=result,
)
def list_case_comments_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
sort_by = args.get("sort_by", None)
if not case_id:
raise ValueError("case_id not specified")
if sort_by:
sort_by = ["addedTimestamp"] if sort_by == "ascending" else ["-addedTimestamp"]
result = list_case_comments(
caseID=case_id,
beforeComment=args.get("before_comment", None),
afterComment=args.get("after_comment", None),
offset=args.get("offset", None),
limit=args.get("limit", None),
sortBy=sort_by,
)
return CommandResults(
readable_output=pretty_print_comments(
result["data"], f"# #{case_id}: Comments\n"
),
outputs_prefix="Argus.Comments",
outputs=result,
raw_response=result,
)
def remove_case_tag_by_id_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
tag_id = args.get("tag_id", None)
if not case_id:
raise ValueError("case id not specified")
if not tag_id:
raise ValueError("tag id not specified")
result = remove_case_tag_by_id(caseID=case_id, tagID=tag_id)
headers = ["key", "value", "addedTime", "id", "flags"]
readable_output = tableToMarkdown(
f"#{case_id}: Delete tags", result["data"], headers=headers
)
return CommandResults(
readable_output=readable_output,
outputs_prefix="Argus.Tags",
outputs=result,
raw_response=result,
)
def remove_case_tag_by_key_value_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
key = args.get("key", None)
value = args.get("value", None)
if not case_id:
raise ValueError("case id not specified")
if not key:
raise ValueError("key not specified")
if not value:
raise ValueError("value not specified")
result = remove_case_tag_by_key_value(caseID=case_id, tagKey=key, tagValue=value)
headers = ["key", "value", "addedTime", "id", "flags"]
readable_output = tableToMarkdown(
f"#{case_id}: Delete tags", result["data"], headers=headers
)
return CommandResults(
readable_output=readable_output,
outputs_prefix="Argus.Tags",
outputs=result,
raw_response=result,
)
def update_case_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
if not case_id:
raise ValueError("case id not specified")
result = update_case(
id=case_id,
subject=args.get("subject", None),
description=args.get("description", None),
status=args.get("status", None),
priority=args.get("priority", None),
category=args.get("category", None),
reporter=args.get("reporter", None),
assignedUser=args.get("assigned_user", None),
assignedTech=args.get("assigned_tech", None),
customerReference=args.get("customer_reference", None),
comment=args.get("comment", None),
originEmailAddress=args.get("origin_email_address", None),
hasEvents=args.get("has_events", None),
internalComment=args.get("internal_comment", None),
)
return CommandResults(
readable_output=pretty_print_case_metadata(result),
outputs_prefix="Argus.Case",
outputs=result,
raw_response=result,
)
def get_event_command(args: Dict[str, Any]) -> CommandResults:
event_type = args.get("type", None)
timestamp = args.get("timestamp", None)
customer_id = args.get("customer_id", None)
event_id = args.get("event_id", None)
if not event_type:
raise ValueError("event type not specified")
if not timestamp:
raise ValueError("timestamp not specified")
if not customer_id:
raise ValueError("customer id not specified")
if not event_id:
raise ValueError("event id not specified")
result = get_event_by_path(
type=event_type, timestamp=timestamp, customerID=customer_id, eventID=event_id
)
return CommandResults(
readable_output=tableToMarkdown(f"Event: {event_id}", result["data"]),
outputs_prefix="Argus.Event",
outputs=result,
raw_response=result,
)
def get_events_for_case_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
if not case_id:
raise ValueError("case id not specified")
result = get_events_for_case(
caseID=case_id, limit=args.get("limit", None), offset=args.get("offset", None)
)
return CommandResults(
readable_output=pretty_print_events(
dict(result), f"# #{case_id}: Associated Events\n"
),
outputs_prefix="Argus.Events",
outputs=result,
raw_response=result,
)
def find_aggregated_events_command(args: Dict[str, Any]) -> CommandResults:
# noinspection PyTypeChecker
result = find_aggregated_events(
skipFutureEvents=args.get("skip_future_events", None),
exclude=args.get("exclude", None),
locationID=argToList(args.get("location_id", None)),
severity=argToList(args.get("severity", None)),
customer=argToList(args.get("customer", None)),
alarmID=argToList(args.get("alarm_id", None)),
attackCategoryID=argToList(args.get("attack_category_id", None)),
sourceGeoCountry=argToList(args.get("source_geo_country", None)),
destinationGeoCountry=argToList(args.get("destination_geo_country", None)),
geoCountry=argToList(args.get("geo_country", None)),
properties=str_to_dict(args.get("properties", None)),
exactMatchProperties=args.get("exact_match_properties", None),
subCriteria=argToList(args.get("sub_criteria", None)),
signature=argToList(args.get("signature", None)),
lastUpdatedTimestamp=args.get("last_updated_timestamp", None),
indexStartTime=args.get("index_start_time", None),
indexEndTime=args.get("index_end_time", None),
destinationIP=argToList(args.get("destination_ip", None)),
sourceIP=argToList(args.get("source_ip", None)),
ip=argToList(args.get("ip", None)),
destinationPort=argToList(args.get("destination_port", None)),
sourcePort=argToList(args.get("source_port", None)),
port=argToList(args.get("port", None)),
minSeverity=args.get("min_severity", None),
maxSeverity=args.get("max_severity", None),
limit=args.get("limit", 25),
offset=args.get("offset", None),
includeDeleted=args.get("include_deleted", None),
minCount=args.get("min_count", None),
associatedCaseID=argToList(args.get("associated_case_id", None)),
sourceIPMinBits=args.get("source_ip_min_bits", None),
destinationIPMinBits=args.get("destination_ip_min_bits", None),
startTimestamp=args.get("start_timestamp", "-24hours"),
endTimestamp=args.get("end_timestamp", "now"),
sortBy=argToList(args.get("sort_by", None)),
includeFlags=argToList(args.get("include_flags", None)),
excludeFlags=argToList(args.get("exclude_flags", None)),
)
return CommandResults(
readable_output=pretty_print_events(dict(result), "# Find events\n"),
outputs_prefix="Argus.Events",
outputs=result,
raw_response=result,
)
def list_aggregated_events_command(args: Dict[str, Any]) -> CommandResults:
result = list_aggregated_events(
customerID=args.get("customer_id", None),
signature=args.get("signature", None),
ip=args.get("ip", None),
startTimestamp=args.get("start_timestamp", None),
endTimestamp=args.get("end_timestamp", None),
limit=args.get("limit", None),
offset=args.get("offset", None),
)
return CommandResults(
readable_output=pretty_print_events(dict(result), "# List Events\n"),
outputs_prefix="Argus.Events",
outputs=result,
raw_response=result,
)
def get_payload_command(args: Dict[str, Any]) -> CommandResults:
event_type = args.get("type", None)
timestamp = args.get("timestamp", None)
customer_id = args.get("customer_id", None)
event_id = args.get("event_id", None)
if not event_type:
raise ValueError("event type not specified")
if not timestamp:
raise ValueError("timestamp not specified")
if not customer_id:
raise ValueError("customer id not specified")
if not event_id:
raise ValueError("event id not specified")
result = get_payload(
type=event_type, timestamp=timestamp, customerID=customer_id, eventID=event_id
)
readable_output = "# Event payload\n"
readable_output += f"Event: {event_id}, type: {result['data']['type']}\n"
readable_output += result["data"]["payload"]
return CommandResults(
readable_output=readable_output,
outputs_prefix="Argus.Payload",
outputs=result,
raw_response=result,
)
def get_pcap_command(args: Dict[str, Any]) -> Any:
event_type = args.get("type", None)
timestamp = args.get("timestamp", None)
customer_id = args.get("customer_id", None)
event_id = args.get("event_id", None)
if not event_type:
raise ValueError("event type not specified")
if not timestamp:
raise ValueError("timestamp not specified")
if not customer_id:
raise ValueError("customer id not specified")
if not event_id:
raise ValueError("event id not specified")
result = get_pcap(
type=event_type, timestamp=timestamp, customerID=customer_id, eventID=event_id
)
return fileResult(f"{event_id}_pcap", result.content)
def find_nids_events_command(args: Dict[str, Any]) -> CommandResults:
# noinspection PyTypeChecker
result = find_n_i_d_s_events(
skipFutureEvents=args.get("skip_future_events", None),
exclude=args.get("exclude", None),
eventIdentifier=argToList(args.get("event_identifier", None)),
locationID=argToList(args.get("location_id", None)),
severity=argToList(args.get("severity", None)),
customer=argToList(args.get("customer", None)),
alarmID=argToList(args.get("alarm_id", None)),
attackCategoryID=argToList(args.get("attack_category_id", None)),
sourceGeoCountry=argToList(args.get("source_geo_country", None)),
destinationGeoCountry=argToList(args.get("destination_geo_country", None)),
geoCountry=argToList(args.get("geo_country", None)),
properties=str_to_dict(args.get("properties", None)),
exactMatchProperties=args.get("exact_match_properties", None),
sensorID=argToList(args.get("sensor_id", None)),
subCriteria=argToList(args.get("sub_criteria", None)),
signature=argToList(args.get("signature", None)),
lastUpdatedTimestamp=args.get("last_updated_timestamp", None),
indexStartTime=args.get("index_start_time", None),
indexEndTime=args.get("index_end_time", None),
destinationIP=argToList(args.get("destination_ip", None)),
sourceIP=argToList(args.get("source_ip", None)),
ip=argToList(args.get("ip", None)),
destinationPort=argToList(args.get("destination_port", None)),
sourcePort=argToList(args.get("source_port", None)),
port=argToList(args.get("port", None)),
minSeverity=args.get("min_severity", None),
maxSeverity=args.get("max_severity", None),
limit=args.get("limit", 25),
offset=args.get("offset", None),
includeDeleted=args.get("include_deleted", None),
startTimestamp=args.get("start_timestamp", "-24hours"),
endTimestamp=args.get("end_timestamp", "now"),
sortBy=argToList(args.get("sort_by", None)),
includeFlags=argToList(args.get("include_flags", None)),
excludeFlags=argToList(args.get("exclude_flags", None)),
)
return CommandResults(
readable_output=pretty_print_events(dict(result), "# Find NIDS Events\n"),
outputs_prefix="Argus.NIDS",
outputs=result,
raw_response=result,
)
def list_nids_events_command(args: Dict[str, Any]) -> CommandResults:
result = list_n_i_d_s_events(
customerID=args.get("customer_id", None),
signature=args.get("signature", None),
ip=args.get("ip", None),
startTimestamp=args.get("start_timestamp", None),
endTimestamp=args.get("end_timestamp", None),
limit=args.get("limit", None),
offset=args.get("offset", None),
)
return CommandResults(
readable_output=pretty_print_events(dict(result), "# List NIDS Events\n"),
outputs_prefix="Argus.NIDS",
outputs=result,
raw_response=result,
)
def search_records_command(args: Dict[str, Any]) -> CommandResults:
query = args.get("query", None)
if not query:
raise ValueError("query not specified")
# noinspection PyTypeChecker
result = search_records(
query=query,
aggregateResult=args.get("aggregate_result", None),
includeAnonymousResults=args.get("include_anonymous_results", None),
rrClass=argToList(args.get("rr_class", None)),
rrType=argToList(args.get("rr_type", None)),
customerID=argToList(args.get("customer_id", None)),
        tlp=argToList(args.get("tlp", None)),
limit=args.get("limit", 25),
offset=args.get("offset", None),
)
return CommandResults(
readable_output=tableToMarkdown("PDNS records", result["data"]),
outputs_prefix="Argus.PDNS",
outputs=result,
raw_response=result,
)
def fetch_observations_for_domain_command(args: Dict[str, Any]) -> CommandResults:
fqdn = args.get("fqdn", None)
if not fqdn:
raise ValueError("fqdn not specified")
result = fetch_observations_for_domain(fqdn=fqdn)
return CommandResults(
readable_output=tableToMarkdown(
f'Domain observations for "{fqdn}"', result["data"]
),
outputs_prefix="Argus.ObservationsDomain",
outputs=result,
raw_response=result,
)
def fetch_observations_for_i_p_command(args: Dict[str, Any]) -> CommandResults:
ip = args.get("ip", None)
if not ip:
raise ValueError("ip not specified")
result = fetch_observations_for_i_p(ip=ip)
return CommandResults(
readable_output=tableToMarkdown(f'IP observations for "{ip}"', result["data"]),
outputs_prefix="Argus.ObservationsIP",
outputs=result,
raw_response=result,
)
""" MAIN FUNCTION """
def main() -> None:
logging.getLogger("argus_cli").setLevel("WARNING")
first_fetch_period = parse_first_fetch(
demisto.params().get("first_fetch", "-1 day")
)
set_argus_settings(
demisto.params().get("api_key"),
demisto.params().get("api_url"),
handle_proxy(),
demisto.params().get("insecure", None),
)
demisto.debug(f"Command being called is {demisto.command()}")
try:
if demisto.command() == "test-module":
# This is the call made when pressing the integration Test button.
return_results(test_module_command())
elif demisto.command() == "fetch-incidents":
# Set and define the fetch incidents command to run after activated via integration settings.
next_run, incidents = fetch_incidents(
last_run=demisto.getLastRun(),
first_fetch_period=first_fetch_period,
limit=demisto.params().get("max_fetch", 25),
min_severity=demisto.params().get("min_severity", "low"),
)
demisto.setLastRun(next_run)
demisto.incidents(incidents)
elif demisto.command() == "argus-add-case-tag":
return_results(add_case_tag_command(demisto.args()))
elif demisto.command() == "argus-add-comment":
return_results(add_comment_command(demisto.args()))
elif demisto.command() == "argus-advanced-case-search":
return_results(advanced_case_search_command(demisto.args()))
elif demisto.command() == "argus-close-case":
return_results(close_case_command(demisto.args()))
elif demisto.command() == "argus-create-case":
return_results(create_case_command(demisto.args()))
elif demisto.command() == "argus-delete-case":
return_results(delete_case_command(demisto.args()))
elif demisto.command() == "argus-delete-comment":
return_results(delete_comment_command(demisto.args()))
elif demisto.command() == "argus-download-attachment":
return_results(download_attachment_command(demisto.args()))
elif demisto.command() == "argus-edit-comment":
return_results(edit_comment_command(demisto.args()))
elif demisto.command() == "argus-get-attachment":
return_results(get_attachment_command(demisto.args()))
elif demisto.command() == "argus-get-case-metadata-by-id":
return_results(get_case_metadata_by_id_command(demisto.args()))
elif demisto.command() == "argus-list-case-attachments":
return_results(list_case_attachments_command(demisto.args()))
elif demisto.command() == "argus-list-case-tags":
return_results(list_case_tags_command(demisto.args()))
elif demisto.command() == "argus-list-case-comments":
return_results(list_case_comments_command(demisto.args()))
elif demisto.command() == "argus-remove-case-tag-by-id":
return_results(remove_case_tag_by_id_command(demisto.args()))
elif demisto.command() == "argus-remove-case-tag-by-key-value":
return_results(remove_case_tag_by_key_value_command(demisto.args()))
elif demisto.command() == "argus-update-case":
return_results(update_case_command(demisto.args()))
elif demisto.command() == "argus-get-event":
return_results(get_event_command(demisto.args()))
elif demisto.command() == "argus-get-events-for-case":
return_results(get_events_for_case_command(demisto.args()))
elif demisto.command() == "argus-find-aggregated-events":
return_results(find_aggregated_events_command(demisto.args()))
elif demisto.command() == "argus-list-aggregated-events":
return_results(list_aggregated_events_command(demisto.args()))
elif demisto.command() == "argus-get-payload":
return_results(get_payload_command(demisto.args()))
elif demisto.command() == "argus-get-pcap":
return_results(get_pcap_command(demisto.args()))
elif demisto.command() == "argus-find-nids-events":
return_results(find_nids_events_command(demisto.args()))
elif demisto.command() == "argus-list-nids-events":
return_results(list_nids_events_command(demisto.args()))
elif demisto.command() == "argus-pdns-search-records":
return_results(search_records_command(demisto.args()))
elif demisto.command() == "argus-fetch-observations-for-domain":
return_results(fetch_observations_for_domain_command(demisto.args()))
elif demisto.command() == "argus-fetch-observations-for-ip":
return_results(fetch_observations_for_i_p_command(demisto.args()))
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(
f"Failed to execute {demisto.command()} command.\nError:\n{str(e)}"
)
""" ENTRY POINT """
if __name__ in ("__main__", "__builtin__", "builtins"):
main()
|
[
"demistomock.params",
"logging.getLogger",
"demistomock.command",
"demistomock.args",
"argus_api.api.reputation.v1.observation.fetch_observations_for_i_p",
"argus_api.api.cases.v2.case.download_attachment",
"demistomock.setLastRun",
"argus_api.api.cases.v2.case.delete_case",
"argus_api.api.cases.v2.case.remove_case_tag_by_key_value",
"argus_api.api.cases.v2.case.delete_comment",
"json.dumps",
"argus_api.api.cases.v2.case.remove_case_tag_by_id",
"argus_api.api.cases.v2.case.edit_comment",
"argus_api.api.events.v1.get_event_by_path",
"argus_api.api.cases.v2.case.get_attachment",
"dateparser.parse",
"demistomock.getLastRun",
"urllib3.disable_warnings",
"argus_api.api.events.v1.payload.get_payload",
"argus_api.api.events.v1.pcap.get_pcap",
"traceback.format_exc",
"argus_api.api.cases.v2.case.add_case_tag",
"demistomock.incidents",
"argus_api.api.reputation.v1.observation.fetch_observations_for_domain",
"argus_api.api.currentuser.v1.user.get_current_user"
] |
[((1357, 1383), 'urllib3.disable_warnings', 'urllib3.disable_warnings', ([], {}), '()\n', (1381, 1383), False, 'import urllib3\n'), ((5694, 5712), 'argus_api.api.currentuser.v1.user.get_current_user', 'get_current_user', ([], {}), '()\n', (5710, 5712), False, 'from argus_api.api.currentuser.v1.user import get_current_user\n'), ((8036, 8074), 'argus_api.api.cases.v2.case.add_case_tag', 'add_case_tag', ([], {'caseID': 'case_id', 'tags': 'tag'}), '(caseID=case_id, tags=tag)\n', (8048, 8074), False, 'from argus_api.api.cases.v2.case import add_case_tag, add_comment, advanced_case_search, close_case, create_case, delete_case, delete_comment, download_attachment, edit_comment, get_attachment, get_case_metadata_by_id, list_case_attachments, list_case_tags, list_case_comments, remove_case_tag_by_id, remove_case_tag_by_key_value, update_case\n'), ((14163, 14190), 'argus_api.api.cases.v2.case.delete_case', 'delete_case', ([], {'caseID': 'case_id'}), '(caseID=case_id)\n', (14174, 14190), False, 'from argus_api.api.cases.v2.case import add_case_tag, add_comment, advanced_case_search, close_case, create_case, delete_case, delete_comment, download_attachment, edit_comment, get_attachment, get_case_metadata_by_id, list_case_attachments, list_case_tags, list_case_comments, remove_case_tag_by_id, remove_case_tag_by_key_value, update_case\n'), ((14707, 14759), 'argus_api.api.cases.v2.case.delete_comment', 'delete_comment', ([], {'caseID': 'case_id', 'commentID': 'comment_id'}), '(caseID=case_id, commentID=comment_id)\n', (14721, 14759), False, 'from argus_api.api.cases.v2.case import add_case_tag, add_comment, advanced_case_search, close_case, create_case, delete_case, delete_comment, download_attachment, edit_comment, get_attachment, get_case_metadata_by_id, list_case_attachments, list_case_tags, list_case_comments, remove_case_tag_by_id, remove_case_tag_by_key_value, update_case\n'), ((15329, 15392), 'argus_api.api.cases.v2.case.download_attachment', 'download_attachment', ([], {'caseID': 'case_id', 'attachmentID': 'attachment_id'}), '(caseID=case_id, attachmentID=attachment_id)\n', (15348, 15392), False, 'from argus_api.api.cases.v2.case import add_case_tag, add_comment, advanced_case_search, close_case, create_case, delete_case, delete_comment, download_attachment, edit_comment, get_attachment, get_case_metadata_by_id, list_case_attachments, list_case_tags, list_case_comments, remove_case_tag_by_id, remove_case_tag_by_key_value, update_case\n'), ((15871, 15938), 'argus_api.api.cases.v2.case.edit_comment', 'edit_comment', ([], {'caseID': 'case_id', 'commentID': 'comment_id', 'comment': 'comment'}), '(caseID=case_id, commentID=comment_id, comment=comment)\n', (15883, 15938), False, 'from argus_api.api.cases.v2.case import add_case_tag, add_comment, advanced_case_search, close_case, create_case, delete_case, delete_comment, download_attachment, edit_comment, get_attachment, get_case_metadata_by_id, list_case_attachments, list_case_tags, list_case_comments, remove_case_tag_by_id, remove_case_tag_by_key_value, update_case\n'), ((16514, 16572), 'argus_api.api.cases.v2.case.get_attachment', 'get_attachment', ([], {'caseID': 'case_id', 'attachmentID': 'attachment_id'}), '(caseID=case_id, attachmentID=attachment_id)\n', (16528, 16572), False, 'from argus_api.api.cases.v2.case import add_case_tag, add_comment, advanced_case_search, close_case, create_case, delete_case, delete_comment, download_attachment, edit_comment, get_attachment, get_case_metadata_by_id, list_case_attachments, list_case_tags, 
list_case_comments, remove_case_tag_by_id, remove_case_tag_by_key_value, update_case\n'), ((20247, 20298), 'argus_api.api.cases.v2.case.remove_case_tag_by_id', 'remove_case_tag_by_id', ([], {'caseID': 'case_id', 'tagID': 'tag_id'}), '(caseID=case_id, tagID=tag_id)\n', (20268, 20298), False, 'from argus_api.api.cases.v2.case import add_case_tag, add_comment, advanced_case_search, close_case, create_case, delete_case, delete_comment, download_attachment, edit_comment, get_attachment, get_case_metadata_by_id, list_case_attachments, list_case_tags, list_case_comments, remove_case_tag_by_id, remove_case_tag_by_key_value, update_case\n'), ((21040, 21112), 'argus_api.api.cases.v2.case.remove_case_tag_by_key_value', 'remove_case_tag_by_key_value', ([], {'caseID': 'case_id', 'tagKey': 'key', 'tagValue': 'value'}), '(caseID=case_id, tagKey=key, tagValue=value)\n', (21068, 21112), False, 'from argus_api.api.cases.v2.case import add_case_tag, add_comment, advanced_case_search, close_case, create_case, delete_case, delete_comment, download_attachment, edit_comment, get_attachment, get_case_metadata_by_id, list_case_attachments, list_case_tags, list_case_comments, remove_case_tag_by_id, remove_case_tag_by_key_value, update_case\n'), ((23077, 23179), 'argus_api.api.events.v1.get_event_by_path', 'get_event_by_path', ([], {'type': 'event_type', 'timestamp': 'timestamp', 'customerID': 'customer_id', 'eventID': 'event_id'}), '(type=event_type, timestamp=timestamp, customerID=\n customer_id, eventID=event_id)\n', (23094, 23179), False, 'from argus_api.api.events.v1 import get_event_by_path\n'), ((27720, 27815), 'argus_api.api.events.v1.payload.get_payload', 'get_payload', ([], {'type': 'event_type', 'timestamp': 'timestamp', 'customerID': 'customer_id', 'eventID': 'event_id'}), '(type=event_type, timestamp=timestamp, customerID=customer_id,\n eventID=event_id)\n', (27731, 27815), False, 'from argus_api.api.events.v1.payload import get_payload\n'), ((28703, 28795), 'argus_api.api.events.v1.pcap.get_pcap', 'get_pcap', ([], {'type': 'event_type', 'timestamp': 'timestamp', 'customerID': 'customer_id', 'eventID': 'event_id'}), '(type=event_type, timestamp=timestamp, customerID=customer_id,\n eventID=event_id)\n', (28711, 28795), False, 'from argus_api.api.events.v1.pcap import get_pcap\n'), ((33028, 33068), 'argus_api.api.reputation.v1.observation.fetch_observations_for_domain', 'fetch_observations_for_domain', ([], {'fqdn': 'fqdn'}), '(fqdn=fqdn)\n', (33057, 33068), False, 'from argus_api.api.reputation.v1.observation import fetch_observations_for_domain, fetch_observations_for_i_p\n'), ((33508, 33541), 'argus_api.api.reputation.v1.observation.fetch_observations_for_i_p', 'fetch_observations_for_i_p', ([], {'ip': 'ip'}), '(ip=ip)\n', (33534, 33541), False, 'from argus_api.api.reputation.v1.observation import fetch_observations_for_domain, fetch_observations_for_i_p\n'), ((1494, 1510), 'demistomock.params', 'demisto.params', ([], {}), '()\n', (1508, 1510), True, 'import demistomock as demisto\n'), ((3593, 3620), 'dateparser.parse', 'dateparser.parse', (['date_time'], {}), '(date_time)\n', (3609, 3620), False, 'import dateparser\n'), ((3906, 3933), 'dateparser.parse', 'dateparser.parse', (['date_time'], {}), '(date_time)\n', (3922, 3933), False, 'import dateparser\n'), ((33813, 33843), 'logging.getLogger', 'logging.getLogger', (['"""argus_cli"""'], {}), "('argus_cli')\n", (33830, 33843), False, 'import logging\n'), ((34241, 34258), 'demistomock.command', 'demisto.command', ([], {}), '()\n', (34256, 34258), True, 
'import demistomock as demisto\n'), ((7431, 7447), 'json.dumps', 'json.dumps', (['case'], {}), '(case)\n', (7441, 7447), False, 'import json\n'), ((33917, 33933), 'demistomock.params', 'demisto.params', ([], {}), '()\n', (33931, 33933), True, 'import demistomock as demisto\n'), ((34002, 34018), 'demistomock.params', 'demisto.params', ([], {}), '()\n', (34016, 34018), True, 'import demistomock as demisto\n'), ((34043, 34059), 'demistomock.params', 'demisto.params', ([], {}), '()\n', (34057, 34059), True, 'import demistomock as demisto\n'), ((34108, 34124), 'demistomock.params', 'demisto.params', ([], {}), '()\n', (34122, 34124), True, 'import demistomock as demisto\n'), ((34200, 34217), 'demistomock.command', 'demisto.command', ([], {}), '()\n', (34215, 34217), True, 'import demistomock as demisto\n'), ((34420, 34437), 'demistomock.command', 'demisto.command', ([], {}), '()\n', (34435, 34437), True, 'import demistomock as demisto\n'), ((34881, 34909), 'demistomock.setLastRun', 'demisto.setLastRun', (['next_run'], {}), '(next_run)\n', (34899, 34909), True, 'import demistomock as demisto\n'), ((34922, 34950), 'demistomock.incidents', 'demisto.incidents', (['incidents'], {}), '(incidents)\n', (34939, 34950), True, 'import demistomock as demisto\n'), ((38712, 38734), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (38732, 38734), False, 'import traceback\n'), ((34965, 34982), 'demistomock.command', 'demisto.command', ([], {}), '()\n', (34980, 34982), True, 'import demistomock as demisto\n'), ((34642, 34662), 'demistomock.getLastRun', 'demisto.getLastRun', ([], {}), '()\n', (34660, 34662), True, 'import demistomock as demisto\n'), ((35087, 35104), 'demistomock.command', 'demisto.command', ([], {}), '()\n', (35102, 35104), True, 'import demistomock as demisto\n'), ((38814, 38831), 'demistomock.command', 'demisto.command', ([], {}), '()\n', (38829, 38831), True, 'import demistomock as demisto\n'), ((35056, 35070), 'demistomock.args', 'demisto.args', ([], {}), '()\n', (35068, 35070), True, 'import demistomock as demisto\n'), ((35207, 35224), 'demistomock.command', 'demisto.command', ([], {}), '()\n', (35222, 35224), True, 'import demistomock as demisto\n'), ((34741, 34757), 'demistomock.params', 'demisto.params', ([], {}), '()\n', (34755, 34757), True, 'import demistomock as demisto\n'), ((34809, 34825), 'demistomock.params', 'demisto.params', ([], {}), '()\n', (34823, 34825), True, 'import demistomock as demisto\n'), ((35176, 35190), 'demistomock.args', 'demisto.args', ([], {}), '()\n', (35188, 35190), True, 'import demistomock as demisto\n'), ((35345, 35362), 'demistomock.command', 'demisto.command', ([], {}), '()\n', (35360, 35362), True, 'import demistomock as demisto\n'), ((35314, 35328), 'demistomock.args', 'demisto.args', ([], {}), '()\n', (35326, 35328), True, 'import demistomock as demisto\n'), ((35463, 35480), 'demistomock.command', 'demisto.command', ([], {}), '()\n', (35478, 35480), True, 'import demistomock as demisto\n'), ((35432, 35446), 'demistomock.args', 'demisto.args', ([], {}), '()\n', (35444, 35446), True, 'import demistomock as demisto\n'), ((35583, 35600), 'demistomock.command', 'demisto.command', ([], {}), '()\n', (35598, 35600), True, 'import demistomock as demisto\n'), ((35552, 35566), 'demistomock.args', 'demisto.args', ([], {}), '()\n', (35564, 35566), True, 'import demistomock as demisto\n'), ((35703, 35720), 'demistomock.command', 'demisto.command', ([], {}), '()\n', (35718, 35720), True, 'import demistomock as demisto\n'), ((35672, 35686), 
'demistomock.args', 'demisto.args', ([], {}), '()\n', (35684, 35686), True, 'import demistomock as demisto\n'), ((35829, 35846), 'demistomock.command', 'demisto.command', ([], {}), '()\n', (35844, 35846), True, 'import demistomock as demisto\n'), ((35798, 35812), 'demistomock.args', 'demisto.args', ([], {}), '()\n', (35810, 35812), True, 'import demistomock as demisto\n'), ((35965, 35982), 'demistomock.command', 'demisto.command', ([], {}), '()\n', (35980, 35982), True, 'import demistomock as demisto\n'), ((35934, 35948), 'demistomock.args', 'demisto.args', ([], {}), '()\n', (35946, 35948), True, 'import demistomock as demisto\n'), ((36087, 36104), 'demistomock.command', 'demisto.command', ([], {}), '()\n', (36102, 36104), True, 'import demistomock as demisto\n'), ((36056, 36070), 'demistomock.args', 'demisto.args', ([], {}), '()\n', (36068, 36070), True, 'import demistomock as demisto\n'), ((36213, 36230), 'demistomock.command', 'demisto.command', ([], {}), '()\n', (36228, 36230), True, 'import demistomock as demisto\n'), ((36182, 36196), 'demistomock.args', 'demisto.args', ([], {}), '()\n', (36194, 36196), True, 'import demistomock as demisto\n'), ((36357, 36374), 'demistomock.command', 'demisto.command', ([], {}), '()\n', (36372, 36374), True, 'import demistomock as demisto\n'), ((36326, 36340), 'demistomock.args', 'demisto.args', ([], {}), '()\n', (36338, 36340), True, 'import demistomock as demisto\n'), ((36497, 36514), 'demistomock.command', 'demisto.command', ([], {}), '()\n', (36512, 36514), True, 'import demistomock as demisto\n'), ((36466, 36480), 'demistomock.args', 'demisto.args', ([], {}), '()\n', (36478, 36480), True, 'import demistomock as demisto\n'), ((36623, 36640), 'demistomock.command', 'demisto.command', ([], {}), '()\n', (36638, 36640), True, 'import demistomock as demisto\n'), ((36592, 36606), 'demistomock.args', 'demisto.args', ([], {}), '()\n', (36604, 36606), True, 'import demistomock as demisto\n'), ((36757, 36774), 'demistomock.command', 'demisto.command', ([], {}), '()\n', (36772, 36774), True, 'import demistomock as demisto\n'), ((36726, 36740), 'demistomock.args', 'demisto.args', ([], {}), '()\n', (36738, 36740), True, 'import demistomock as demisto\n'), ((36897, 36914), 'demistomock.command', 'demisto.command', ([], {}), '()\n', (36912, 36914), True, 'import demistomock as demisto\n'), ((36866, 36880), 'demistomock.args', 'demisto.args', ([], {}), '()\n', (36878, 36880), True, 'import demistomock as demisto\n'), ((37051, 37068), 'demistomock.command', 'demisto.command', ([], {}), '()\n', (37066, 37068), True, 'import demistomock as demisto\n'), ((37020, 37034), 'demistomock.args', 'demisto.args', ([], {}), '()\n', (37032, 37034), True, 'import demistomock as demisto\n'), ((37171, 37188), 'demistomock.command', 'demisto.command', ([], {}), '()\n', (37186, 37188), True, 'import demistomock as demisto\n'), ((37140, 37154), 'demistomock.args', 'demisto.args', ([], {}), '()\n', (37152, 37154), True, 'import demistomock as demisto\n'), ((37287, 37304), 'demistomock.command', 'demisto.command', ([], {}), '()\n', (37302, 37304), True, 'import demistomock as demisto\n'), ((37256, 37270), 'demistomock.args', 'demisto.args', ([], {}), '()\n', (37268, 37270), True, 'import demistomock as demisto\n'), ((37423, 37440), 'demistomock.command', 'demisto.command', ([], {}), '()\n', (37438, 37440), True, 'import demistomock as demisto\n'), ((37392, 37406), 'demistomock.args', 'demisto.args', ([], {}), '()\n', (37404, 37406), True, 'import demistomock as demisto\n'), ((37565, 
37582), 'demistomock.command', 'demisto.command', ([], {}), '()\n', (37580, 37582), True, 'import demistomock as demisto\n'), ((37534, 37548), 'demistomock.args', 'demisto.args', ([], {}), '()\n', (37546, 37548), True, 'import demistomock as demisto\n'), ((37707, 37724), 'demistomock.command', 'demisto.command', ([], {}), '()\n', (37722, 37724), True, 'import demistomock as demisto\n'), ((37676, 37690), 'demistomock.args', 'demisto.args', ([], {}), '()\n', (37688, 37690), True, 'import demistomock as demisto\n'), ((37827, 37844), 'demistomock.command', 'demisto.command', ([], {}), '()\n', (37842, 37844), True, 'import demistomock as demisto\n'), ((37796, 37810), 'demistomock.args', 'demisto.args', ([], {}), '()\n', (37808, 37810), True, 'import demistomock as demisto\n'), ((37941, 37958), 'demistomock.command', 'demisto.command', ([], {}), '()\n', (37956, 37958), True, 'import demistomock as demisto\n'), ((37910, 37924), 'demistomock.args', 'demisto.args', ([], {}), '()\n', (37922, 37924), True, 'import demistomock as demisto\n'), ((38071, 38088), 'demistomock.command', 'demisto.command', ([], {}), '()\n', (38086, 38088), True, 'import demistomock as demisto\n'), ((38040, 38054), 'demistomock.args', 'demisto.args', ([], {}), '()\n', (38052, 38054), True, 'import demistomock as demisto\n'), ((38201, 38218), 'demistomock.command', 'demisto.command', ([], {}), '()\n', (38216, 38218), True, 'import demistomock as demisto\n'), ((38170, 38184), 'demistomock.args', 'demisto.args', ([], {}), '()\n', (38182, 38184), True, 'import demistomock as demisto\n'), ((38332, 38349), 'demistomock.command', 'demisto.command', ([], {}), '()\n', (38347, 38349), True, 'import demistomock as demisto\n'), ((38301, 38315), 'demistomock.args', 'demisto.args', ([], {}), '()\n', (38313, 38315), True, 'import demistomock as demisto\n'), ((38488, 38505), 'demistomock.command', 'demisto.command', ([], {}), '()\n', (38503, 38505), True, 'import demistomock as demisto\n'), ((38457, 38471), 'demistomock.args', 'demisto.args', ([], {}), '()\n', (38469, 38471), True, 'import demistomock as demisto\n'), ((38606, 38620), 'demistomock.args', 'demisto.args', ([], {}), '()\n', (38618, 38620), True, 'import demistomock as demisto\n')]
|
import unittest
import numpy as np
from numpy.testing import assert_almost_equal
from dymos.utils.hermite import hermite_matrices
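# The Hermite matrices relate sample values/derivatives to interpolated ones:
# y_i = Ai.dot(y) + dt_dtau * Bi.dot(ydot) and ydot_i = Ad.dot(y) / dt_dtau + Bd.dot(ydot),
# which the tests below verify against analytic polynomials.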
class TestHermiteMatrices(unittest.TestCase):
def test_quadratic(self):
# Interpolate with values and rates provided at [-1, 1] in tau space
tau_given = [-1.0, 1.0]
tau_eval = np.linspace(-1, 1, 100)
# In time space use the boundaries [-2, 2]
dt_dtau = 4.0 / 2.0
# Provide values for y = t**2 and its time-derivative
y_given = [4.0, 4.0]
ydot_given = [-4.0, 4.0]
# Get the hermite matrices.
Ai, Bi, Ad, Bd = hermite_matrices(tau_given, tau_eval)
# Interpolate y and ydot at tau_eval points in tau space.
y_i = np.dot(Ai, y_given) + dt_dtau * np.dot(Bi, ydot_given)
ydot_i = (1.0 / dt_dtau) * np.dot(Ad, y_given) + np.dot(Bd, ydot_given)
# Compute our function as a point of comparison.
y_computed = (tau_eval * dt_dtau)**2
ydot_computed = 2.0 * (tau_eval * dt_dtau)
# Check results
assert_almost_equal(y_i, y_computed)
assert_almost_equal(ydot_i, ydot_computed)
def test_cubic(self):
        # Interpolate with values and rates provided at [-1, 0, 1] in tau space
tau_given = [-1.0, 0.0, 1.0]
tau_eval = np.linspace(-1, 1, 101)
# In time space use the boundaries [-2, 2]
dt_dtau = 4.0 / 2.0
        # Provide values for y = t**3 and its time-derivative
y_given = [-8.0, 0.0, 8.0]
ydot_given = [12.0, 0.0, 12.0]
# Get the hermite matrices.
Ai, Bi, Ad, Bd = hermite_matrices(tau_given, tau_eval)
# Interpolate y and ydot at tau_eval points in tau space.
y_i = np.dot(Ai, y_given) + dt_dtau * np.dot(Bi, ydot_given)
ydot_i = (1.0 / dt_dtau) * np.dot(Ad, y_given) + np.dot(Bd, ydot_given)
# Compute our function as a point of comparison.
y_computed = (tau_eval * dt_dtau)**3
ydot_computed = 3.0 * (tau_eval * dt_dtau)**2
# Check results
assert_almost_equal(y_i, y_computed)
assert_almost_equal(ydot_i, ydot_computed)
if __name__ == '__main__': # pragma: no cover
unittest.main()
|
[
"numpy.testing.assert_almost_equal",
"numpy.linspace",
"dymos.utils.hermite.hermite_matrices",
"numpy.dot",
"unittest.main"
] |
[((2209, 2224), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2222, 2224), False, 'import unittest\n'), ((340, 363), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(100)'], {}), '(-1, 1, 100)\n', (351, 363), True, 'import numpy as np\n'), ((631, 668), 'dymos.utils.hermite.hermite_matrices', 'hermite_matrices', (['tau_given', 'tau_eval'], {}), '(tau_given, tau_eval)\n', (647, 668), False, 'from dymos.utils.hermite import hermite_matrices\n'), ((1072, 1108), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['y_i', 'y_computed'], {}), '(y_i, y_computed)\n', (1091, 1108), False, 'from numpy.testing import assert_almost_equal\n'), ((1117, 1159), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['ydot_i', 'ydot_computed'], {}), '(ydot_i, ydot_computed)\n', (1136, 1159), False, 'from numpy.testing import assert_almost_equal\n'), ((1321, 1344), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(101)'], {}), '(-1, 1, 101)\n', (1332, 1344), True, 'import numpy as np\n'), ((1624, 1661), 'dymos.utils.hermite.hermite_matrices', 'hermite_matrices', (['tau_given', 'tau_eval'], {}), '(tau_given, tau_eval)\n', (1640, 1661), False, 'from dymos.utils.hermite import hermite_matrices\n'), ((2068, 2104), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['y_i', 'y_computed'], {}), '(y_i, y_computed)\n', (2087, 2104), False, 'from numpy.testing import assert_almost_equal\n'), ((2113, 2155), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['ydot_i', 'ydot_computed'], {}), '(ydot_i, ydot_computed)\n', (2132, 2155), False, 'from numpy.testing import assert_almost_equal\n'), ((750, 769), 'numpy.dot', 'np.dot', (['Ai', 'y_given'], {}), '(Ai, y_given)\n', (756, 769), True, 'import numpy as np\n'), ((862, 884), 'numpy.dot', 'np.dot', (['Bd', 'ydot_given'], {}), '(Bd, ydot_given)\n', (868, 884), True, 'import numpy as np\n'), ((1743, 1762), 'numpy.dot', 'np.dot', (['Ai', 'y_given'], {}), '(Ai, y_given)\n', (1749, 1762), True, 'import numpy as np\n'), ((1855, 1877), 'numpy.dot', 'np.dot', (['Bd', 'ydot_given'], {}), '(Bd, ydot_given)\n', (1861, 1877), True, 'import numpy as np\n'), ((782, 804), 'numpy.dot', 'np.dot', (['Bi', 'ydot_given'], {}), '(Bi, ydot_given)\n', (788, 804), True, 'import numpy as np\n'), ((840, 859), 'numpy.dot', 'np.dot', (['Ad', 'y_given'], {}), '(Ad, y_given)\n', (846, 859), True, 'import numpy as np\n'), ((1775, 1797), 'numpy.dot', 'np.dot', (['Bi', 'ydot_given'], {}), '(Bi, ydot_given)\n', (1781, 1797), True, 'import numpy as np\n'), ((1833, 1852), 'numpy.dot', 'np.dot', (['Ad', 'y_given'], {}), '(Ad, y_given)\n', (1839, 1852), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""Application configuration."""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from . import __author__, __name__, __version__
class Config(object):
"""Base configuration."""
SERVER_NAME = os.environ.get('SERVER_NAME', None)
PREFERRED_URL_SCHEME = os.environ.get('PREFERRED_URL_SCHEME', 'http')
APP_NAME = __name__
APP_VERSION = __version__
APP_AUTHOR = __author__
JSON_AS_ASCII = False
SECRET_KEY = os.environ.get('XL_AUTH_SECRET', 'secret-key') # TODO: Change me
APP_DIR = os.path.abspath(os.path.dirname(__file__)) # This directory.
PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir))
BCRYPT_LOG_ROUNDS = 13
DEBUG_TB_ENABLED = False # Disable Debug toolbar.
DEBUG_TB_INTERCEPT_REDIRECTS = False
CACHE_TYPE = 'simple' # Can be "memcached", "redis", etc.
SQLALCHEMY_TRACK_MODIFICATIONS = False
WEBPACK_MANIFEST_PATH = 'webpack/manifest.json'
BABEL_DEFAULT_LOCALE = os.environ.get('BABEL_DEFAULT_LOCALE', 'sv')
BABEL_DEFAULT_TIMEZONE = 'utc'
EMAIL_DEFAULT_FROM = os.environ.get('EMAIL_DEFAULT_FROM', '<EMAIL>')
EMAIL_HOST = os.environ.get('EMAIL_HOST', 'smtp.kb.se')
EMAIL_PORT = int(os.environ.get('EMAIL_PORT', '25'))
EMAIL_TIMEOUT = int(os.environ.get('EMAIL_TIMEOUT', '5'))
OAUTH2_PROVIDER_TOKEN_EXPIRES_IN = 36000
XL_AUTH_MAX_ACTIVE_PASSWORD_RESETS = 2
XL_AUTH_FAILED_LOGIN_TIMEFRAME = 60 * 60
XL_AUTH_FAILED_LOGIN_MAX_ATTEMPTS = 7
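# The classes below specialise Config for the production, development and
# test environments, overriding only the settings that differ.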
class ProdConfig(Config):
"""Production configuration."""
ENV = 'prod'
DEBUG = False
SQLALCHEMY_DATABASE_URI = os.environ.get('SQLALCHEMY_DATABASE_URI',
'postgresql://localhost/example')
DEBUG_TB_ENABLED = False # Disable Debug toolbar.
class DevConfig(Config):
"""Development configuration."""
ENV = 'dev'
DEBUG = True
DB_NAME = 'dev.db'
# Put the db file in project root
DB_PATH = os.path.join(Config.PROJECT_ROOT, DB_NAME)
SQLALCHEMY_DATABASE_URI = 'sqlite:///{0}'.format(DB_PATH)
DEBUG_TB_ENABLED = True
CACHE_TYPE = 'simple' # Can be "memcached", "redis", etc.
class TestConfig(Config):
"""Test configuration."""
TESTING = True
DEBUG = True
SQLALCHEMY_DATABASE_URI = 'sqlite://'
# For faster tests; needs at least 4 to avoid "ValueError: Invalid rounds".
BCRYPT_LOG_ROUNDS = 4
WTF_CSRF_ENABLED = False # Allows form testing.
EMAIL_BACKEND = 'flask_emails.backends.DummyBackend'
|
[
"os.path.join",
"os.path.dirname",
"os.environ.get"
] |
[((274, 309), 'os.environ.get', 'os.environ.get', (['"""SERVER_NAME"""', 'None'], {}), "('SERVER_NAME', None)\n", (288, 309), False, 'import os\n'), ((337, 383), 'os.environ.get', 'os.environ.get', (['"""PREFERRED_URL_SCHEME"""', '"""http"""'], {}), "('PREFERRED_URL_SCHEME', 'http')\n", (351, 383), False, 'import os\n'), ((509, 555), 'os.environ.get', 'os.environ.get', (['"""XL_AUTH_SECRET"""', '"""secret-key"""'], {}), "('XL_AUTH_SECRET', 'secret-key')\n", (523, 555), False, 'import os\n'), ((1028, 1072), 'os.environ.get', 'os.environ.get', (['"""BABEL_DEFAULT_LOCALE"""', '"""sv"""'], {}), "('BABEL_DEFAULT_LOCALE', 'sv')\n", (1042, 1072), False, 'import os\n'), ((1133, 1180), 'os.environ.get', 'os.environ.get', (['"""EMAIL_DEFAULT_FROM"""', '"""<EMAIL>"""'], {}), "('EMAIL_DEFAULT_FROM', '<EMAIL>')\n", (1147, 1180), False, 'import os\n'), ((1198, 1240), 'os.environ.get', 'os.environ.get', (['"""EMAIL_HOST"""', '"""smtp.kb.se"""'], {}), "('EMAIL_HOST', 'smtp.kb.se')\n", (1212, 1240), False, 'import os\n'), ((1665, 1740), 'os.environ.get', 'os.environ.get', (['"""SQLALCHEMY_DATABASE_URI"""', '"""postgresql://localhost/example"""'], {}), "('SQLALCHEMY_DATABASE_URI', 'postgresql://localhost/example')\n", (1679, 1740), False, 'import os\n'), ((2014, 2056), 'os.path.join', 'os.path.join', (['Config.PROJECT_ROOT', 'DB_NAME'], {}), '(Config.PROJECT_ROOT, DB_NAME)\n', (2026, 2056), False, 'import os\n'), ((605, 630), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (620, 630), False, 'import os\n'), ((686, 718), 'os.path.join', 'os.path.join', (['APP_DIR', 'os.pardir'], {}), '(APP_DIR, os.pardir)\n', (698, 718), False, 'import os\n'), ((1262, 1296), 'os.environ.get', 'os.environ.get', (['"""EMAIL_PORT"""', '"""25"""'], {}), "('EMAIL_PORT', '25')\n", (1276, 1296), False, 'import os\n'), ((1322, 1358), 'os.environ.get', 'os.environ.get', (['"""EMAIL_TIMEOUT"""', '"""5"""'], {}), "('EMAIL_TIMEOUT', '5')\n", (1336, 1358), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
from django.db import models
class KeyConstructorUserProperty(models.Model):
name = models.CharField(max_length=100)
class Meta:
app_label = 'tests_app'
class KeyConstructorUserModel(models.Model):
property = models.ForeignKey(KeyConstructorUserProperty)
class Meta:
app_label = 'tests_app'
|
[
"django.db.models.CharField",
"django.db.models.ForeignKey"
] |
[((114, 146), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (130, 146), False, 'from django.db import models\n'), ((258, 303), 'django.db.models.ForeignKey', 'models.ForeignKey', (['KeyConstructorUserProperty'], {}), '(KeyConstructorUserProperty)\n', (275, 303), False, 'from django.db import models\n')]
|
# ******************************************************************************
# Copyright 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import numbers
from typing import Union
class NumericLimits(object):
"""Class providing interface to extract numerical limits for given data type."""
@staticmethod
def _get_number_limits_class(dtype):
# type: (np.dtype) -> Union[IntegralLimits, FloatingPointLimits]
"""Return specialized class instance with limits set for given data type.
:param dtype: The data type we want to check limits for.
:return: The specialized class instance providing numeric limits.
"""
data_type = dtype.type
value = data_type(1)
if isinstance(value, numbers.Integral):
return IntegralLimits(data_type)
elif isinstance(value, numbers.Real):
return FloatingPointLimits(data_type)
else:
raise ValueError('NumericLimits: unsupported data type: <{}>.'.format(dtype.type))
@staticmethod
def _get_dtype(dtype): # type: (Union[np.dtype, int, float]) -> np.dtype
"""Return numpy dtype object wrapping provided data type.
:param dtype: The data type to be wrapped.
:return: The numpy dtype object.
"""
return dtype if isinstance(dtype, np.dtype) else np.dtype(dtype)
@classmethod
def max(cls, dtype): # type: (np.dtype) -> Union[int, float]
"""Return maximum value that can be represented in given data type.
:param dtype: The data type we want to check maximum value for.
:return: The maximum value.
"""
return cls._get_number_limits_class(cls._get_dtype(dtype)).max
@classmethod
def min(cls, dtype): # type: (np.dtype) -> Union[int, float]
"""Return minimum value that can be represented in given data type.
:param dtype: The data type we want to check minimum value for.
:return: The minimum value.
"""
return cls._get_number_limits_class(cls._get_dtype(dtype)).min
class FloatingPointLimits(object):
"""Class providing access to numeric limits for floating point data types."""
def __init__(self, data_type): # type: (type) -> None
self.data_type = data_type
@property
def max(self): # type: () -> float
"""Provide maximum representable value by stored data type.
:return: The maximum value.
"""
return np.finfo(self.data_type).max
@property
def min(self): # type: () -> float
"""Provide minimum representable value by stored data type.
:return: The minimum value.
"""
return np.finfo(self.data_type).min
class IntegralLimits(object):
"""Class providing access to numeric limits for integral data types."""
def __init__(self, data_type): # type: (type) -> None
self.data_type = data_type
@property
def max(self): # type: () -> int
"""Provide maximum representable value by stored data type.
:return: The maximum value.
"""
return np.iinfo(self.data_type).max
@property
def min(self): # type: () -> int
"""Provide minimum representable value by stored data type.
:return: The minimum value.
"""
return np.iinfo(self.data_type).min
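# Example usage: NumericLimits.max(np.int32) dispatches to IntegralLimits and
# returns np.iinfo(np.int32).max, while NumericLimits.min(np.float64) uses np.finfo.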
|
[
"numpy.finfo",
"numpy.dtype",
"numpy.iinfo"
] |
[((2119, 2134), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (2127, 2134), True, 'import numpy as np\n'), ((3240, 3264), 'numpy.finfo', 'np.finfo', (['self.data_type'], {}), '(self.data_type)\n', (3248, 3264), True, 'import numpy as np\n'), ((3456, 3480), 'numpy.finfo', 'np.finfo', (['self.data_type'], {}), '(self.data_type)\n', (3464, 3480), True, 'import numpy as np\n'), ((3873, 3897), 'numpy.iinfo', 'np.iinfo', (['self.data_type'], {}), '(self.data_type)\n', (3881, 3897), True, 'import numpy as np\n'), ((4087, 4111), 'numpy.iinfo', 'np.iinfo', (['self.data_type'], {}), '(self.data_type)\n', (4095, 4111), True, 'import numpy as np\n')]
|
# Copyright 2020 Konstruktor, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import platform
import time
from unittest import mock
from unittest.mock import patch, call
from pytest import fixture
from tethys.core.pipes.pipe_zero import ZeroPipe
from tethys.core.sessions.sess_zero import ZeroSession
from tethys.core.stations.station_zero import ZeroStation
from tethys.core.streams.stream_zero import ZeroStream
from tethys.core.transports.transport_zero import ZeroTransport
class MockTransport(ZeroTransport):
def __init__(self):
pass
connect = mock.MagicMock()
disconnect = mock.MagicMock()
class MockSession(ZeroSession):
closing_mode = None
def __init__(self):
self._closed = False
@property
def closed(self):
return self._closed
class MockStation(ZeroStation):
def __init__(self):
pass
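# The mock classes above stand in for the transport, session and station
# dependencies so that ZeroStream's connection handling, heartbeat and
# read/write logic can be exercised in isolation.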
class TestZeroStream:
@staticmethod
def teardown_method():
MockTransport.connect.reset_mock()
MockTransport.disconnect.reset_mock()
@fixture
def pipe(self):
pipe = mock.MagicMock(spec=ZeroPipe)
return pipe
@fixture
def session(self):
session = MockSession()
return session
@fixture
def transport(self):
return MockTransport()
@fixture
def station(self):
return MockStation()
@fixture
def stream(self, pipe, session, transport):
return ZeroStream(pipe, session, transport)
# init
def test_init_with_transport_cb(self, pipe, session, transport):
def get_transport(_):
return transport
get_transport = mock.MagicMock(side_effect=get_transport)
stream = ZeroStream(pipe, session, get_transport)
assert stream.transport == transport
# conn context
def test_new_connection_context(self, stream):
with stream.connection_context():
MockTransport.connect.assert_called_once_with(stream)
MockTransport.disconnect.assert_not_called()
MockTransport.disconnect.assert_called_once_with(stream)
def test_old_connection_context(self, stream):
MockTransport._connections[stream.id] = stream
with stream.connection_context():
MockTransport.connect.assert_not_called()
MockTransport.disconnect.assert_not_called()
# heartbeat
def test_heartbeat_fail_delay(self, stream):
assert stream.heartbeat_fail_delay == stream.DEFAULT_HEARTBEAT_FAIL_DELAY
stream.station = mock.MagicMock(spec=ZeroStation)
stream.station.heartbeat_fail_delay = 0
assert stream.heartbeat_fail_delay == stream.DEFAULT_HEARTBEAT_FAIL_DELAY
stream.station.heartbeat_fail_delay = 12345
assert stream.heartbeat_fail_delay == 12345
def test_busy_false(self, stream):
stream.refresh = mock.MagicMock()
stream.station = mock.MagicMock(spec=ZeroStation)
stream.station.heartbeat_fail_delay = 1
stream.heartbeat_ts = time.time() - 10
assert stream.is_busy is False
assert stream.refresh.call_count == 1
def test_busy_true(self, stream):
stream.refresh = mock.MagicMock()
stream.station = mock.MagicMock(spec=ZeroStation)
stream.station.heartbeat_fail_delay = 1000
stream.heartbeat_ts = time.time()
assert stream.is_busy is True
assert stream.refresh.call_count == 1
def test_heartbeat(self, stream):
stream.save = mock.MagicMock()
with patch("time.time", lambda: 12345):
stream.heartbeat()
assert stream.heartbeat_ts == 12345
stream.save.assert_called_once_with(save_dependency=False)
# open
def test_open(self, stream):
stream.save = mock.MagicMock()
stream.closed = True
assert stream.open() is stream
assert stream.closed is False
stream.save.assert_called_once_with(save_dependency=False)
def test_open_no_commit(self, stream):
stream.save = mock.MagicMock()
stream.closed = True
assert stream.open(save=False) is stream
assert stream.closed is False
stream.save.assert_not_called()
# close
def test_close(self, stream):
stream.save = mock.MagicMock()
assert stream.close() is stream
assert stream.closed is True
stream.save.assert_called_once_with(save_dependency=False)
def test_close_no_commit(self, stream):
stream.save = mock.MagicMock()
assert stream.close(save=False) is stream
assert stream.closed is True
stream.save.assert_not_called()
# read
def test_read(self, stream):
data = ["packet", 0, {}, "", None] + [None, "packet"] * 5
result_data = list(filter(lambda x: x is not None, data))
iter_data = iter(data)
def recv_cb(*_, **__):
try:
return next(iter_data)
except StopIteration:
return ...
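        # Once the mocked packets are exhausted, recv_cb returns Ellipsis, which
        # the read loop below treats as an end-of-stream sentinel and breaks on.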
connection_context = mock.MagicMock()
stream.connection_context = mock.MagicMock(
side_effect=lambda: connection_context
)
stream.transport.recv = mock.MagicMock(side_effect=recv_cb)
result = []
for item in stream.read(test_kw=1):
if item is ...:
break
result.append(item)
if platform.python_implementation().lower() == "pypy":
gc.collect()
assert result == result_data
assert stream.connection_context.call_count == 1
assert connection_context.__enter__.call_count == 1
assert connection_context.__exit__.call_count == 1
stream.transport.recv.assert_has_calls(
[call(stream, wait_timeout=None, test_kw=1) for _ in data]
)
def test_read_n_packets(self, stream):
iter_data = iter([None, "packet"] + ["packet"] * 10)
def recv_cb(*_, **__):
try:
return next(iter_data)
except StopIteration:
return ...
connection_context = mock.MagicMock()
stream.connection_context = mock.MagicMock(
side_effect=lambda: connection_context
)
stream.transport.recv = mock.MagicMock(side_effect=recv_cb)
result = []
for item in stream.read(count=5, test_kw=1):
if item is ...:
break
result.append(item)
assert result == ["packet"] * 5
assert stream.connection_context.call_count == 1
assert connection_context.__enter__.call_count == 1
assert connection_context.__exit__.call_count == 1
stream.transport.recv.assert_has_calls(
[call(stream, wait_timeout=None, test_kw=1) for _ in range(6)]
)
def test_read_while_stream_open(self, stream):
iter_data = iter(range(10))
def recv_cb(*_, **__):
try:
return next(iter_data)
except StopIteration:
return ...
connection_context = mock.MagicMock()
stream.connection_context = mock.MagicMock(
side_effect=lambda: connection_context
)
stream.transport.recv = mock.MagicMock(side_effect=recv_cb)
result = []
for item in stream.read(test_kw=1):
if item == 4:
stream.closed = True
if item is ...:
break
result.append(item)
assert result == list(range(5))
assert stream.connection_context.call_count == 1
assert connection_context.__enter__.call_count == 1
assert connection_context.__exit__.call_count == 1
stream.transport.recv.assert_has_calls(
[call(stream, wait_timeout=None, test_kw=1) for _ in range(5)]
)
def test_read_while_sess_open(self, stream):
stream.session._closed = True
iter_data = iter([0, 1, 2, 3, None, 4])
def recv_cb(*_, **__):
try:
return next(iter_data)
except StopIteration:
return ...
connection_context = mock.MagicMock()
stream.connection_context = mock.MagicMock(
side_effect=lambda: connection_context
)
stream.transport.recv = mock.MagicMock(side_effect=recv_cb)
result = []
for item in stream.read(test_kw=1):
if item is ...:
break
result.append(item)
assert result == list(range(4))
assert stream.connection_context.call_count == 1
assert connection_context.__enter__.call_count == 1
assert connection_context.__exit__.call_count == 1
stream.transport.recv.assert_has_calls(
[call(stream, wait_timeout=None, test_kw=1) for _ in range(5)]
)
def test_read_when_station_changed(self, stream, station):
iter_data = iter(range(10))
def recv_cb(*_, **__):
try:
return next(iter_data)
except StopIteration:
return ...
connection_context = mock.MagicMock()
stream.connection_context = mock.MagicMock(
side_effect=lambda: connection_context
)
stream.transport.recv = mock.MagicMock(side_effect=recv_cb)
result = []
for item in stream.read(test_kw=1):
if item == 4:
stream.station = station
if item is ...:
break
result.append(item)
assert result == list(range(5))
assert stream.connection_context.call_count == 1
assert connection_context.__enter__.call_count == 1
assert connection_context.__exit__.call_count == 1
stream.transport.recv.assert_has_calls(
[call(stream, wait_timeout=None, test_kw=1) for _ in range(5)]
)
def test_read_none(self, stream):
iter_data = iter([None, "packet"] + ["packet"] * 10)
def recv_cb(*_, **__):
try:
return next(iter_data)
except StopIteration:
return ...
connection_context = mock.MagicMock()
stream.connection_context = mock.MagicMock(
side_effect=lambda: connection_context
)
stream.transport.recv = mock.MagicMock(side_effect=recv_cb)
result = []
for item in stream.read(wait_timeout=1, test_kw=1):
if item is ...:
break
result.append(item)
assert result == []
assert stream.connection_context.call_count == 1
assert connection_context.__enter__.call_count == 1
assert connection_context.__exit__.call_count == 1
stream.transport.recv.assert_called_once_with(stream, wait_timeout=1, test_kw=1)
# write
def test_write(self, stream):
connection_context = mock.MagicMock()
stream.connection_context = mock.MagicMock(
side_effect=lambda: connection_context
)
stream.transport.send = mock.MagicMock()
stream.write("packet", test_kw=1)
stream.transport.send.assert_called_once_with(stream, "packet", test_kw=1)
assert stream.connection_context.call_count == 1
assert connection_context.__enter__.call_count == 1
assert connection_context.__exit__.call_count == 1
def test_write_many(self, stream):
connection_context = mock.MagicMock()
stream.connection_context = mock.MagicMock(
side_effect=lambda: connection_context
)
stream.transport.send = mock.MagicMock()
stream.write("packet", many=True, test_kw=1)
stream.transport.send.assert_has_calls(
[call(stream, i, test_kw=1) for i in "packet"]
)
assert stream.connection_context.call_count == 1
assert connection_context.__enter__.call_count == 1
assert connection_context.__exit__.call_count == 1
def test_write_when_closed(self, stream):
connection_context = mock.MagicMock()
stream.connection_context = mock.MagicMock(
side_effect=lambda: connection_context
)
stream.transport.send = mock.MagicMock()
stream.closed = True
stream.write("packet", test_kw=1)
stream.transport.send.assert_not_called()
stream.connection_context.assert_not_called()
connection_context.__enter__.assert_not_called()
connection_context.__exit__.assert_not_called()
def test_write_out(self, stream):
connection_context = mock.MagicMock()
stream.connection_context = mock.MagicMock(
side_effect=lambda: connection_context
)
stream.transport.send = mock.MagicMock()
stream.closed = True
stream.pipe.node_b = "<out>"
stream.write("packet", test_kw=1)
stream.transport.send.assert_called_once_with(stream, "packet", test_kw=1)
assert stream.connection_context.call_count == 1
assert connection_context.__enter__.call_count == 1
assert connection_context.__exit__.call_count == 1
# ack
def test_ack(self, stream):
stream.transport.ack = mock.MagicMock()
stream.ack("message", test_kw=1)
stream.transport.ack.assert_called_once_with(stream, "message", test_kw=1)
def test_ack_closed(self, stream):
stream.closed = True
stream.transport.ack = mock.MagicMock()
stream.ack("message", test_kw=1)
stream.transport.ack.assert_not_called()
# redirect
def test_redirect(self, stream, station):
station.save = mock.MagicMock()
station.stream_lock_ttl = 0
stream.save = mock.MagicMock()
stream.redirect_to(station)
assert stream.station == station
station.save.assert_called_once_with(save_dependency=False)
stream.save.assert_called_once_with(save_dependency=False)
# open/close context
def test_context(self, stream):
stream.open = mock.MagicMock()
stream.close = mock.MagicMock()
with stream:
stream.open.assert_called_once_with(save=False)
stream.close.assert_not_called()
stream.close.assert_called_once_with(save=False)
|
[
"platform.python_implementation",
"unittest.mock.MagicMock",
"unittest.mock.call",
"tethys.core.streams.stream_zero.ZeroStream",
"gc.collect",
"time.time",
"unittest.mock.patch"
] |
[((1104, 1120), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (1118, 1120), False, 'from unittest import mock\n'), ((1138, 1154), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (1152, 1154), False, 'from unittest import mock\n'), ((1610, 1639), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'spec': 'ZeroPipe'}), '(spec=ZeroPipe)\n', (1624, 1639), False, 'from unittest import mock\n'), ((1965, 2001), 'tethys.core.streams.stream_zero.ZeroStream', 'ZeroStream', (['pipe', 'session', 'transport'], {}), '(pipe, session, transport)\n', (1975, 2001), False, 'from tethys.core.streams.stream_zero import ZeroStream\n'), ((2168, 2209), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'side_effect': 'get_transport'}), '(side_effect=get_transport)\n', (2182, 2209), False, 'from unittest import mock\n'), ((2228, 2268), 'tethys.core.streams.stream_zero.ZeroStream', 'ZeroStream', (['pipe', 'session', 'get_transport'], {}), '(pipe, session, get_transport)\n', (2238, 2268), False, 'from tethys.core.streams.stream_zero import ZeroStream\n'), ((3049, 3081), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'spec': 'ZeroStation'}), '(spec=ZeroStation)\n', (3063, 3081), False, 'from unittest import mock\n'), ((3382, 3398), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (3396, 3398), False, 'from unittest import mock\n'), ((3424, 3456), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'spec': 'ZeroStation'}), '(spec=ZeroStation)\n', (3438, 3456), False, 'from unittest import mock\n'), ((3702, 3718), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (3716, 3718), False, 'from unittest import mock\n'), ((3744, 3776), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'spec': 'ZeroStation'}), '(spec=ZeroStation)\n', (3758, 3776), False, 'from unittest import mock\n'), ((3858, 3869), 'time.time', 'time.time', ([], {}), '()\n', (3867, 3869), False, 'import time\n'), ((4016, 4032), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (4030, 4032), False, 'from unittest import mock\n'), ((4293, 4309), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (4307, 4309), False, 'from unittest import mock\n'), ((4550, 4566), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (4564, 4566), False, 'from unittest import mock\n'), ((4794, 4810), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (4808, 4810), False, 'from unittest import mock\n'), ((5023, 5039), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (5037, 5039), False, 'from unittest import mock\n'), ((5556, 5572), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (5570, 5572), False, 'from unittest import mock\n'), ((5609, 5664), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'side_effect': '(lambda : connection_context)'}), '(side_effect=lambda : connection_context)\n', (5623, 5664), False, 'from unittest import mock\n'), ((5718, 5753), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'side_effect': 'recv_cb'}), '(side_effect=recv_cb)\n', (5732, 5753), False, 'from unittest import mock\n'), ((6618, 6634), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (6632, 6634), False, 'from unittest import mock\n'), ((6671, 6726), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'side_effect': '(lambda : connection_context)'}), '(side_effect=lambda : connection_context)\n', (6685, 6726), False, 'from unittest import mock\n'), ((6780, 6815), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'side_effect': 
'recv_cb'}), '(side_effect=recv_cb)\n', (6794, 6815), False, 'from unittest import mock\n'), ((7590, 7606), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (7604, 7606), False, 'from unittest import mock\n'), ((7643, 7698), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'side_effect': '(lambda : connection_context)'}), '(side_effect=lambda : connection_context)\n', (7657, 7698), False, 'from unittest import mock\n'), ((7752, 7787), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'side_effect': 'recv_cb'}), '(side_effect=recv_cb)\n', (7766, 7787), False, 'from unittest import mock\n'), ((8667, 8683), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (8681, 8683), False, 'from unittest import mock\n'), ((8720, 8775), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'side_effect': '(lambda : connection_context)'}), '(side_effect=lambda : connection_context)\n', (8734, 8775), False, 'from unittest import mock\n'), ((8829, 8864), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'side_effect': 'recv_cb'}), '(side_effect=recv_cb)\n', (8843, 8864), False, 'from unittest import mock\n'), ((9643, 9659), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (9657, 9659), False, 'from unittest import mock\n'), ((9696, 9751), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'side_effect': '(lambda : connection_context)'}), '(side_effect=lambda : connection_context)\n', (9710, 9751), False, 'from unittest import mock\n'), ((9805, 9840), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'side_effect': 'recv_cb'}), '(side_effect=recv_cb)\n', (9819, 9840), False, 'from unittest import mock\n'), ((10687, 10703), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (10701, 10703), False, 'from unittest import mock\n'), ((10740, 10795), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'side_effect': '(lambda : connection_context)'}), '(side_effect=lambda : connection_context)\n', (10754, 10795), False, 'from unittest import mock\n'), ((10849, 10884), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'side_effect': 'recv_cb'}), '(side_effect=recv_cb)\n', (10863, 10884), False, 'from unittest import mock\n'), ((11420, 11436), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (11434, 11436), False, 'from unittest import mock\n'), ((11473, 11528), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'side_effect': '(lambda : connection_context)'}), '(side_effect=lambda : connection_context)\n', (11487, 11528), False, 'from unittest import mock\n'), ((11582, 11598), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (11596, 11598), False, 'from unittest import mock\n'), ((11971, 11987), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (11985, 11987), False, 'from unittest import mock\n'), ((12024, 12079), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'side_effect': '(lambda : connection_context)'}), '(side_effect=lambda : connection_context)\n', (12038, 12079), False, 'from unittest import mock\n'), ((12133, 12149), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (12147, 12149), False, 'from unittest import mock\n'), ((12574, 12590), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (12588, 12590), False, 'from unittest import mock\n'), ((12627, 12682), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'side_effect': '(lambda : connection_context)'}), '(side_effect=lambda : connection_context)\n', (12641, 12682), False, 'from unittest import mock\n'), ((12736, 12752), 
'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (12750, 12752), False, 'from unittest import mock\n'), ((13111, 13127), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (13125, 13127), False, 'from unittest import mock\n'), ((13164, 13219), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'side_effect': '(lambda : connection_context)'}), '(side_effect=lambda : connection_context)\n', (13178, 13219), False, 'from unittest import mock\n'), ((13273, 13289), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (13287, 13289), False, 'from unittest import mock\n'), ((13734, 13750), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (13748, 13750), False, 'from unittest import mock\n'), ((13977, 13993), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (13991, 13993), False, 'from unittest import mock\n'), ((14172, 14188), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (14186, 14188), False, 'from unittest import mock\n'), ((14248, 14264), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (14262, 14264), False, 'from unittest import mock\n'), ((14564, 14580), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (14578, 14580), False, 'from unittest import mock\n'), ((14604, 14620), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (14618, 14620), False, 'from unittest import mock\n'), ((3535, 3546), 'time.time', 'time.time', ([], {}), '()\n', (3544, 3546), False, 'import time\n'), ((4047, 4081), 'unittest.mock.patch', 'patch', (['"""time.time"""', '(lambda : 12345)'], {}), "('time.time', lambda : 12345)\n", (4052, 4081), False, 'from unittest.mock import patch, call\n'), ((5978, 5990), 'gc.collect', 'gc.collect', ([], {}), '()\n', (5988, 5990), False, 'import gc\n'), ((6266, 6308), 'unittest.mock.call', 'call', (['stream'], {'wait_timeout': 'None', 'test_kw': '(1)'}), '(stream, wait_timeout=None, test_kw=1)\n', (6270, 6308), False, 'from unittest.mock import patch, call\n'), ((7251, 7293), 'unittest.mock.call', 'call', (['stream'], {'wait_timeout': 'None', 'test_kw': '(1)'}), '(stream, wait_timeout=None, test_kw=1)\n', (7255, 7293), False, 'from unittest.mock import patch, call\n'), ((8279, 8321), 'unittest.mock.call', 'call', (['stream'], {'wait_timeout': 'None', 'test_kw': '(1)'}), '(stream, wait_timeout=None, test_kw=1)\n', (8283, 8321), False, 'from unittest.mock import patch, call\n'), ((9292, 9334), 'unittest.mock.call', 'call', (['stream'], {'wait_timeout': 'None', 'test_kw': '(1)'}), '(stream, wait_timeout=None, test_kw=1)\n', (9296, 9334), False, 'from unittest.mock import patch, call\n'), ((10336, 10378), 'unittest.mock.call', 'call', (['stream'], {'wait_timeout': 'None', 'test_kw': '(1)'}), '(stream, wait_timeout=None, test_kw=1)\n', (10340, 10378), False, 'from unittest.mock import patch, call\n'), ((12266, 12292), 'unittest.mock.call', 'call', (['stream', 'i'], {'test_kw': '(1)'}), '(stream, i, test_kw=1)\n', (12270, 12292), False, 'from unittest.mock import patch, call\n'), ((5914, 5946), 'platform.python_implementation', 'platform.python_implementation', ([], {}), '()\n', (5944, 5946), False, 'import platform\n')]
|
# Generated by Django 3.0.1 on 2020-02-15 06:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('course', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='subject',
name='Number_Of_Questions',
field=models.IntegerField(default=0),
),
]
|
[
"django.db.models.IntegerField"
] |
[((336, 366), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (355, 366), False, 'from django.db import migrations, models\n')]
|
"""
Unit test for the DAG endpoints
"""
# Import from libraries
import json
# Import from internal modules
from cornflow.shared.const import EXEC_STATE_CORRECT, EXEC_STATE_MANUAL
from cornflow.tests.const import (
DAG_URL,
EXECUTION_URL_NORUN,
CASE_PATH,
INSTANCE_URL,
)
from cornflow.tests.unit.test_executions import TestExecutionsDetailEndpointMock
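# TestDagEndpoint posts manually-created executions through the DAG endpoint as
# service and planner users; TestDagDetailEndpoint writes solution data to and
# reads it back from a single execution via the DAG detail endpoint.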
class TestDagEndpoint(TestExecutionsDetailEndpointMock):
def test_manual_dag_service_user(self):
with open(CASE_PATH) as f:
payload = json.load(f)
data = dict(
data=payload["data"],
state=EXEC_STATE_MANUAL,
)
payload_to_send = {**self.payload, **data}
token = self.create_service_user()
self.items_to_check = [
"config",
"name",
"description",
"schema",
"instance_id",
"state",
]
idx = self.create_new_row(
url=DAG_URL,
model=self.model,
payload=payload_to_send,
check_payload=True,
token=token,
)
def test_manual_dag_planner_user(self):
with open(CASE_PATH) as f:
payload = json.load(f)
data = dict(
data=payload["data"],
state=EXEC_STATE_MANUAL,
)
payload_to_send = {**self.payload, **data}
token = self.create_planner()
self.items_to_check = [
"config",
"name",
"description",
"schema",
"instance_id",
"state",
]
idx = self.create_new_row(
url=DAG_URL,
model=self.model,
payload=payload_to_send,
check_payload=True,
token=token,
)
class TestDagDetailEndpoint(TestExecutionsDetailEndpointMock):
def test_put_dag(self):
idx = self.create_new_row(EXECUTION_URL_NORUN, self.model, self.payload)
with open(CASE_PATH) as f:
payload = json.load(f)
data = dict(
data=payload["data"],
state=EXEC_STATE_CORRECT,
)
payload_to_check = {**self.payload, **data}
token = self.create_service_user()
data = self.update_row(
url=DAG_URL + idx + "/",
payload_to_check=payload_to_check,
change=data,
token=token,
check_payload=False,
)
def test_get_dag(self):
idx = self.create_new_row(EXECUTION_URL_NORUN, self.model, self.payload)
token = self.create_service_user()
data = self.get_one_row(
url=DAG_URL + idx + "/",
token=token,
check_payload=False,
payload=self.payload,
)
instance_data = self.get_one_row(
url=INSTANCE_URL + self.payload["instance_id"] + "/data/",
payload=dict(),
check_payload=False,
)
self.assertEqual(data["data"], instance_data["data"])
self.assertEqual(data["config"], self.payload["config"])
return
|
[
"json.load"
] |
[((530, 542), 'json.load', 'json.load', (['f'], {}), '(f)\n', (539, 542), False, 'import json\n'), ((1218, 1230), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1227, 1230), False, 'import json\n'), ((2030, 2042), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2039, 2042), False, 'import json\n')]
|
#-------------------------------------------------------------#
#   The ResNet50 network part
#-------------------------------------------------------------#
from keras import backend as K
from keras import initializers, layers, regularizers
from keras.engine import InputSpec, Layer
from keras.initializers import random_normal
from keras.layers import (Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed,
ZeroPadding2D)
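# A BatchNormalization variant whose gamma/beta and running mean/std are all
# registered as non-trainable weights, so the layer's statistics stay frozen
# during training.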
class BatchNormalization(Layer):
def __init__(self, epsilon=1e-3, axis=-1,
weights=None, beta_init='zero', gamma_init='one',
gamma_regularizer=None, beta_regularizer=None, **kwargs):
self.supports_masking = True
self.beta_init = initializers.get(beta_init)
self.gamma_init = initializers.get(gamma_init)
self.epsilon = epsilon
self.axis = axis
self.gamma_regularizer = regularizers.get(gamma_regularizer)
self.beta_regularizer = regularizers.get(beta_regularizer)
self.initial_weights = weights
super(BatchNormalization, self).__init__(**kwargs)
def build(self, input_shape):
self.input_spec = [InputSpec(shape=input_shape)]
shape = (input_shape[self.axis],)
self.gamma = self.add_weight(shape,
initializer=self.gamma_init,
regularizer=self.gamma_regularizer,
name='{}_gamma'.format(self.name),
trainable=False)
self.beta = self.add_weight(shape,
initializer=self.beta_init,
regularizer=self.beta_regularizer,
name='{}_beta'.format(self.name),
trainable=False)
self.running_mean = self.add_weight(shape, initializer='zero',
name='{}_running_mean'.format(self.name),
trainable=False)
self.running_std = self.add_weight(shape, initializer='one',
name='{}_running_std'.format(self.name),
trainable=False)
if self.initial_weights is not None:
self.set_weights(self.initial_weights)
del self.initial_weights
self.built = True
def call(self, x, mask=None):
assert self.built, 'Layer must be built before being called'
input_shape = K.int_shape(x)
reduction_axes = list(range(len(input_shape)))
del reduction_axes[self.axis]
broadcast_shape = [1] * len(input_shape)
broadcast_shape[self.axis] = input_shape[self.axis]
        if sorted(reduction_axes) == list(range(K.ndim(x)))[:-1]:
x_normed = K.batch_normalization(
x, self.running_mean, self.running_std,
self.beta, self.gamma,
epsilon=self.epsilon)
else:
broadcast_running_mean = K.reshape(self.running_mean, broadcast_shape)
broadcast_running_std = K.reshape(self.running_std, broadcast_shape)
broadcast_beta = K.reshape(self.beta, broadcast_shape)
broadcast_gamma = K.reshape(self.gamma, broadcast_shape)
x_normed = K.batch_normalization(
x, broadcast_running_mean, broadcast_running_std,
broadcast_beta, broadcast_gamma,
epsilon=self.epsilon)
return x_normed
def get_config(self):
config = {'epsilon': self.epsilon,
'axis': self.axis,
'gamma_regularizer': self.gamma_regularizer.get_config() if self.gamma_regularizer else None,
'beta_regularizer': self.beta_regularizer.get_config() if self.beta_regularizer else None}
base_config = super(BatchNormalization, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
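# identity_block: a residual block whose shortcut is the unmodified input, so the
# block's input and output shapes must match (1x1 -> kxk -> 1x1 convolutions).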
def identity_block(input_tensor, kernel_size, filters, stage, block):
filters1, filters2, filters3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = Conv2D(filters1, (1, 1), kernel_initializer=random_normal(stddev=0.02), name=conv_name_base + '2a')(input_tensor)
x = BatchNormalization(name=bn_name_base + '2a')(x)
x = Activation('relu')(x)
x = Conv2D(filters2, kernel_size, padding='same', kernel_initializer=random_normal(stddev=0.02), name=conv_name_base + '2b')(x)
x = BatchNormalization(name=bn_name_base + '2b')(x)
x = Activation('relu')(x)
x = Conv2D(filters3, (1, 1), kernel_initializer=random_normal(stddev=0.02), name=conv_name_base + '2c')(x)
x = BatchNormalization(name=bn_name_base + '2c')(x)
x = layers.add([x, input_tensor])
x = Activation('relu')(x)
return x
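# conv_block: a residual block whose shortcut is a strided 1x1 convolution, used
# where the spatial resolution or channel count changes across the block.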
def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):
filters1, filters2, filters3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = Conv2D(filters1, (1, 1), strides=strides, kernel_initializer=random_normal(stddev=0.02),
name=conv_name_base + '2a')(input_tensor)
x = BatchNormalization(name=bn_name_base + '2a')(x)
x = Activation('relu')(x)
x = Conv2D(filters2, kernel_size, padding='same', kernel_initializer=random_normal(stddev=0.02),
name=conv_name_base + '2b')(x)
x = BatchNormalization(name=bn_name_base + '2b')(x)
x = Activation('relu')(x)
x = Conv2D(filters3, (1, 1), kernel_initializer=random_normal(stddev=0.02),
name=conv_name_base + '2c')(x)
x = BatchNormalization(name=bn_name_base + '2c')(x)
shortcut = Conv2D(filters3, (1, 1), strides=strides, kernel_initializer=random_normal(stddev=0.02),
name=conv_name_base + '1')(input_tensor)
shortcut = BatchNormalization(name=bn_name_base + '1')(shortcut)
x = layers.add([x, shortcut])
x = Activation('relu')(x)
return x
def ResNet50(inputs):
    #-----------------------------------#
    #   Assume the input image is 600,600,3
    #-----------------------------------#
img_input = inputs
# 600,600,3 -> 300,300,64
x = ZeroPadding2D((3, 3))(img_input)
x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(x)
x = BatchNormalization(name='bn_conv1')(x)
x = Activation('relu')(x)
# 300,300,64 -> 150,150,64
x = MaxPooling2D((3, 3), strides=(2, 2), padding="same")(x)
# 150,150,64 -> 150,150,256
x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')
# 150,150,256 -> 75,75,512
x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')
# 75,75,512 -> 38,38,1024
x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')
    # The final result is a 38,38,1024 shared feature map
return x
def identity_block_td(input_tensor, kernel_size, filters, stage, block):
nb_filter1, nb_filter2, nb_filter3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = TimeDistributed(Conv2D(nb_filter1, (1, 1), kernel_initializer='normal'), name=conv_name_base + '2a')(input_tensor)
x = TimeDistributed(BatchNormalization(), name=bn_name_base + '2a')(x)
x = Activation('relu')(x)
x = TimeDistributed(Conv2D(nb_filter2, (kernel_size, kernel_size), kernel_initializer='normal',padding='same'), name=conv_name_base + '2b')(x)
x = TimeDistributed(BatchNormalization(), name=bn_name_base + '2b')(x)
x = Activation('relu')(x)
x = TimeDistributed(Conv2D(nb_filter3, (1, 1), kernel_initializer='normal'), name=conv_name_base + '2c')(x)
x = TimeDistributed(BatchNormalization(), name=bn_name_base + '2c')(x)
x = Add()([x, input_tensor])
x = Activation('relu')(x)
return x
def conv_block_td(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):
nb_filter1, nb_filter2, nb_filter3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = TimeDistributed(Conv2D(nb_filter1, (1, 1), strides=strides, kernel_initializer='normal'), name=conv_name_base + '2a')(input_tensor)
x = TimeDistributed(BatchNormalization(), name=bn_name_base + '2a')(x)
x = Activation('relu')(x)
x = TimeDistributed(Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same', kernel_initializer='normal'), name=conv_name_base + '2b')(x)
x = TimeDistributed(BatchNormalization(), name=bn_name_base + '2b')(x)
x = Activation('relu')(x)
x = TimeDistributed(Conv2D(nb_filter3, (1, 1), kernel_initializer='normal'), name=conv_name_base + '2c')(x)
x = TimeDistributed(BatchNormalization(), name=bn_name_base + '2c')(x)
shortcut = TimeDistributed(Conv2D(nb_filter3, (1, 1), strides=strides, kernel_initializer='normal'), name=conv_name_base + '1')(input_tensor)
shortcut = TimeDistributed(BatchNormalization(), name=bn_name_base + '1')(shortcut)
x = Add()([x, shortcut])
x = Activation('relu')(x)
return x
def classifier_layers(x):
# num_rois, 14, 14, 1024 -> num_rois, 7, 7, 2048
x = conv_block_td(x, 3, [512, 512, 2048], stage=5, block='a', strides=(2, 2))
# num_rois, 7, 7, 2048 -> num_rois, 7, 7, 2048
x = identity_block_td(x, 3, [512, 512, 2048], stage=5, block='b')
# num_rois, 7, 7, 2048 -> num_rois, 7, 7, 2048
x = identity_block_td(x, 3, [512, 512, 2048], stage=5, block='c')
# num_rois, 7, 7, 2048 -> num_rois, 1, 1, 2048
x = TimeDistributed(AveragePooling2D((7, 7)), name='avg_pool')(x)
return x
|
[
"keras.layers.Conv2D",
"keras.engine.InputSpec",
"keras.backend.ndim",
"keras.layers.MaxPooling2D",
"keras.regularizers.get",
"keras.backend.reshape",
"keras.layers.add",
"keras.layers.AveragePooling2D",
"keras.backend.batch_normalization",
"keras.layers.Add",
"keras.layers.Activation",
"keras.initializers.get",
"keras.initializers.random_normal",
"keras.backend.int_shape",
"keras.layers.ZeroPadding2D"
] |
[((5015, 5044), 'keras.layers.add', 'layers.add', (['[x, input_tensor]'], {}), '([x, input_tensor])\n', (5025, 5044), False, 'from keras import initializers, layers, regularizers\n'), ((6269, 6294), 'keras.layers.add', 'layers.add', (['[x, shortcut]'], {}), '([x, shortcut])\n', (6279, 6294), False, 'from keras import initializers, layers, regularizers\n'), ((788, 815), 'keras.initializers.get', 'initializers.get', (['beta_init'], {}), '(beta_init)\n', (804, 815), False, 'from keras import initializers, layers, regularizers\n'), ((843, 871), 'keras.initializers.get', 'initializers.get', (['gamma_init'], {}), '(gamma_init)\n', (859, 871), False, 'from keras import initializers, layers, regularizers\n'), ((964, 999), 'keras.regularizers.get', 'regularizers.get', (['gamma_regularizer'], {}), '(gamma_regularizer)\n', (980, 999), False, 'from keras import initializers, layers, regularizers\n'), ((1033, 1067), 'keras.regularizers.get', 'regularizers.get', (['beta_regularizer'], {}), '(beta_regularizer)\n', (1049, 1067), False, 'from keras import initializers, layers, regularizers\n'), ((2664, 2678), 'keras.backend.int_shape', 'K.int_shape', (['x'], {}), '(x)\n', (2675, 2678), True, 'from keras import backend as K\n'), ((4588, 4606), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (4598, 4606), False, 'from keras.layers import Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((4811, 4829), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (4821, 4829), False, 'from keras.layers import Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((5054, 5072), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (5064, 5072), False, 'from keras.layers import Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((5568, 5586), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (5578, 5586), False, 'from keras.layers import Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((5807, 5825), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (5817, 5825), False, 'from keras.layers import Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((6304, 6322), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (6314, 6322), False, 'from keras.layers import Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((6546, 6567), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(3, 3)'], {}), '((3, 3))\n', (6559, 6567), False, 'from keras.layers import Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((6588, 6636), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(7, 7)'], {'strides': '(2, 2)', 'name': '"""conv1"""'}), "(64, (7, 7), strides=(2, 2), name='conv1')\n", (6594, 6636), False, 'from keras.layers import Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((6697, 6715), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (6707, 6715), False, 'from keras.layers import Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((6762, 6814), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(3, 3)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), "((3, 3), strides=(2, 2), 
padding='same')\n", (6774, 6814), False, 'from keras.layers import Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((8297, 8315), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (8307, 8315), False, 'from keras.layers import Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((8554, 8572), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (8564, 8572), False, 'from keras.layers import Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((8778, 8783), 'keras.layers.Add', 'Add', ([], {}), '()\n', (8781, 8783), False, 'from keras.layers import Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((8812, 8830), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (8822, 8830), False, 'from keras.layers import Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((9335, 9353), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (9345, 9353), False, 'from keras.layers import Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((9593, 9611), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (9603, 9611), False, 'from keras.layers import Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((10055, 10060), 'keras.layers.Add', 'Add', ([], {}), '()\n', (10058, 10060), False, 'from keras.layers import Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((10085, 10103), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (10095, 10103), False, 'from keras.layers import Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((1233, 1261), 'keras.engine.InputSpec', 'InputSpec', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (1242, 1261), False, 'from keras.engine import InputSpec, Layer\n'), ((2974, 3084), 'keras.backend.batch_normalization', 'K.batch_normalization', (['x', 'self.running_mean', 'self.running_std', 'self.beta', 'self.gamma'], {'epsilon': 'self.epsilon'}), '(x, self.running_mean, self.running_std, self.beta,\n self.gamma, epsilon=self.epsilon)\n', (2995, 3084), True, 'from keras import backend as K\n'), ((3186, 3231), 'keras.backend.reshape', 'K.reshape', (['self.running_mean', 'broadcast_shape'], {}), '(self.running_mean, broadcast_shape)\n', (3195, 3231), True, 'from keras import backend as K\n'), ((3269, 3313), 'keras.backend.reshape', 'K.reshape', (['self.running_std', 'broadcast_shape'], {}), '(self.running_std, broadcast_shape)\n', (3278, 3313), True, 'from keras import backend as K\n'), ((3344, 3381), 'keras.backend.reshape', 'K.reshape', (['self.beta', 'broadcast_shape'], {}), '(self.beta, broadcast_shape)\n', (3353, 3381), True, 'from keras import backend as K\n'), ((3413, 3451), 'keras.backend.reshape', 'K.reshape', (['self.gamma', 'broadcast_shape'], {}), '(self.gamma, broadcast_shape)\n', (3422, 3451), True, 'from keras import backend as K\n'), ((3476, 3606), 'keras.backend.batch_normalization', 'K.batch_normalization', (['x', 'broadcast_running_mean', 'broadcast_running_std', 'broadcast_beta', 'broadcast_gamma'], {'epsilon': 'self.epsilon'}), '(x, broadcast_running_mean, broadcast_running_std,\n broadcast_beta, broadcast_gamma, 
epsilon=self.epsilon)\n', (3497, 3606), True, 'from keras import backend as K\n'), ((8113, 8168), 'keras.layers.Conv2D', 'Conv2D', (['nb_filter1', '(1, 1)'], {'kernel_initializer': '"""normal"""'}), "(nb_filter1, (1, 1), kernel_initializer='normal')\n", (8119, 8168), False, 'from keras.layers import Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((8346, 8441), 'keras.layers.Conv2D', 'Conv2D', (['nb_filter2', '(kernel_size, kernel_size)'], {'kernel_initializer': '"""normal"""', 'padding': '"""same"""'}), "(nb_filter2, (kernel_size, kernel_size), kernel_initializer='normal',\n padding='same')\n", (8352, 8441), False, 'from keras.layers import Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((8603, 8658), 'keras.layers.Conv2D', 'Conv2D', (['nb_filter3', '(1, 1)'], {'kernel_initializer': '"""normal"""'}), "(nb_filter3, (1, 1), kernel_initializer='normal')\n", (8609, 8658), False, 'from keras.layers import Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((9134, 9206), 'keras.layers.Conv2D', 'Conv2D', (['nb_filter1', '(1, 1)'], {'strides': 'strides', 'kernel_initializer': '"""normal"""'}), "(nb_filter1, (1, 1), strides=strides, kernel_initializer='normal')\n", (9140, 9206), False, 'from keras.layers import Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((9384, 9479), 'keras.layers.Conv2D', 'Conv2D', (['nb_filter2', '(kernel_size, kernel_size)'], {'padding': '"""same"""', 'kernel_initializer': '"""normal"""'}), "(nb_filter2, (kernel_size, kernel_size), padding='same',\n kernel_initializer='normal')\n", (9390, 9479), False, 'from keras.layers import Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((9642, 9697), 'keras.layers.Conv2D', 'Conv2D', (['nb_filter3', '(1, 1)'], {'kernel_initializer': '"""normal"""'}), "(nb_filter3, (1, 1), kernel_initializer='normal')\n", (9648, 9697), False, 'from keras.layers import Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((9840, 9912), 'keras.layers.Conv2D', 'Conv2D', (['nb_filter3', '(1, 1)'], {'strides': 'strides', 'kernel_initializer': '"""normal"""'}), "(nb_filter3, (1, 1), strides=strides, kernel_initializer='normal')\n", (9846, 9912), False, 'from keras.layers import Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((10610, 10634), 'keras.layers.AveragePooling2D', 'AveragePooling2D', (['(7, 7)'], {}), '((7, 7))\n', (10626, 10634), False, 'from keras.layers import Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed, ZeroPadding2D\n'), ((4452, 4478), 'keras.initializers.random_normal', 'random_normal', ([], {'stddev': '(0.02)'}), '(stddev=0.02)\n', (4465, 4478), False, 'from keras.initializers import random_normal\n'), ((4686, 4712), 'keras.initializers.random_normal', 'random_normal', ([], {'stddev': '(0.02)'}), '(stddev=0.02)\n', (4699, 4712), False, 'from keras.initializers import random_normal\n'), ((4888, 4914), 'keras.initializers.random_normal', 'random_normal', ([], {'stddev': '(0.02)'}), '(stddev=0.02)\n', (4901, 4914), False, 'from keras.initializers import random_normal\n'), ((5416, 5442), 'keras.initializers.random_normal', 'random_normal', ([], {'stddev': '(0.02)'}), '(stddev=0.02)\n', (5429, 5442), False, 'from keras.initializers import random_normal\n'), ((5666, 5692), 'keras.initializers.random_normal', 
'random_normal', ([], {'stddev': '(0.02)'}), '(stddev=0.02)\n', (5679, 5692), False, 'from keras.initializers import random_normal\n'), ((5884, 5910), 'keras.initializers.random_normal', 'random_normal', ([], {'stddev': '(0.02)'}), '(stddev=0.02)\n', (5897, 5910), False, 'from keras.initializers import random_normal\n'), ((6096, 6122), 'keras.initializers.random_normal', 'random_normal', ([], {'stddev': '(0.02)'}), '(stddev=0.02)\n', (6109, 6122), False, 'from keras.initializers import random_normal\n'), ((2933, 2942), 'keras.backend.ndim', 'K.ndim', (['x'], {}), '(x)\n', (2939, 2942), True, 'from keras import backend as K\n')]
|
# coding=utf-8
# Copyright 2022 The Meta-Dataset Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2,python3
"""Classifier-related code."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gin.tf
from meta_dataset.models import functional_backbones
import tensorflow.compat.v1 as tf
def linear_classifier_forward_pass(embeddings, w_fc, b_fc, cosine_classifier,
cosine_logits_multiplier, use_weight_norm):
"""Passes embeddings through the linear layer defined by w_fc and b_fc.
Args:
embeddings: A Tensor of size [batch size, embedding dim].
w_fc: A Tensor of size [embedding dim, num outputs].
b_fc: Either None, or a Tensor of size [num outputs] or []. If
cosine_classifier is False, it can not be None.
cosine_classifier: A bool. If true, a cosine classifier is used which does
not require the bias b_fc.
cosine_logits_multiplier: A float. Only used if cosine_classifier is True,
and multiplies the resulting logits.
use_weight_norm: A bool. Whether weight norm was used. If so, then if using
cosine classifier, normalize only the embeddings but not the weights.
Returns:
logits: A Tensor of size [batch size, num outputs].
"""
if cosine_classifier:
# Each column of the weight matrix may be interpreted as a class
    # representation (of the same dimensionality as the embedding space). The
# logit for an embedding vector belonging to that class is the cosine
# similarity between that embedding and that class representation.
embeddings = tf.nn.l2_normalize(embeddings, axis=1, epsilon=1e-3)
if not use_weight_norm:
# Only normalize the weights if weight norm was not used.
w_fc = tf.nn.l2_normalize(w_fc, axis=0, epsilon=1e-3)
logits = tf.matmul(embeddings, w_fc)
# Scale the logits as passing numbers in [-1, 1] to softmax is not very
# expressive.
logits *= cosine_logits_multiplier
else:
assert b_fc is not None
logits = tf.matmul(embeddings, w_fc) + b_fc
return logits
@gin.configurable
def linear_classifier(embeddings, num_classes, cosine_classifier,
cosine_logits_multiplier, use_weight_norm, weight_decay):
"""Forward pass through a linear classifier, or possibly a cosine classifier.
Args:
embeddings: A Tensor of size [batch size, embedding dim].
num_classes: An integer; the dimension of the classification.
cosine_classifier: A bool. If true, a cosine classifier is used, which does
not require a bias.
cosine_logits_multiplier: A float. Only used if cosine_classifier is True,
and multiplies the resulting logits.
use_weight_norm: A bool. Whether weight norm was used. If so, then if using
cosine classifier, normalize only the embeddings but not the weights.
weight_decay: A float; the scalar multiple on the L2 regularization of the
weight matrix.
Returns:
logits: A Tensor of size [batch size, num outputs].
"""
embedding_dims = embeddings.get_shape().as_list()[-1]
if use_weight_norm:
# A variable to keep track of whether the initialization has already
# happened.
data_dependent_init_done = tf.get_variable(
'data_dependent_init_done',
initializer=0,
dtype=tf.int32,
trainable=False)
w_fc = tf.get_variable(
'w_fc', [embedding_dims, num_classes],
initializer=tf.random_normal_initializer(0, 0.05),
trainable=True)
# This init is temporary as it needs to be done in a data-dependent way.
# It will be overwritten during the first forward pass through this layer.
g = tf.get_variable(
'g',
dtype=tf.float32,
initializer=tf.ones([num_classes]),
trainable=True)
b_fc = None
if not cosine_classifier:
# Also initialize a bias.
b_fc = tf.get_variable(
'b_fc', initializer=tf.zeros([num_classes]), trainable=True)
def _do_data_dependent_init():
"""Returns ops for the data-dependent init of g and maybe b_fc."""
w_fc_normalized = tf.nn.l2_normalize(w_fc.read_value(), [0])
output_init = tf.matmul(embeddings, w_fc_normalized)
mean_init, var_init = tf.nn.moments(output_init, [0])
# Data-dependent init values.
g_init_value = 1. / tf.sqrt(var_init + 1e-10)
ops = [tf.assign(g, g_init_value)]
if not cosine_classifier:
# Also initialize a bias in a data-dependent way.
b_fc_init_value = -mean_init * g_init_value
ops.append(tf.assign(b_fc, b_fc_init_value))
# Mark that the data-dependent initialization is done to prevent it from
# happening again in the future.
ops.append(tf.assign(data_dependent_init_done, 1))
return tf.group(*ops)
# Possibly perform data-dependent init (if it hasn't been done already).
init_op = tf.cond(
tf.equal(data_dependent_init_done, 0), _do_data_dependent_init,
tf.no_op)
with tf.control_dependencies([init_op]):
# Apply weight normalization.
w_fc *= g / tf.sqrt(tf.reduce_sum(tf.square(w_fc), [0]))
# Forward pass through the layer defined by w_fc and b_fc.
logits = linear_classifier_forward_pass(embeddings, w_fc, b_fc,
cosine_classifier,
cosine_logits_multiplier, True)
else:
# No weight norm.
w_fc = functional_backbones.weight_variable([embedding_dims, num_classes],
weight_decay=weight_decay)
b_fc = None
if not cosine_classifier:
# Also initialize a bias.
b_fc = functional_backbones.bias_variable([num_classes])
# Forward pass through the layer defined by w_fc and b_fc.
logits = linear_classifier_forward_pass(embeddings, w_fc, b_fc,
cosine_classifier,
cosine_logits_multiplier, False)
return logits
@gin.configurable
def separate_head_linear_classifier(embeddings, num_classes, dataset_idx,
start_idx, cosine_classifier,
cosine_logits_multiplier, learnable_scale,
weight_decay):
"""A linear classifier with num_sets heads, for different datasets.
Args:
embeddings: A Tensor of size [batch size, embedding dim].
num_classes: A list of integers; the dimension of the classifier layers of
the different heads.
dataset_idx: An int Tensor. The index of the dataset head to use.
start_idx: An int Tensor. The index of the first class of the given dataset.
cosine_classifier: A bool. If true, a cosine classifier is used, which does
not require a bias.
cosine_logits_multiplier: A float. Only used if cosine_classifier is True,
and multiplies the resulting logits.
learnable_scale: A bool. Whether to make the cosine_logits_multiplier a
learnable parameter. Only applies if cosine_classifier is True.
weight_decay: A float; the scalar multiple on the L2 regularization of the
weight matrix.
Returns:
logits: A Tensor of size [batch size, num outputs].
"""
if not cosine_classifier:
raise NotImplementedError('`separate_head_linear_classifier` currently '
'only supports `cosine_classifier` True.')
if learnable_scale:
cosine_logits_multiplier = tf.get_variable(
'cosine_scale',
initializer=cosine_logits_multiplier,
dtype=tf.float32,
trainable=True)
embedding_dims = embeddings.get_shape().as_list()[-1]
w_fc = functional_backbones.weight_variable(
[embedding_dims, sum(num_classes)], weight_decay=weight_decay)
# Select the output "head" to use in the forward pass.
dataset_num_classes = tf.gather(num_classes, dataset_idx)
w_fc = w_fc[:, start_idx:start_idx + dataset_num_classes]
logits = linear_classifier_forward_pass(embeddings, w_fc, None,
cosine_classifier,
cosine_logits_multiplier, False)
return logits
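

# --- Editor's note: hedged illustration, not part of the original module. ---
# A minimal sketch of what the cosine-classifier branch above computes, using
# only TF ops that already appear in this file. `_cosine_logits_sketch` is an
# invented name; the real forward pass additionally handles weight norm and
# the optional bias.
def _cosine_logits_sketch(embeddings, w_fc, cosine_logits_multiplier=10.0):
  """Returns scaled cosine similarities between embeddings and class weights."""
  embeddings = tf.nn.l2_normalize(embeddings, axis=1, epsilon=1e-3)
  w_fc = tf.nn.l2_normalize(w_fc, axis=0, epsilon=1e-3)
  return cosine_logits_multiplier * tf.matmul(embeddings, w_fc)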
|
[
"tensorflow.compat.v1.nn.l2_normalize",
"tensorflow.compat.v1.matmul",
"tensorflow.compat.v1.nn.moments",
"meta_dataset.models.functional_backbones.weight_variable",
"tensorflow.compat.v1.get_variable",
"tensorflow.compat.v1.equal",
"tensorflow.compat.v1.control_dependencies",
"tensorflow.compat.v1.gather",
"meta_dataset.models.functional_backbones.bias_variable",
"tensorflow.compat.v1.random_normal_initializer",
"tensorflow.compat.v1.sqrt",
"tensorflow.compat.v1.square",
"tensorflow.compat.v1.assign",
"tensorflow.compat.v1.zeros",
"tensorflow.compat.v1.group",
"tensorflow.compat.v1.ones"
] |
[((8433, 8468), 'tensorflow.compat.v1.gather', 'tf.gather', (['num_classes', 'dataset_idx'], {}), '(num_classes, dataset_idx)\n', (8442, 8468), True, 'import tensorflow.compat.v1 as tf\n'), ((2153, 2206), 'tensorflow.compat.v1.nn.l2_normalize', 'tf.nn.l2_normalize', (['embeddings'], {'axis': '(1)', 'epsilon': '(0.001)'}), '(embeddings, axis=1, epsilon=0.001)\n', (2171, 2206), True, 'import tensorflow.compat.v1 as tf\n'), ((2371, 2398), 'tensorflow.compat.v1.matmul', 'tf.matmul', (['embeddings', 'w_fc'], {}), '(embeddings, w_fc)\n', (2380, 2398), True, 'import tensorflow.compat.v1 as tf\n'), ((3773, 3868), 'tensorflow.compat.v1.get_variable', 'tf.get_variable', (['"""data_dependent_init_done"""'], {'initializer': '(0)', 'dtype': 'tf.int32', 'trainable': '(False)'}), "('data_dependent_init_done', initializer=0, dtype=tf.int32,\n trainable=False)\n", (3788, 3868), True, 'import tensorflow.compat.v1 as tf\n'), ((6002, 6100), 'meta_dataset.models.functional_backbones.weight_variable', 'functional_backbones.weight_variable', (['[embedding_dims, num_classes]'], {'weight_decay': 'weight_decay'}), '([embedding_dims, num_classes],\n weight_decay=weight_decay)\n', (6038, 6100), False, 'from meta_dataset.models import functional_backbones\n'), ((8041, 8149), 'tensorflow.compat.v1.get_variable', 'tf.get_variable', (['"""cosine_scale"""'], {'initializer': 'cosine_logits_multiplier', 'dtype': 'tf.float32', 'trainable': '(True)'}), "('cosine_scale', initializer=cosine_logits_multiplier, dtype\n =tf.float32, trainable=True)\n", (8056, 8149), True, 'import tensorflow.compat.v1 as tf\n'), ((2311, 2358), 'tensorflow.compat.v1.nn.l2_normalize', 'tf.nn.l2_normalize', (['w_fc'], {'axis': '(0)', 'epsilon': '(0.001)'}), '(w_fc, axis=0, epsilon=0.001)\n', (2329, 2358), True, 'import tensorflow.compat.v1 as tf\n'), ((2581, 2608), 'tensorflow.compat.v1.matmul', 'tf.matmul', (['embeddings', 'w_fc'], {}), '(embeddings, w_fc)\n', (2590, 2608), True, 'import tensorflow.compat.v1 as tf\n'), ((4720, 4758), 'tensorflow.compat.v1.matmul', 'tf.matmul', (['embeddings', 'w_fc_normalized'], {}), '(embeddings, w_fc_normalized)\n', (4729, 4758), True, 'import tensorflow.compat.v1 as tf\n'), ((4787, 4818), 'tensorflow.compat.v1.nn.moments', 'tf.nn.moments', (['output_init', '[0]'], {}), '(output_init, [0])\n', (4800, 4818), True, 'import tensorflow.compat.v1 as tf\n'), ((5331, 5345), 'tensorflow.compat.v1.group', 'tf.group', (['*ops'], {}), '(*ops)\n', (5339, 5345), True, 'import tensorflow.compat.v1 as tf\n'), ((5455, 5492), 'tensorflow.compat.v1.equal', 'tf.equal', (['data_dependent_init_done', '(0)'], {}), '(data_dependent_init_done, 0)\n', (5463, 5492), True, 'import tensorflow.compat.v1 as tf\n'), ((5547, 5581), 'tensorflow.compat.v1.control_dependencies', 'tf.control_dependencies', (['[init_op]'], {}), '([init_op])\n', (5570, 5581), True, 'import tensorflow.compat.v1 as tf\n'), ((6236, 6285), 'meta_dataset.models.functional_backbones.bias_variable', 'functional_backbones.bias_variable', (['[num_classes]'], {}), '([num_classes])\n', (6270, 6285), False, 'from meta_dataset.models import functional_backbones\n'), ((3994, 4031), 'tensorflow.compat.v1.random_normal_initializer', 'tf.random_normal_initializer', (['(0)', '(0.05)'], {}), '(0, 0.05)\n', (4022, 4031), True, 'import tensorflow.compat.v1 as tf\n'), ((4297, 4319), 'tensorflow.compat.v1.ones', 'tf.ones', (['[num_classes]'], {}), '([num_classes])\n', (4304, 4319), True, 'import tensorflow.compat.v1 as tf\n'), ((4881, 4906), 'tensorflow.compat.v1.sqrt', 'tf.sqrt', 
(['(var_init + 1e-10)'], {}), '(var_init + 1e-10)\n', (4888, 4906), True, 'import tensorflow.compat.v1 as tf\n'), ((4920, 4946), 'tensorflow.compat.v1.assign', 'tf.assign', (['g', 'g_init_value'], {}), '(g, g_init_value)\n', (4929, 4946), True, 'import tensorflow.compat.v1 as tf\n'), ((5278, 5316), 'tensorflow.compat.v1.assign', 'tf.assign', (['data_dependent_init_done', '(1)'], {}), '(data_dependent_init_done, 1)\n', (5287, 5316), True, 'import tensorflow.compat.v1 as tf\n'), ((4483, 4506), 'tensorflow.compat.v1.zeros', 'tf.zeros', (['[num_classes]'], {}), '([num_classes])\n', (4491, 4506), True, 'import tensorflow.compat.v1 as tf\n'), ((5109, 5141), 'tensorflow.compat.v1.assign', 'tf.assign', (['b_fc', 'b_fc_init_value'], {}), '(b_fc, b_fc_init_value)\n', (5118, 5141), True, 'import tensorflow.compat.v1 as tf\n'), ((5659, 5674), 'tensorflow.compat.v1.square', 'tf.square', (['w_fc'], {}), '(w_fc)\n', (5668, 5674), True, 'import tensorflow.compat.v1 as tf\n')]
|
#!/usr/bin/python3
# -*- coding: utf8 -*-
# -*- Mode: Python; py-indent-offset: 4 -*-
"""File storage adapter for timevortex project"""
import os
from os import listdir, makedirs
from os.path import isfile, join, exists
from time import tzname
from datetime import datetime
import pytz
import dateutil.parser
from django.conf import settings
from django.utils import timezone
from timevortex.utils.globals import LOGGER, KEY_ERROR, KEY_SITE_ID, KEY_VARIABLE_ID, KEY_VALUE, KEY_DATE
from timevortex.utils.globals import KEY_DST_TIMEZONE, KEY_NON_DST_TIMEZONE, SYSTEM_SITE_ID
SETTINGS_FILE_STORAGE_FOLDER = "SETTINGS_FILE_STORAGE_FOLDER"
SETTINGS_DEFAULT_FILE_STORAGE_FOLDER = "/tmp/data/"
def get_lines_number(file_path):
"""Get lines number
"""
return sum(1 for line in open(file_path))
def get_series_per_file(site_folder, file_prefix):
"""Get series per file
"""
series = {}
for filename in listdir(site_folder):
is_file = isfile(join(site_folder, filename))
if is_file and file_prefix in filename:
complete_filename = "%s/%s" % (site_folder, filename)
with open(complete_filename, "r") as filed:
temp_series = filed.readlines()
for line in temp_series:
array_line = line.split("\t")
if len(array_line) >= 2:
series[array_line[1]] = array_line[0]
return series
def get_last_file_name(site_folder, file_prefix):
"""Get last filename
"""
old_date = None
last_filename = ""
for new_filename in listdir(site_folder):
is_file = isfile(join(site_folder, new_filename))
if is_file and file_prefix in new_filename:
old_date, last_filename = update_last_file_name(file_prefix, old_date, last_filename, new_filename)
return last_filename
def update_last_file_name(file_prefix, old_date, last_filename, new_filename):
"""Update last file name
"""
try:
new_date = new_filename.replace(file_prefix, "")
new_date = datetime.strptime(new_date, "%Y-%m-%d")
if old_date is None or new_date > old_date:
return new_date, new_filename
except ValueError:
LOGGER.error("Not right file")
return old_date, last_filename
class FileStorage(object):
"""Class that help us to store and load data over several file"""
def __init__(self, folder_path):
"""Constructor"""
self.folder_path = folder_path
if not exists(self.folder_path):
makedirs(self.folder_path)
def insert_series(self, series):
"""Insert series in DB
:param series: Representation of a series
:type series: dict.
"""
self.insert(series)
def insert(self, message):
"""Insert data in file"""
file_folder = "%s/%s" % (self.folder_path, message[KEY_SITE_ID])
file_date = timezone.localtime(
dateutil.parser.parse(message[KEY_DATE]).replace(tzinfo=pytz.UTC)).strftime("%Y-%m-%d")
if not exists(file_folder):
makedirs(file_folder)
raw_file = "%s/%s.tsv.%s" % (
file_folder, message[KEY_VARIABLE_ID], file_date)
extracted = open(raw_file, "a+")
extracted.write("%s\t%s\t%s\t%s\n" % (
message[KEY_VALUE],
message[KEY_DATE],
message[KEY_DST_TIMEZONE],
message[KEY_NON_DST_TIMEZONE]))
extracted.close()
def insert_error(self, message):
"""Function that store error in errors collection and in log
:param message: Error to insert in DB
:type message: str.
"""
LOGGER.error(message)
message[KEY_VARIABLE_ID] = KEY_ERROR
self.insert(message)
def store_error(self, error):
"""Function that create valid error message
:param error: Mal formed message
:type error: str.
"""
message = {
KEY_VALUE: error,
KEY_VARIABLE_ID: KEY_ERROR,
KEY_SITE_ID: SYSTEM_SITE_ID,
KEY_DATE: datetime.utcnow().isoformat('T'),
KEY_DST_TIMEZONE: tzname[1],
KEY_NON_DST_TIMEZONE: tzname[0]
}
LOGGER.error(error)
self.insert(message)
def get_series(self, site_id, variable_id):
"""Retrieve all series for a variable_id in site_id
"""
element = variable_id
file_prefix = "%s.tsv." % element
site_folder = "%s/%s" % (self.folder_path, site_id)
if exists(site_folder):
series = get_series_per_file(site_folder, file_prefix)
else:
series = {}
return series
def get_last_series(self, site_id, variable_id):
"""Retrieve last value of variable_id in site_id
"""
element = variable_id
file_prefix = "%s.tsv." % element
site_folder = "%s/%s" % (self.folder_path, site_id)
if exists(site_folder):
last_filename = get_last_file_name(site_folder, file_prefix)
last_filename = "%s/%s" % (site_folder, last_filename)
try:
with open(last_filename, "rb") as filed2:
for last in filed2:
pass
except IsADirectoryError:
return None
LOGGER.debug(last) # pylint: disable=I0011,W0631
last = last.decode("utf-8").replace("\n", "") # pylint: disable=I0011,W0631
return {
KEY_VARIABLE_ID: element,
KEY_SITE_ID: site_id,
KEY_VALUE: last.split("\t")[0],
KEY_DATE: last.split("\t")[1],
KEY_DST_TIMEZONE: last.split("\t")[2],
KEY_NON_DST_TIMEZONE: last.split("\t")[3]
}
return None
def get_last_error(self, site_id):
"""Retrieve last error of a site_id file storage
"""
return self.get_last_series(site_id, KEY_ERROR)
def get_number_of_error(self, site_id, day_date):
"""This method retrieve number of error published for a day_date
"""
element = KEY_ERROR
site_folder = "%s/%s" % (self.folder_path, site_id)
filename = "%s.tsv.%s" % (element, day_date)
file_path = "%s/%s" % (site_folder, filename)
if exists(site_folder) and exists(file_path):
return get_lines_number(file_path)
return 0
def get_number_of_series(self, site_id, day_date):
"""This method retrieve number of series published for a day_date
"""
site_folder = "%s/%s" % (self.folder_path, site_id)
series = []
if exists(site_folder):
for filename in listdir(site_folder):
if "%s.tsv" % KEY_ERROR not in filename and day_date in filename:
file_path = "%s/%s" % (site_folder, filename)
var_id = filename.replace(".tsv.%s" % day_date, "")
series_numbers = get_lines_number(file_path)
series.append([var_id, series_numbers])
return series
def set_data_location(self, folder_path):
"""Set data folder space"""
self.folder_path = folder_path
def get_sites_list(self):
"""Get sites list"""
return os.listdir(self.folder_path)
FILE_STORAGE_SPACE = FileStorage(getattr(settings, SETTINGS_FILE_STORAGE_FOLDER, SETTINGS_DEFAULT_FILE_STORAGE_FOLDER))
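

# --- Editor's note: hedged usage sketch, not part of the original module. ---
# Shows the message schema FileStorage.insert() expects; the folder path and
# values below are invented, and running it needs a configured Django settings
# module because insert() goes through django.utils.timezone.
if __name__ == "__main__":
    demo_storage = FileStorage("/tmp/timevortex_demo")
    demo_storage.insert({
        KEY_SITE_ID: "demo_site",
        KEY_VARIABLE_ID: "temperature",
        KEY_VALUE: "21.5",
        KEY_DATE: datetime.utcnow().isoformat('T'),
        KEY_DST_TIMEZONE: tzname[1],
        KEY_NON_DST_TIMEZONE: tzname[0],
    })
    print(demo_storage.get_last_series("demo_site", "temperature"))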
|
[
"os.path.exists",
"os.listdir",
"os.makedirs",
"timevortex.utils.globals.LOGGER.debug",
"datetime.datetime.strptime",
"timevortex.utils.globals.LOGGER.error",
"datetime.datetime.utcnow",
"os.path.join"
] |
[((928, 948), 'os.listdir', 'listdir', (['site_folder'], {}), '(site_folder)\n', (935, 948), False, 'from os import listdir, makedirs\n'), ((1590, 1610), 'os.listdir', 'listdir', (['site_folder'], {}), '(site_folder)\n', (1597, 1610), False, 'from os import listdir, makedirs\n'), ((2062, 2101), 'datetime.datetime.strptime', 'datetime.strptime', (['new_date', '"""%Y-%m-%d"""'], {}), "(new_date, '%Y-%m-%d')\n", (2079, 2101), False, 'from datetime import datetime\n'), ((3692, 3713), 'timevortex.utils.globals.LOGGER.error', 'LOGGER.error', (['message'], {}), '(message)\n', (3704, 3713), False, 'from timevortex.utils.globals import LOGGER, KEY_ERROR, KEY_SITE_ID, KEY_VARIABLE_ID, KEY_VALUE, KEY_DATE\n'), ((4253, 4272), 'timevortex.utils.globals.LOGGER.error', 'LOGGER.error', (['error'], {}), '(error)\n', (4265, 4272), False, 'from timevortex.utils.globals import LOGGER, KEY_ERROR, KEY_SITE_ID, KEY_VARIABLE_ID, KEY_VALUE, KEY_DATE\n'), ((4566, 4585), 'os.path.exists', 'exists', (['site_folder'], {}), '(site_folder)\n', (4572, 4585), False, 'from os.path import isfile, join, exists\n'), ((4980, 4999), 'os.path.exists', 'exists', (['site_folder'], {}), '(site_folder)\n', (4986, 4999), False, 'from os.path import isfile, join, exists\n'), ((6700, 6719), 'os.path.exists', 'exists', (['site_folder'], {}), '(site_folder)\n', (6706, 6719), False, 'from os.path import isfile, join, exists\n'), ((7336, 7364), 'os.listdir', 'os.listdir', (['self.folder_path'], {}), '(self.folder_path)\n', (7346, 7364), False, 'import os\n'), ((975, 1002), 'os.path.join', 'join', (['site_folder', 'filename'], {}), '(site_folder, filename)\n', (979, 1002), False, 'from os.path import isfile, join, exists\n'), ((1637, 1668), 'os.path.join', 'join', (['site_folder', 'new_filename'], {}), '(site_folder, new_filename)\n', (1641, 1668), False, 'from os.path import isfile, join, exists\n'), ((2227, 2257), 'timevortex.utils.globals.LOGGER.error', 'LOGGER.error', (['"""Not right file"""'], {}), "('Not right file')\n", (2239, 2257), False, 'from timevortex.utils.globals import LOGGER, KEY_ERROR, KEY_SITE_ID, KEY_VARIABLE_ID, KEY_VALUE, KEY_DATE\n'), ((2510, 2534), 'os.path.exists', 'exists', (['self.folder_path'], {}), '(self.folder_path)\n', (2516, 2534), False, 'from os.path import isfile, join, exists\n'), ((2548, 2574), 'os.makedirs', 'makedirs', (['self.folder_path'], {}), '(self.folder_path)\n', (2556, 2574), False, 'from os import listdir, makedirs\n'), ((3066, 3085), 'os.path.exists', 'exists', (['file_folder'], {}), '(file_folder)\n', (3072, 3085), False, 'from os.path import isfile, join, exists\n'), ((3099, 3120), 'os.makedirs', 'makedirs', (['file_folder'], {}), '(file_folder)\n', (3107, 3120), False, 'from os import listdir, makedirs\n'), ((5364, 5382), 'timevortex.utils.globals.LOGGER.debug', 'LOGGER.debug', (['last'], {}), '(last)\n', (5376, 5382), False, 'from timevortex.utils.globals import LOGGER, KEY_ERROR, KEY_SITE_ID, KEY_VARIABLE_ID, KEY_VALUE, KEY_DATE\n'), ((6359, 6378), 'os.path.exists', 'exists', (['site_folder'], {}), '(site_folder)\n', (6365, 6378), False, 'from os.path import isfile, join, exists\n'), ((6383, 6400), 'os.path.exists', 'exists', (['file_path'], {}), '(file_path)\n', (6389, 6400), False, 'from os.path import isfile, join, exists\n'), ((6749, 6769), 'os.listdir', 'listdir', (['site_folder'], {}), '(site_folder)\n', (6756, 6769), False, 'from os import listdir, makedirs\n'), ((4116, 4133), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (4131, 4133), False, 'from datetime 
import datetime\n')]
|
import tensorflow as tf
from typing import Optional
from tf_fourier_features import fourier_features
class FourierFeatureMLP(tf.keras.Model):
def __init__(self, units: int, final_units: int, gaussian_projection: Optional[int],
activation: str = 'relu',
final_activation: str = "linear",
num_layers: int = 1,
gaussian_scale: float = 1.0,
use_bias: bool = True, **kwargs):
"""
Fourier Feature Projection model from the paper
[Fourier Features Let Networks Learn High Frequency Functions in Low Dimensional Domains](https://people.eecs.berkeley.edu/~bmild/fourfeat/).
Used to create a multi-layer MLP with optional FourierFeatureProjection layer.
Args:
units: Number of hidden units in the intermediate layers.
final_units: Number of hidden units in the final layer.
activation: Activation in the hidden layers.
final_activation: Activation function of the final layer.
num_layers: Number of layers in the network.
gaussian_projection: Projection dimension for the gaussian kernel in fourier feature
projection layer. Can be None, negative or positive integer.
If None, then fourier feature map layer is not used.
If <=0, uses identity matrix (basic projection) without gaussian kernel.
If >=1, uses gaussian projection matrix of specified dim.
gaussian_scale: Scale of the gaussian kernel in fourier feature projection layer.
                Note: If the scale is too small, convergence will be slow and the results will be poor.
If the scale is too large (>50), convergence will be fast but results will be grainy.
Try grid search for scales in the range [10 - 50].
use_bias: Boolean whether to use bias or not.
# References:
- [Fourier Features Let Networks Learn High Frequency Functions in Low Dimensional Domains](https://people.eecs.berkeley.edu/~bmild/fourfeat/)
"""
super().__init__(**kwargs)
layers = []
if gaussian_projection is not None:
layers.append(fourier_features.FourierFeatureProjection(
gaussian_projection=gaussian_projection,
gaussian_scale=gaussian_scale,
**kwargs
))
for _ in range(num_layers - 1):
layers.append(tf.keras.layers.Dense(units, activation=activation, use_bias=use_bias,
bias_initializer='he_uniform', **kwargs))
self.network = tf.keras.Sequential(layers)
self.final_dense = tf.keras.layers.Dense(final_units, activation=final_activation,
use_bias=use_bias, bias_initializer='he_uniform', **kwargs)
def call(self, inputs, training=None, mask=None):
features = self.network(inputs)
output = self.final_dense(features)
return output
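

# --- Editor's note: hedged usage sketch, not part of the original module. ---
# Builds the MLP for a toy 2D-coordinate regression; the layer sizes and the
# gaussian scale below are arbitrary picks for illustration, not recommended
# values.
if __name__ == "__main__":
    model = FourierFeatureMLP(units=256, final_units=3, gaussian_projection=256,
                              gaussian_scale=10.0, num_layers=4)
    coords = tf.random.uniform([8, 2])  # batch of 8 (x, y) coordinates
    print(model(coords).shape)  # expected: (8, 3)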
|
[
"tensorflow.keras.Sequential",
"tensorflow.keras.layers.Dense",
"tf_fourier_features.fourier_features.FourierFeatureProjection"
] |
[((2692, 2719), 'tensorflow.keras.Sequential', 'tf.keras.Sequential', (['layers'], {}), '(layers)\n', (2711, 2719), True, 'import tensorflow as tf\n'), ((2747, 2875), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['final_units'], {'activation': 'final_activation', 'use_bias': 'use_bias', 'bias_initializer': '"""he_uniform"""'}), "(final_units, activation=final_activation, use_bias=\n use_bias, bias_initializer='he_uniform', **kwargs)\n", (2768, 2875), True, 'import tensorflow as tf\n'), ((2253, 2381), 'tf_fourier_features.fourier_features.FourierFeatureProjection', 'fourier_features.FourierFeatureProjection', ([], {'gaussian_projection': 'gaussian_projection', 'gaussian_scale': 'gaussian_scale'}), '(gaussian_projection=\n gaussian_projection, gaussian_scale=gaussian_scale, **kwargs)\n', (2294, 2381), False, 'from tf_fourier_features import fourier_features\n'), ((2507, 2622), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['units'], {'activation': 'activation', 'use_bias': 'use_bias', 'bias_initializer': '"""he_uniform"""'}), "(units, activation=activation, use_bias=use_bias,\n bias_initializer='he_uniform', **kwargs)\n", (2528, 2622), True, 'import tensorflow as tf\n')]
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from npu_bridge.npu_init import *
import unittest
import numpy as np
from avod.datasets.kitti import kitti_aug
class KittiAugTest(unittest.TestCase):
def test_flip_boxes_3d(self):
boxes_3d = np.array([
[1, 2, 3, 4, 5, 6, np.pi / 4],
[1, 2, 3, 4, 5, 6, -np.pi / 4]
])
exp_flipped_boxes_3d = np.array([
[-1, 2, 3, 4, 5, 6, 3 * np.pi / 4],
[-1, 2, 3, 4, 5, 6, -3 * np.pi / 4]
])
flipped_boxes_3d = kitti_aug.flip_boxes_3d(boxes_3d)
np.testing.assert_almost_equal(flipped_boxes_3d, exp_flipped_boxes_3d)
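
    # --- Editor's note: hedged reference, not part of the original test. ---
    # The expected values above imply that flipping negates x and maps the
    # rotation ry to (pi - ry), wrapped back into [-pi, pi). A NumPy version
    # consistent with this test (not the library implementation) would be:
    @staticmethod
    def _reference_flip_boxes_3d(boxes_3d):
        flipped = boxes_3d.copy()
        flipped[:, 0] = -flipped[:, 0]
        ry = np.pi - flipped[:, 6]
        flipped[:, 6] = (ry + np.pi) % (2 * np.pi) - np.pi
        return flipped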
|
[
"numpy.array",
"avod.datasets.kitti.kitti_aug.flip_boxes_3d",
"numpy.testing.assert_almost_equal"
] |
[((1490, 1563), 'numpy.array', 'np.array', (['[[1, 2, 3, 4, 5, 6, np.pi / 4], [1, 2, 3, 4, 5, 6, -np.pi / 4]]'], {}), '([[1, 2, 3, 4, 5, 6, np.pi / 4], [1, 2, 3, 4, 5, 6, -np.pi / 4]])\n', (1498, 1563), True, 'import numpy as np\n'), ((1630, 1718), 'numpy.array', 'np.array', (['[[-1, 2, 3, 4, 5, 6, 3 * np.pi / 4], [-1, 2, 3, 4, 5, 6, -3 * np.pi / 4]]'], {}), '([[-1, 2, 3, 4, 5, 6, 3 * np.pi / 4], [-1, 2, 3, 4, 5, 6, -3 * np.\n pi / 4]])\n', (1638, 1718), True, 'import numpy as np\n'), ((1776, 1809), 'avod.datasets.kitti.kitti_aug.flip_boxes_3d', 'kitti_aug.flip_boxes_3d', (['boxes_3d'], {}), '(boxes_3d)\n', (1799, 1809), False, 'from avod.datasets.kitti import kitti_aug\n'), ((1819, 1889), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['flipped_boxes_3d', 'exp_flipped_boxes_3d'], {}), '(flipped_boxes_3d, exp_flipped_boxes_3d)\n', (1849, 1889), True, 'import numpy as np\n')]
|
from conans import ConanFile, AutoToolsBuildEnvironment, MSBuild, tools
from conans.errors import ConanInvalidConfiguration
import os
import shutil
required_conan_version = ">=1.33.0"
class LibStudXmlConan(ConanFile):
name = "libstudxml"
description = "A streaming XML pull parser and streaming XML serializer implementation for modern, standard C++."
topics = ("xml", "xml-parser", "serialization")
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://www.codesynthesis.com/projects/libstudxml/"
license = "MIT"
settings = "os", "compiler", "build_type", "arch"
exports_sources = "patches/*"
options = {
"shared": [True, False],
"fPIC": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
}
_autotools = None
@property
def _source_subfolder(self):
return "source_subfolder"
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
del self.options.fPIC
def requirements(self):
self.requires("expat/2.4.1")
def validate(self):
if self.settings.compiler == "Visual Studio":
if tools.Version(self.settings.compiler.version) < "9":
raise ConanInvalidConfiguration("Visual Studio {} is not supported.".format(self.settings.compiler.version))
@property
def _settings_build(self):
return getattr(self, "settings_build", self.settings)
def build_requirements(self):
if self.settings.compiler != "Visual Studio":
self.build_requires("gnu-config/cci.20201022")
self.build_requires("libtool/2.4.6")
if self._settings_build.os == "Windows" and not tools.get_env("CONAN_BASH_PATH"):
self.build_requires("msys2/cci.latest")
def source(self):
tools.get(**self.conan_data["sources"][self.version],
destination=self._source_subfolder, strip_root=True)
def _configure_autotools(self):
if not self._autotools:
args = ["--with-external-expat"]
if self.options.shared:
args.extend(["--enable-shared", "--disable-static"])
else:
args.extend(["--disable-shared", "--enable-static"])
self._autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)
self._autotools.configure(configure_dir=self._source_subfolder, args=args)
return self._autotools
def _build_vs(self):
vc_ver = int(tools.Version(self.settings.compiler.version).major)
sln_path = None
def get_sln_path():
return os.path.join(self._source_subfolder, "libstudxml-vc{}.sln".format(vc_ver))
sln_path = get_sln_path()
while not os.path.exists(sln_path):
vc_ver -= 1
sln_path = get_sln_path()
proj_path = os.path.join(self._source_subfolder, "xml", "libstudxml-vc{}.vcxproj".format(vc_ver))
if not self.options.shared:
tools.replace_in_file(proj_path, "DynamicLibrary", "StaticLibrary")
tools.replace_in_file(proj_path, "LIBSTUDXML_DYNAMIC_LIB", "LIBSTUDXML_STATIC_LIB")
msbuild = MSBuild(self)
msbuild.build(sln_path, platforms={"x86": "Win32"})
@property
def _user_info_build(self):
return getattr(self, "user_info_build", self.deps_user_info)
def _build_autotools(self):
shutil.copy(self._user_info_build["gnu-config"].CONFIG_SUB,
os.path.join(self._source_subfolder, "config", "config.sub"))
shutil.copy(self._user_info_build["gnu-config"].CONFIG_GUESS,
os.path.join(self._source_subfolder, "config", "config.guess"))
if self.settings.compiler.get_safe("libcxx") == "libc++":
# libc++ includes a file called 'version', and since libstudxml adds source_subfolder as an
# include dir, libc++ ends up including their 'version' file instead, causing a compile error
tools.remove_files_by_mask(self._source_subfolder, "version")
with tools.chdir(self._source_subfolder):
self.run("{} -fiv".format(tools.get_env("AUTORECONF")), win_bash=tools.os_info.is_windows)
autotools = self._configure_autotools()
autotools.make()
def build(self):
for patch in self.conan_data.get("patches", {}).get(self.version, []):
tools.patch(**patch)
if self.settings.compiler == "Visual Studio":
self._build_vs()
else:
self._build_autotools()
def package(self):
self.copy(pattern="LICENSE", dst="licenses", src=self._source_subfolder)
if self.settings.compiler == "Visual Studio":
self.copy("xml/value-traits", dst="include", src=self._source_subfolder)
self.copy("xml/serializer", dst="include", src=self._source_subfolder)
self.copy("xml/qname", dst="include", src=self._source_subfolder)
self.copy("xml/parser", dst="include", src=self._source_subfolder)
self.copy("xml/forward", dst="include", src=self._source_subfolder)
self.copy("xml/exception", dst="include", src=self._source_subfolder)
self.copy("xml/content", dst="include", src=self._source_subfolder)
self.copy("xml/*.ixx", dst="include", src=self._source_subfolder)
self.copy("xml/*.txx", dst="include", src=self._source_subfolder)
self.copy("xml/*.hxx", dst="include", src=self._source_subfolder)
self.copy("xml/*.h", dst="include", src=self._source_subfolder)
suffix = ""
if self.settings.arch == "x86_64":
suffix = "64"
if self.options.shared:
self.copy("*.lib", dst="lib", src=os.path.join(self._source_subfolder, "lib" + suffix))
self.copy("*.dll", dst="bin", src=os.path.join(self._source_subfolder, "bin" + suffix))
else:
self.copy("*.lib", dst="lib", src=os.path.join(self._source_subfolder, "bin" + suffix))
else:
autotools = self._configure_autotools()
autotools.install()
tools.remove_files_by_mask(os.path.join(self.package_folder, "lib"), "libstudxml.la")
tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
tools.rmdir(os.path.join(self.package_folder, "share"))
def package_info(self):
self.cpp_info.libs = tools.collect_libs(self)
self.cpp_info.names["pkg_config"] = "libstudxml"
# If built with makefile, static library mechanism is provided by their buildsystem already
if self.settings.compiler == "Visual Studio" and not self.options.shared:
self.cpp_info.defines = ["LIBSTUDXML_STATIC_LIB=1"]
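

# --- Editor's note: hedged usage note, not part of the original recipe. ---
# A typical Conan 1.x workflow for a conan-center-index style recipe; the
# version placeholder must match an entry in the recipe's conandata.yml:
#
#   conan create . libstudxml/<version>@ --build=missing
#
# Consumers then require `libstudxml/<version>` and link against the libs
# collected in package_info() above.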
|
[
"os.path.exists",
"conans.tools.replace_in_file",
"conans.tools.Version",
"conans.tools.remove_files_by_mask",
"os.path.join",
"conans.tools.patch",
"conans.tools.get_env",
"conans.tools.chdir",
"conans.tools.get",
"conans.AutoToolsBuildEnvironment",
"conans.MSBuild",
"conans.tools.collect_libs"
] |
[((1943, 2054), 'conans.tools.get', 'tools.get', ([], {'destination': 'self._source_subfolder', 'strip_root': '(True)'}), "(**self.conan_data['sources'][self.version], destination=self.\n _source_subfolder, strip_root=True)\n", (1952, 2054), False, 'from conans import ConanFile, AutoToolsBuildEnvironment, MSBuild, tools\n'), ((3316, 3329), 'conans.MSBuild', 'MSBuild', (['self'], {}), '(self)\n', (3323, 3329), False, 'from conans import ConanFile, AutoToolsBuildEnvironment, MSBuild, tools\n'), ((6595, 6619), 'conans.tools.collect_libs', 'tools.collect_libs', (['self'], {}), '(self)\n', (6613, 6619), False, 'from conans import ConanFile, AutoToolsBuildEnvironment, MSBuild, tools\n'), ((2405, 2471), 'conans.AutoToolsBuildEnvironment', 'AutoToolsBuildEnvironment', (['self'], {'win_bash': 'tools.os_info.is_windows'}), '(self, win_bash=tools.os_info.is_windows)\n', (2430, 2471), False, 'from conans import ConanFile, AutoToolsBuildEnvironment, MSBuild, tools\n'), ((2889, 2913), 'os.path.exists', 'os.path.exists', (['sln_path'], {}), '(sln_path)\n', (2903, 2913), False, 'import os\n'), ((3133, 3200), 'conans.tools.replace_in_file', 'tools.replace_in_file', (['proj_path', '"""DynamicLibrary"""', '"""StaticLibrary"""'], {}), "(proj_path, 'DynamicLibrary', 'StaticLibrary')\n", (3154, 3200), False, 'from conans import ConanFile, AutoToolsBuildEnvironment, MSBuild, tools\n'), ((3213, 3300), 'conans.tools.replace_in_file', 'tools.replace_in_file', (['proj_path', '"""LIBSTUDXML_DYNAMIC_LIB"""', '"""LIBSTUDXML_STATIC_LIB"""'], {}), "(proj_path, 'LIBSTUDXML_DYNAMIC_LIB',\n 'LIBSTUDXML_STATIC_LIB')\n", (3234, 3300), False, 'from conans import ConanFile, AutoToolsBuildEnvironment, MSBuild, tools\n'), ((3627, 3687), 'os.path.join', 'os.path.join', (['self._source_subfolder', '"""config"""', '"""config.sub"""'], {}), "(self._source_subfolder, 'config', 'config.sub')\n", (3639, 3687), False, 'import os\n'), ((3779, 3841), 'os.path.join', 'os.path.join', (['self._source_subfolder', '"""config"""', '"""config.guess"""'], {}), "(self._source_subfolder, 'config', 'config.guess')\n", (3791, 3841), False, 'import os\n'), ((4132, 4193), 'conans.tools.remove_files_by_mask', 'tools.remove_files_by_mask', (['self._source_subfolder', '"""version"""'], {}), "(self._source_subfolder, 'version')\n", (4158, 4193), False, 'from conans import ConanFile, AutoToolsBuildEnvironment, MSBuild, tools\n'), ((4208, 4243), 'conans.tools.chdir', 'tools.chdir', (['self._source_subfolder'], {}), '(self._source_subfolder)\n', (4219, 4243), False, 'from conans import ConanFile, AutoToolsBuildEnvironment, MSBuild, tools\n'), ((4535, 4555), 'conans.tools.patch', 'tools.patch', ([], {}), '(**patch)\n', (4546, 4555), False, 'from conans import ConanFile, AutoToolsBuildEnvironment, MSBuild, tools\n'), ((1279, 1324), 'conans.tools.Version', 'tools.Version', (['self.settings.compiler.version'], {}), '(self.settings.compiler.version)\n', (1292, 1324), False, 'from conans import ConanFile, AutoToolsBuildEnvironment, MSBuild, tools\n'), ((2637, 2682), 'conans.tools.Version', 'tools.Version', (['self.settings.compiler.version'], {}), '(self.settings.compiler.version)\n', (2650, 2682), False, 'from conans import ConanFile, AutoToolsBuildEnvironment, MSBuild, tools\n'), ((6331, 6371), 'os.path.join', 'os.path.join', (['self.package_folder', '"""lib"""'], {}), "(self.package_folder, 'lib')\n", (6343, 6371), False, 'import os\n'), ((6414, 6467), 'os.path.join', 'os.path.join', (['self.package_folder', '"""lib"""', '"""pkgconfig"""'], {}), 
"(self.package_folder, 'lib', 'pkgconfig')\n", (6426, 6467), False, 'import os\n'), ((6493, 6535), 'os.path.join', 'os.path.join', (['self.package_folder', '"""share"""'], {}), "(self.package_folder, 'share')\n", (6505, 6535), False, 'import os\n'), ((1822, 1854), 'conans.tools.get_env', 'tools.get_env', (['"""CONAN_BASH_PATH"""'], {}), "('CONAN_BASH_PATH')\n", (1835, 1854), False, 'from conans import ConanFile, AutoToolsBuildEnvironment, MSBuild, tools\n'), ((4283, 4310), 'conans.tools.get_env', 'tools.get_env', (['"""AUTORECONF"""'], {}), "('AUTORECONF')\n", (4296, 4310), False, 'from conans import ConanFile, AutoToolsBuildEnvironment, MSBuild, tools\n'), ((5914, 5966), 'os.path.join', 'os.path.join', (['self._source_subfolder', "('lib' + suffix)"], {}), "(self._source_subfolder, 'lib' + suffix)\n", (5926, 5966), False, 'import os\n'), ((6018, 6070), 'os.path.join', 'os.path.join', (['self._source_subfolder', "('bin' + suffix)"], {}), "(self._source_subfolder, 'bin' + suffix)\n", (6030, 6070), False, 'import os\n'), ((6140, 6192), 'os.path.join', 'os.path.join', (['self._source_subfolder', "('bin' + suffix)"], {}), "(self._source_subfolder, 'bin' + suffix)\n", (6152, 6192), False, 'import os\n')]
|
# Copyright 2020 <NAME> & <NAME> (<EMAIL>)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
class WebCariA:
def __init__(self, dataType, modelType, parse, des_attri=None):
self.dir_path = "/data/jw/dataset/" + str(parse)
self.dataType = dataType
self.parse = parse
self.des_attri = des_attri
if self.dataType == 'train':
if self.parse == 'Caricature':
self.subPath = 'CariTrain'
elif self.parse == 'Photo':
self.subPath = 'PhotoTrain'
else:
self.subPath = 'WebCariTrain'
elif self.dataType == 'val':
if self.parse == 'Caricature':
self.subPath = 'CariVal'
elif self.parse == 'Photo':
self.subPath = 'PhotoVal'
else:
self.subPath = 'WebCariVal'
elif self.dataType == 'test':
if self.parse == 'Caricature':
self.subPath = 'CariTest'
elif self.parse == 'Photo':
self.subPath = 'PhotoTest'
else:
self.subPath = 'WebCariTest'
elif self.dataType == 'all_data':
if self.parse == 'Caricature':
self.subPath = 'all_cari_data'
elif self.parse == 'Photo':
self.subPath = 'all_photo_data'
else:
self.subPath = 'all_WebCari_data'
else:
print("Caricature error, please select a dataType from: train, val, github")
exit(1)
self.modelType = modelType
self.dir_path = os.path.join(self.dir_path, self.subPath)
self.attributes = ['Women',
'Asian',
'White',
'Black',
'Youth',
'Middle',
'Old',
'Wrinkle',
'MakeUp',
'Bald',
'LargeForehead',
'RoundFace',
'DiamondFace',
'OvalFace',
'SquareShapeFace',
'NarrowEye',
'SleepyEye',
'SlantEye',
'SharpEye',
'FlabbyEye',
'BigEye',
'SmallEye',
'UnderEyePuffiness',
'BigNose',
'SmallNose',
'HighNose',
'FlatNose',
'HookNose',
'WideNose',
'NarrowNose',
'Toothy',
'Smile',
'BigMouth',
'SmallMouth',
'ThickLips',
'ThinLips',
'DoubleChin',
'ArchedEyebrows',
'FlatEyebrow',
'SlantedEyebrows',
'UpsideDownSlantedEyebrows',
'BushyEyebrows',
'ThickEyebrows',
'ThinEyebrows',
'Mustache',
'Goatee',
'Whiskers',
'OtherBeard&NoBeard',
'HighCheekbones',
'SquareJaw']
self.names, self.annas, self.visuals, self.num_attribute = self.getImgNameAndAnnas()
print(parse+"dataset, images: ", len(self.names), " type for: ", self.dataType, " num_attribute: ",
self.num_attribute)
def getImgNameAndAnnas(self):
names = []
annas = []
visuals = []
file = self.subPath+".txt"
file_v = self.subPath+"_V.txt"
fileList = open(os.path.join(self.dir_path, file)).readlines()
fileVList = open((os.path.join(self.dir_path, file_v))).readlines()
if self.modelType == 'seperate':
num_attribute = 1
attribute = self.des_attri
print("des_attribute", attribute)
if attribute not in self.attributes:
print("error: ", attribute, "is not in this dataset, please write a correct attribute in param")
exit(1)
for line in fileList:
names.append(line[0])
attributes = line[1::]
index = self.attributes.index(attribute)
annas.append([int(attributes[index])])
for line in fileVList:
attributes_v = line[1::]
index = self.attributes.index(attribute)
visuals.append([int(attributes_v[index])])
else:
for line in fileList:
names.append(line[0])
annas.append([int(x) for x in line[1::]])
for line in fileVList:
visuals.append([int(x) for x in line[1::]])
self.attributes = self.attributes
num_attribute = len(self.attributes)
return names, annas, visuals, num_attribute
def getPath(self, name):
name = name.replace(' ', '_')
name = name.replace('._', '_')
name = name.replace('-', '_')
name = name + ".jpg"
return name
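

# --- Editor's note: hedged usage sketch, not part of the original module. ---
# Only runs where the hard-coded /data/jw/dataset/<parse> layout exists; the
# attribute "Smile" is one entry from self.attributes and is used here purely
# as an example of the 'seperate' (single-attribute) mode.
if __name__ == "__main__":
    ds = WebCariA("train", "seperate", "Photo", des_attri="Smile")
    print(len(ds.names), ds.num_attribute)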
|
[
"os.path.join"
] |
[((2205, 2246), 'os.path.join', 'os.path.join', (['self.dir_path', 'self.subPath'], {}), '(self.dir_path, self.subPath)\n', (2217, 2246), False, 'import os\n'), ((4750, 4783), 'os.path.join', 'os.path.join', (['self.dir_path', 'file'], {}), '(self.dir_path, file)\n', (4762, 4783), False, 'import os\n'), ((4824, 4859), 'os.path.join', 'os.path.join', (['self.dir_path', 'file_v'], {}), '(self.dir_path, file_v)\n', (4836, 4859), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from collections import namedtuple
from typing import Dict, Iterable, List, Set, Tuple
from twisted.internet import defer
from synapse.api.constants import EventTypes
from synapse.storage._base import SQLBaseStore
from synapse.storage.data_stores.state.bg_updates import StateBackgroundUpdateStore
from synapse.storage.database import Database
from synapse.storage.state import StateFilter
from synapse.types import StateMap
from synapse.util.caches.descriptors import cached
from synapse.util.caches.dictionary_cache import DictionaryCache
logger = logging.getLogger(__name__)
MAX_STATE_DELTA_HOPS = 100
class _GetStateGroupDelta(
namedtuple("_GetStateGroupDelta", ("prev_group", "delta_ids"))
):
"""Return type of get_state_group_delta that implements __len__, which lets
    us use the iterable flag when caching
"""
__slots__ = []
def __len__(self):
return len(self.delta_ids) if self.delta_ids else 0
class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
"""A data store for fetching/storing state groups.
"""
def __init__(self, database: Database, db_conn, hs):
super(StateGroupDataStore, self).__init__(database, db_conn, hs)
# Originally the state store used a single DictionaryCache to cache the
# event IDs for the state types in a given state group to avoid hammering
# on the state_group* tables.
#
# The point of using a DictionaryCache is that it can cache a subset
# of the state events for a given state group (i.e. a subset of the keys for a
# given dict which is an entry in the cache for a given state group ID).
#
# However, this poses problems when performing complicated queries
# on the store - for instance: "give me all the state for this group, but
# limit members to this subset of users", as DictionaryCache's API isn't
# rich enough to say "please cache any of these fields, apart from this subset".
# This is problematic when lazy loading members, which requires this behaviour,
# as without it the cache has no choice but to speculatively load all
# state events for the group, which negates the efficiency being sought.
#
# Rather than overcomplicating DictionaryCache's API, we instead split the
# state_group_cache into two halves - one for tracking non-member events,
# and the other for tracking member_events. This means that lazy loading
# queries can be made in a cache-friendly manner by querying both caches
# separately and then merging the result. So for the example above, you
# would query the members cache for a specific subset of state keys
# (which DictionaryCache will handle efficiently and fine) and the non-members
# cache for all state (which DictionaryCache will similarly handle fine)
# and then just merge the results together.
#
# We size the non-members cache to be smaller than the members cache as the
# vast majority of state in Matrix (today) is member events.
self._state_group_cache = DictionaryCache(
"*stateGroupCache*",
# TODO: this hasn't been tuned yet
50000,
)
self._state_group_members_cache = DictionaryCache(
"*stateGroupMembersCache*", 500000,
)
@cached(max_entries=10000, iterable=True)
def get_state_group_delta(self, state_group):
"""Given a state group try to return a previous group and a delta between
the old and the new.
Returns:
(prev_group, delta_ids), where both may be None.
"""
def _get_state_group_delta_txn(txn):
prev_group = self.db.simple_select_one_onecol_txn(
txn,
table="state_group_edges",
keyvalues={"state_group": state_group},
retcol="prev_state_group",
allow_none=True,
)
if not prev_group:
return _GetStateGroupDelta(None, None)
delta_ids = self.db.simple_select_list_txn(
txn,
table="state_groups_state",
keyvalues={"state_group": state_group},
retcols=("type", "state_key", "event_id"),
)
return _GetStateGroupDelta(
prev_group,
{(row["type"], row["state_key"]): row["event_id"] for row in delta_ids},
)
return self.db.runInteraction(
"get_state_group_delta", _get_state_group_delta_txn
)
@defer.inlineCallbacks
def _get_state_groups_from_groups(
self, groups: List[int], state_filter: StateFilter
):
"""Returns the state groups for a given set of groups from the
database, filtering on types of state events.
Args:
groups: list of state group IDs to query
state_filter: The state filter used to fetch state
from the database.
Returns:
Deferred[Dict[int, StateMap[str]]]: Dict of state group to state map.
"""
results = {}
chunks = [groups[i : i + 100] for i in range(0, len(groups), 100)]
for chunk in chunks:
res = yield self.db.runInteraction(
"_get_state_groups_from_groups",
self._get_state_groups_from_groups_txn,
chunk,
state_filter,
)
results.update(res)
return results
def _get_state_for_group_using_cache(self, cache, group, state_filter):
"""Checks if group is in cache. See `_get_state_for_groups`
Args:
cache(DictionaryCache): the state group cache to use
group(int): The state group to lookup
state_filter (StateFilter): The state filter used to fetch state
from the database.
Returns 2-tuple (`state_dict`, `got_all`).
`got_all` is a bool indicating if we successfully retrieved all
        requested state from the cache, if False we need to query the DB for the
missing state.
"""
is_all, known_absent, state_dict_ids = cache.get(group)
if is_all or state_filter.is_full():
# Either we have everything or want everything, either way
# `is_all` tells us whether we've gotten everything.
return state_filter.filter_state(state_dict_ids), is_all
# tracks whether any of our requested types are missing from the cache
missing_types = False
if state_filter.has_wildcards():
# We don't know if we fetched all the state keys for the types in
# the filter that are wildcards, so we have to assume that we may
# have missed some.
missing_types = True
else:
# There aren't any wild cards, so `concrete_types()` returns the
# complete list of event types we're wanting.
for key in state_filter.concrete_types():
if key not in state_dict_ids and key not in known_absent:
missing_types = True
break
return state_filter.filter_state(state_dict_ids), not missing_types
@defer.inlineCallbacks
def _get_state_for_groups(
self, groups: Iterable[int], state_filter: StateFilter = StateFilter.all()
):
"""Gets the state at each of a list of state groups, optionally
filtering by type/state_key
Args:
groups: list of state groups for which we want
to get the state.
state_filter: The state filter used to fetch state
from the database.
Returns:
Deferred[Dict[int, StateMap[str]]]: Dict of state group to state map.
"""
member_filter, non_member_filter = state_filter.get_member_split()
# Now we look them up in the member and non-member caches
(
non_member_state,
incomplete_groups_nm,
) = yield self._get_state_for_groups_using_cache(
groups, self._state_group_cache, state_filter=non_member_filter
)
(
member_state,
incomplete_groups_m,
) = yield self._get_state_for_groups_using_cache(
groups, self._state_group_members_cache, state_filter=member_filter
)
state = dict(non_member_state)
for group in groups:
state[group].update(member_state[group])
# Now fetch any missing groups from the database
incomplete_groups = incomplete_groups_m | incomplete_groups_nm
if not incomplete_groups:
return state
cache_sequence_nm = self._state_group_cache.sequence
cache_sequence_m = self._state_group_members_cache.sequence
# Help the cache hit ratio by expanding the filter a bit
db_state_filter = state_filter.return_expanded()
group_to_state_dict = yield self._get_state_groups_from_groups(
list(incomplete_groups), state_filter=db_state_filter
)
# Now lets update the caches
self._insert_into_cache(
group_to_state_dict,
db_state_filter,
cache_seq_num_members=cache_sequence_m,
cache_seq_num_non_members=cache_sequence_nm,
)
# And finally update the result dict, by filtering out any extra
# stuff we pulled out of the database.
for group, group_state_dict in group_to_state_dict.items():
# We just replace any existing entries, as we will have loaded
# everything we need from the database anyway.
state[group] = state_filter.filter_state(group_state_dict)
return state
def _get_state_for_groups_using_cache(
self, groups: Iterable[int], cache: DictionaryCache, state_filter: StateFilter
) -> Tuple[Dict[int, StateMap[str]], Set[int]]:
"""Gets the state at each of a list of state groups, optionally
filtering by type/state_key, querying from a specific cache.
Args:
groups: list of state groups for which we want to get the state.
cache: the cache of group ids to state dicts which
we will pass through - either the normal state cache or the
specific members state cache.
state_filter: The state filter used to fetch state from the
database.
Returns:
Tuple of dict of state_group_id to state map of entries in the
cache, and the state group ids either missing from the cache or
incomplete.
"""
results = {}
incomplete_groups = set()
for group in set(groups):
state_dict_ids, got_all = self._get_state_for_group_using_cache(
cache, group, state_filter
)
results[group] = state_dict_ids
if not got_all:
incomplete_groups.add(group)
return results, incomplete_groups
def _insert_into_cache(
self,
group_to_state_dict,
state_filter,
cache_seq_num_members,
cache_seq_num_non_members,
):
"""Inserts results from querying the database into the relevant cache.
Args:
group_to_state_dict (dict): The new entries pulled from database.
Map from state group to state dict
state_filter (StateFilter): The state filter used to fetch state
from the database.
cache_seq_num_members (int): Sequence number of member cache since
last lookup in cache
cache_seq_num_non_members (int): Sequence number of member cache since
last lookup in cache
"""
# We need to work out which types we've fetched from the DB for the
# member vs non-member caches. This should be as accurate as possible,
# but can be an underestimate (e.g. when we have wild cards)
member_filter, non_member_filter = state_filter.get_member_split()
if member_filter.is_full():
# We fetched all member events
member_types = None
else:
# `concrete_types()` will only return a subset when there are wild
# cards in the filter, but that's fine.
member_types = member_filter.concrete_types()
if non_member_filter.is_full():
# We fetched all non member events
non_member_types = None
else:
non_member_types = non_member_filter.concrete_types()
for group, group_state_dict in group_to_state_dict.items():
state_dict_members = {}
state_dict_non_members = {}
for k, v in group_state_dict.items():
if k[0] == EventTypes.Member:
state_dict_members[k] = v
else:
state_dict_non_members[k] = v
self._state_group_members_cache.update(
cache_seq_num_members,
key=group,
value=state_dict_members,
fetched_keys=member_types,
)
self._state_group_cache.update(
cache_seq_num_non_members,
key=group,
value=state_dict_non_members,
fetched_keys=non_member_types,
)
def store_state_group(
self, event_id, room_id, prev_group, delta_ids, current_state_ids
):
"""Store a new set of state, returning a newly assigned state group.
Args:
event_id (str): The event ID for which the state was calculated
room_id (str)
prev_group (int|None): A previous state group for the room, optional.
delta_ids (dict|None): The delta between state at `prev_group` and
`current_state_ids`, if `prev_group` was given. Same format as
`current_state_ids`.
current_state_ids (dict): The state to store. Map of (type, state_key)
to event_id.
Returns:
Deferred[int]: The state group ID
"""
def _store_state_group_txn(txn):
if current_state_ids is None:
# AFAIK, this can never happen
raise Exception("current_state_ids cannot be None")
state_group = self.database_engine.get_next_state_group_id(txn)
self.db.simple_insert_txn(
txn,
table="state_groups",
values={"id": state_group, "room_id": room_id, "event_id": event_id},
)
# We persist as a delta if we can, while also ensuring the chain
            # of deltas isn't too long, as otherwise read performance degrades.
if prev_group:
is_in_db = self.db.simple_select_one_onecol_txn(
txn,
table="state_groups",
keyvalues={"id": prev_group},
retcol="id",
allow_none=True,
)
if not is_in_db:
raise Exception(
"Trying to persist state with unpersisted prev_group: %r"
% (prev_group,)
)
potential_hops = self._count_state_group_hops_txn(txn, prev_group)
if prev_group and potential_hops < MAX_STATE_DELTA_HOPS:
self.db.simple_insert_txn(
txn,
table="state_group_edges",
values={"state_group": state_group, "prev_state_group": prev_group},
)
self.db.simple_insert_many_txn(
txn,
table="state_groups_state",
values=[
{
"state_group": state_group,
"room_id": room_id,
"type": key[0],
"state_key": key[1],
"event_id": state_id,
}
for key, state_id in delta_ids.items()
],
)
else:
self.db.simple_insert_many_txn(
txn,
table="state_groups_state",
values=[
{
"state_group": state_group,
"room_id": room_id,
"type": key[0],
"state_key": key[1],
"event_id": state_id,
}
for key, state_id in current_state_ids.items()
],
)
# Prefill the state group caches with this group.
# It's fine to use the sequence like this as the state group map
# is immutable. (If the map wasn't immutable then this prefill could
# race with another update)
current_member_state_ids = {
s: ev
for (s, ev) in current_state_ids.items()
if s[0] == EventTypes.Member
}
txn.call_after(
self._state_group_members_cache.update,
self._state_group_members_cache.sequence,
key=state_group,
value=dict(current_member_state_ids),
)
current_non_member_state_ids = {
s: ev
for (s, ev) in current_state_ids.items()
if s[0] != EventTypes.Member
}
txn.call_after(
self._state_group_cache.update,
self._state_group_cache.sequence,
key=state_group,
value=dict(current_non_member_state_ids),
)
return state_group
return self.db.runInteraction("store_state_group", _store_state_group_txn)
def purge_unreferenced_state_groups(
self, room_id: str, state_groups_to_delete
) -> defer.Deferred:
"""Deletes no longer referenced state groups and de-deltas any state
groups that reference them.
Args:
room_id: The room the state groups belong to (must all be in the
same room).
state_groups_to_delete (Collection[int]): Set of all state groups
to delete.
"""
return self.db.runInteraction(
"purge_unreferenced_state_groups",
self._purge_unreferenced_state_groups,
room_id,
state_groups_to_delete,
)
def _purge_unreferenced_state_groups(self, txn, room_id, state_groups_to_delete):
logger.info(
"[purge] found %i state groups to delete", len(state_groups_to_delete)
)
rows = self.db.simple_select_many_txn(
txn,
table="state_group_edges",
column="prev_state_group",
iterable=state_groups_to_delete,
keyvalues={},
retcols=("state_group",),
)
remaining_state_groups = {
row["state_group"]
for row in rows
if row["state_group"] not in state_groups_to_delete
}
logger.info(
"[purge] de-delta-ing %i remaining state groups",
len(remaining_state_groups),
)
# Now we turn the state groups that reference to-be-deleted state
        # groups into non-delta versions.
for sg in remaining_state_groups:
logger.info("[purge] de-delta-ing remaining state group %s", sg)
curr_state = self._get_state_groups_from_groups_txn(txn, [sg])
curr_state = curr_state[sg]
self.db.simple_delete_txn(
txn, table="state_groups_state", keyvalues={"state_group": sg}
)
self.db.simple_delete_txn(
txn, table="state_group_edges", keyvalues={"state_group": sg}
)
self.db.simple_insert_many_txn(
txn,
table="state_groups_state",
values=[
{
"state_group": sg,
"room_id": room_id,
"type": key[0],
"state_key": key[1],
"event_id": state_id,
}
for key, state_id in curr_state.items()
],
)
logger.info("[purge] removing redundant state groups")
txn.executemany(
"DELETE FROM state_groups_state WHERE state_group = ?",
((sg,) for sg in state_groups_to_delete),
)
txn.executemany(
"DELETE FROM state_groups WHERE id = ?",
((sg,) for sg in state_groups_to_delete),
)
@defer.inlineCallbacks
def get_previous_state_groups(self, state_groups):
"""Fetch the previous groups of the given state groups.
Args:
state_groups (Iterable[int])
Returns:
Deferred[dict[int, int]]: mapping from state group to previous
state group.
"""
rows = yield self.db.simple_select_many_batch(
table="state_group_edges",
column="prev_state_group",
iterable=state_groups,
keyvalues={},
retcols=("prev_state_group", "state_group"),
desc="get_previous_state_groups",
)
return {row["state_group"]: row["prev_state_group"] for row in rows}
def purge_room_state(self, room_id, state_groups_to_delete):
"""Deletes all record of a room from state tables
Args:
room_id (str):
state_groups_to_delete (list[int]): State groups to delete
"""
return self.db.runInteraction(
"purge_room_state",
self._purge_room_state_txn,
room_id,
state_groups_to_delete,
)
def _purge_room_state_txn(self, txn, room_id, state_groups_to_delete):
# first we have to delete the state groups states
logger.info("[purge] removing %s from state_groups_state", room_id)
self.db.simple_delete_many_txn(
txn,
table="state_groups_state",
column="state_group",
iterable=state_groups_to_delete,
keyvalues={},
)
# ... and the state group edges
logger.info("[purge] removing %s from state_group_edges", room_id)
self.db.simple_delete_many_txn(
txn,
table="state_group_edges",
column="state_group",
iterable=state_groups_to_delete,
keyvalues={},
)
# ... and the state groups
logger.info("[purge] removing %s from state_groups", room_id)
self.db.simple_delete_many_txn(
txn,
table="state_groups",
column="id",
iterable=state_groups_to_delete,
keyvalues={},
)
|
[
"logging.getLogger",
"collections.namedtuple",
"synapse.storage.state.StateFilter.all",
"synapse.util.caches.descriptors.cached",
"synapse.util.caches.dictionary_cache.DictionaryCache"
] |
[((1176, 1203), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1193, 1203), False, 'import logging\n'), ((1266, 1328), 'collections.namedtuple', 'namedtuple', (['"""_GetStateGroupDelta"""', "('prev_group', 'delta_ids')"], {}), "('_GetStateGroupDelta', ('prev_group', 'delta_ids'))\n", (1276, 1328), False, 'from collections import namedtuple\n'), ((4032, 4072), 'synapse.util.caches.descriptors.cached', 'cached', ([], {'max_entries': '(10000)', 'iterable': '(True)'}), '(max_entries=10000, iterable=True)\n', (4038, 4072), False, 'from synapse.util.caches.descriptors import cached\n'), ((3783, 3826), 'synapse.util.caches.dictionary_cache.DictionaryCache', 'DictionaryCache', (['"""*stateGroupCache*"""', '(50000)'], {}), "('*stateGroupCache*', 50000)\n", (3798, 3826), False, 'from synapse.util.caches.dictionary_cache import DictionaryCache\n'), ((3951, 4002), 'synapse.util.caches.dictionary_cache.DictionaryCache', 'DictionaryCache', (['"""*stateGroupMembersCache*"""', '(500000)'], {}), "('*stateGroupMembersCache*', 500000)\n", (3966, 4002), False, 'from synapse.util.caches.dictionary_cache import DictionaryCache\n'), ((8064, 8081), 'synapse.storage.state.StateFilter.all', 'StateFilter.all', ([], {}), '()\n', (8079, 8081), False, 'from synapse.storage.state import StateFilter\n')]
|
# Generated by Django 2.2.6 on 2020-02-09 12:24
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0010_auto_20200130_1135'),
]
operations = [
migrations.CreateModel(
name='Variation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Item')),
],
options={
'unique_together': {('item', 'name')},
},
),
migrations.CreateModel(
name='ItemVariation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('value', models.CharField(max_length=50)),
('attachment', models.ImageField(upload_to='variations/')),
('variation', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Variation')),
],
options={
'unique_together': {('variation', 'value')},
},
),
]
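    # Illustrative only (not part of the generated migration): after applying
    # it, the new tables behave roughly like these model definitions:
    #   class Variation(models.Model):
    #       item = models.ForeignKey('core.Item', on_delete=models.CASCADE)
    #       name = models.CharField(max_length=50)
    #       class Meta:
    #           unique_together = ('item', 'name')
    #   class ItemVariation(models.Model):
    #       variation = models.ForeignKey('core.Variation', on_delete=models.CASCADE)
    #       value = models.CharField(max_length=50)
    #       attachment = models.ImageField(upload_to='variations/')
    #       class Meta:
    #           unique_together = ('variation', 'value')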
|
[
"django.db.models.ImageField",
"django.db.models.AutoField",
"django.db.models.CharField",
"django.db.models.ForeignKey"
] |
[((363, 456), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (379, 456), False, 'from django.db import migrations, models\n'), ((480, 511), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (496, 511), False, 'from django.db import migrations, models\n'), ((539, 617), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""core.Item"""'}), "(on_delete=django.db.models.deletion.CASCADE, to='core.Item')\n", (556, 617), False, 'from django.db import migrations, models\n'), ((848, 941), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (864, 941), False, 'from django.db import migrations, models\n'), ((966, 997), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (982, 997), False, 'from django.db import migrations, models\n'), ((1031, 1073), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""variations/"""'}), "(upload_to='variations/')\n", (1048, 1073), False, 'from django.db import migrations, models\n'), ((1106, 1194), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""core.Variation"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'core.Variation')\n", (1123, 1194), False, 'from django.db import migrations, models\n')]
|
import os
from mnist import model
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
data = input_data.read_data_sets("data/dataset/", one_hot=True)
# model
with tf.variable_scope("convolutional"):
x = tf.placeholder(tf.float32, [None, 784])
keep_prob = tf.placeholder(tf.float32)
y, variables = model.convolutional(x, keep_prob)
# train
y_ = tf.placeholder(tf.float32, [None, 10])
cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
saver = tf.train.Saver(variables)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(20000):
batch = data.train.next_batch(50)
if i % 100 == 0:
train_accuracy = accuracy.eval(feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})
print("step %d, training accuracy %g" % (i, train_accuracy))
sess.run(train_step, feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
print(sess.run(accuracy, feed_dict={x: data.test.images, y_: data.test.labels, keep_prob: 1.0}))
path = saver.save(
sess, os.path.join(os.path.dirname(__file__), 'data', 'convolutional.ckpt'),
write_meta_graph=False, write_state=False)
print("Saved:", path)
|
[
"tensorflow.variable_scope",
"tensorflow.placeholder",
"tensorflow.train.Saver",
"tensorflow.Session",
"tensorflow.global_variables_initializer",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"tensorflow.argmax",
"os.path.dirname",
"mnist.model.convolutional",
"tensorflow.train.AdamOptimizer",
"tensorflow.cast",
"tensorflow.log"
] |
[((126, 182), 'tensorflow.examples.tutorials.mnist.input_data.read_data_sets', 'input_data.read_data_sets', (['"""data/dataset/"""'], {'one_hot': '(True)'}), "('data/dataset/', one_hot=True)\n", (151, 182), False, 'from tensorflow.examples.tutorials.mnist import input_data\n'), ((391, 429), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 10]'], {}), '(tf.float32, [None, 10])\n', (405, 429), True, 'import tensorflow as tf\n'), ((684, 709), 'tensorflow.train.Saver', 'tf.train.Saver', (['variables'], {}), '(variables)\n', (698, 709), True, 'import tensorflow as tf\n'), ((197, 231), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""convolutional"""'], {}), "('convolutional')\n", (214, 231), True, 'import tensorflow as tf\n'), ((241, 280), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 784]'], {}), '(tf.float32, [None, 784])\n', (255, 280), True, 'import tensorflow as tf\n'), ((297, 323), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (311, 323), True, 'import tensorflow as tf\n'), ((343, 376), 'mnist.model.convolutional', 'model.convolutional', (['x', 'keep_prob'], {}), '(x, keep_prob)\n', (362, 376), False, 'from mnist import model\n'), ((573, 588), 'tensorflow.argmax', 'tf.argmax', (['y', '(1)'], {}), '(y, 1)\n', (582, 588), True, 'import tensorflow as tf\n'), ((590, 606), 'tensorflow.argmax', 'tf.argmax', (['y_', '(1)'], {}), '(y_, 1)\n', (599, 606), True, 'import tensorflow as tf\n'), ((634, 673), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (641, 673), True, 'import tensorflow as tf\n'), ((715, 727), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (725, 727), True, 'import tensorflow as tf\n'), ((490, 520), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['(0.0001)'], {}), '(0.0001)\n', (512, 520), True, 'import tensorflow as tf\n'), ((750, 783), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (781, 783), True, 'import tensorflow as tf\n'), ((466, 475), 'tensorflow.log', 'tf.log', (['y'], {}), '(y)\n', (472, 475), True, 'import tensorflow as tf\n'), ((1287, 1312), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1302, 1312), False, 'import os\n')]
|
import requests
import codecs
query1 = """<union>
<query type="way">
<has-kv k="addr:housenumber"/>
<has-kv k="addr:street:name"/>
<has-kv k="addr:street:type"/>
<has-kv k="addr:state"/>
<bbox-query e="%s" n="%s" s="%s" w="%s"/>
</query>
<query type="way">
<has-kv k="addr:housenumber"/>
<has-kv k="addr:street:name"/>
<has-kv k="addr:street:type"/>
<has-kv k="addr:city"/>
<bbox-query e="%s" n="%s" s="%s" w="%s"/>
</query>
<query type="way">
<has-kv k="addr:housenumber"/>
<has-kv k="addr:street:name"/>
<has-kv k="addr:street:type"/>
<has-kv k="addr:postcode"/>
<bbox-query e="%s" n="%s" s="%s" w="%s"/>
</query>
<query type="node">
<has-kv k="addr:housenumber"/>
<has-kv k="addr:street:name"/>
<has-kv k="addr:street:type"/>
<has-kv k="addr:state"/>
<bbox-query e="%s" n="%s" s="%s" w="%s"/>
</query>
<query type="node">
<has-kv k="addr:housenumber"/>
<has-kv k="addr:street:name"/>
<has-kv k="addr:street:type"/>
<has-kv k="addr:city"/>
<bbox-query e="%s" n="%s" s="%s" w="%s"/>
</query>
<query type="node">
<has-kv k="addr:housenumber"/>
<has-kv k="addr:street:name"/>
<has-kv k="addr:street:type"/>
<has-kv k="addr:postcode"/>
<bbox-query e="%s" n="%s" s="%s" w="%s"/>
</query>
</union>
<print/>""" % ((-70.000000, 50.000000, 25.000000, -125.000000) * 6)
r1 = requests.post('http://overpass-api.de/api/interpreter/', data=query1)
r1.encoding = 'utf-8'
f = codecs.open('data/osm_data.xml', encoding='utf-8' , mode='w+')
f.write(r1.text)
query2 = """<union>
<query type="way">
<has-kv k="addr:street"/>
<has-kv k="addr:street:name"/>
<has-kv k="addr:street:prefix"/>
<has-kv k="addr:street:type"/>
<bbox-query e="%s" n="%s" s="%s" w="%s"/>
</query>
<query type="node">
<has-kv k="addr:street"/>
<has-kv k="addr:street:name"/>
<has-kv k="addr:street:prefix"/>
<has-kv k="addr:street:type"/>
<bbox-query e="%s" n="%s" s="%s" w="%s"/>
</query>
</union>
<print/>""" % ((-87.61309146881104, 41.890042371392965, 41.87234107841773, -87.64235973358154) * 2)
#r2 = requests.post('http://overpass-api.de/api/interpreter/', data=query2)
#f = codecs.open("data/osm_data_street.xml", "wb", "utf-8")
#r2.encoding = 'utf-8'
#f.write(r2.text)
query3 = """<union>
<query type="way">
<has-kv k="addr:full" regv="^[0-9]+.*[a-z]+.*[0-9]{5}.*"/>
<bbox-query e="%s" n="%s" s="%s" w="%s"/>
</query>
<query type="node">
<has-kv k="addr:full" regv="^[0-9]+.*[a-z]+.*[0-9]{5}.*"/>
<bbox-query e="%s" n="%s" s="%s" w="%s"/>
</query>
</union>
<print/>
""" % ((-70.000000, 50.000000, 25.000000, -125.000000) * 2)
if __name__ == '__main__' :
r3 = requests.post('http://overpass-api.de/api/interpreter/', data=query3)
f = codecs.open("data/osm_data_full_addr.xml", "wb", "utf-8")
r3.encoding = 'utf-8'
f.write(r3.text)
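    # Illustrative follow-up (not part of the original script): the saved
    # Overpass XML can be parsed with the standard library, for example:
    #   import xml.etree.ElementTree as ET
    #   root = ET.parse("data/osm_data_full_addr.xml").getroot()
    #   for node in root.iter("node"):
    #       print(node.get("lat"), node.get("lon"))
    # (element/attribute names reflect the usual Overpass XML output and are
    # assumptions, not taken from this script.)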
|
[
"codecs.open",
"requests.post"
] |
[((1389, 1458), 'requests.post', 'requests.post', (['"""http://overpass-api.de/api/interpreter/"""'], {'data': 'query1'}), "('http://overpass-api.de/api/interpreter/', data=query1)\n", (1402, 1458), False, 'import requests\n'), ((1486, 1547), 'codecs.open', 'codecs.open', (['"""data/osm_data.xml"""'], {'encoding': '"""utf-8"""', 'mode': '"""w+"""'}), "('data/osm_data.xml', encoding='utf-8', mode='w+')\n", (1497, 1547), False, 'import codecs\n'), ((2717, 2786), 'requests.post', 'requests.post', (['"""http://overpass-api.de/api/interpreter/"""'], {'data': 'query3'}), "('http://overpass-api.de/api/interpreter/', data=query3)\n", (2730, 2786), False, 'import requests\n'), ((2796, 2853), 'codecs.open', 'codecs.open', (['"""data/osm_data_full_addr.xml"""', '"""wb"""', '"""utf-8"""'], {}), "('data/osm_data_full_addr.xml', 'wb', 'utf-8')\n", (2807, 2853), False, 'import codecs\n')]
|
#!/usr/bin/env python
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# --------------------------------------------------------
"""Test a Fast R-CNN network on an image database."""
import _init_paths
from fast_rcnn.test import test_net
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list
from datasets.factory import get_imdb
import caffe
import argparse
import pprint
import time, os, sys
import pandas as pd
def splitall(path):
allparts = []
while 1:
parts = os.path.split(path)
if parts[0] == path: # sentinel for absolute paths
allparts.insert(0, parts[0])
break
elif parts[1] == path: # sentinel for relative paths
allparts.insert(0, parts[1])
break
else:
path = parts[0]
allparts.insert(0, parts[1])
return allparts
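# Example (added for illustration, not from the original source):
#   splitall('/models/vgg16/final.caffemodel')
#   -> ['/', 'models', 'vgg16', 'final.caffemodel']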
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Test a Fast R-CNN network pipeline')
parser.add_argument('--gpu', dest='gpu_id', help='GPU id to use',
default=0, type=int, required=True)
parser.add_argument('--dir', dest='dir',
help='Directory of the model files',
default="", type=str, required=True)
parser.add_argument('--models', dest='model_files',
help='Text file with names of models',
default=None, type=str, required=True)
parser.add_argument('--prototxt', dest='prototxt',
help='prototxt', default=None, type=str, required=True)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to test',
default='ped_test_small', type=str, required=True)
parser.add_argument('--cfg', dest='cfg_file',
help='cfg',
default='experiments/cfgs/faster_rcnn_end2end.yml', type=str)
parser.add_argument('--res', dest='res_file',
help='result file',
default='', type=str, required=True)
args = parser.parse_args()
return args
def run_test_net(gpu_id, caffemodel, prototxt, imdb_name, cfg_file):
if cfg_file is not None:
cfg_from_file(cfg_file)
cfg.GPU_ID = gpu_id
print('Using config:')
pprint.pprint(cfg)
while not os.path.exists(caffemodel):
print('Waiting for {} to exist...'.format(caffemodel))
time.sleep(10)
caffe.set_mode_gpu()
caffe.set_device(gpu_id)
net = caffe.Net(prototxt, caffemodel, caffe.TEST)
net.name = os.path.splitext(os.path.basename(caffemodel))[0]
imdb = get_imdb(imdb_name)
if not cfg.TEST.HAS_RPN:
imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
    n, _ = os.path.splitext(caffemodel)  # note: the original used args.caffemodel, which is not a defined CLI argument
paths = splitall(n)
proposal_prefix = paths[-1]
return test_net(net, imdb, max_per_image=100, vis=False, proposal_prefix=proposal_prefix)
def run_test_nets(gpu_id, dir, model_files, prototxt, imdb_name, cfg_file, res_file):
models = [line.rstrip('\n') for line in open(os.path.join(dir, model_files))]
df_results = pd.DataFrame()
for model in models:
results = run_test_net(gpu_id, os.path.join(dir, model), prototxt, imdb_name, cfg_file)
for result in results:
result['file'] = model
df_results = df_results.append(results, ignore_index=True)
df_results.to_csv(os.path.join(dir, res_file))
if __name__ == '__main__':
# args = parse_args()
gpu_id = 0
# dir = '/home/abhijitcbim/git/pedestrian-detector/output/faster_rcnn_end2end/train/backup'
# model_files = 'test.txt'
args = parse_args()
print('Called with args:')
print(args)
run_test_nets(args.gpu_id, args.dir, args.model_files, args.prototxt, args.imdb_name, args.cfg_file, args.res_file)
# run_test_net(gpu_id,caffemodel, prototxt, imdb_name, cfg_file)
|
[
"fast_rcnn.config.cfg_from_file",
"os.path.exists",
"fast_rcnn.test.test_net",
"argparse.ArgumentParser",
"pandas.DataFrame",
"caffe.set_mode_gpu",
"caffe.set_device",
"os.path.splitext",
"os.path.split",
"time.sleep",
"os.path.join",
"os.path.basename",
"caffe.Net",
"datasets.factory.get_imdb",
"pprint.pprint"
] |
[((1067, 1140), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Test a Fast R-CNN network pipeline"""'}), "(description='Test a Fast R-CNN network pipeline')\n", (1090, 1140), False, 'import argparse\n'), ((2493, 2511), 'pprint.pprint', 'pprint.pprint', (['cfg'], {}), '(cfg)\n', (2506, 2511), False, 'import pprint\n'), ((2646, 2666), 'caffe.set_mode_gpu', 'caffe.set_mode_gpu', ([], {}), '()\n', (2664, 2666), False, 'import caffe\n'), ((2671, 2695), 'caffe.set_device', 'caffe.set_device', (['gpu_id'], {}), '(gpu_id)\n', (2687, 2695), False, 'import caffe\n'), ((2706, 2749), 'caffe.Net', 'caffe.Net', (['prototxt', 'caffemodel', 'caffe.TEST'], {}), '(prototxt, caffemodel, caffe.TEST)\n', (2715, 2749), False, 'import caffe\n'), ((2827, 2846), 'datasets.factory.get_imdb', 'get_imdb', (['imdb_name'], {}), '(imdb_name)\n', (2835, 2846), False, 'from datasets.factory import get_imdb\n'), ((2947, 2980), 'os.path.splitext', 'os.path.splitext', (['args.caffemodel'], {}), '(args.caffemodel)\n', (2963, 2980), False, 'import time, os, sys\n'), ((3050, 3137), 'fast_rcnn.test.test_net', 'test_net', (['net', 'imdb'], {'max_per_image': '(100)', 'vis': '(False)', 'proposal_prefix': 'proposal_prefix'}), '(net, imdb, max_per_image=100, vis=False, proposal_prefix=\n proposal_prefix)\n', (3058, 3137), False, 'from fast_rcnn.test import test_net\n'), ((3320, 3334), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3332, 3334), True, 'import pandas as pd\n'), ((629, 648), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (642, 648), False, 'import time, os, sys\n'), ((2412, 2435), 'fast_rcnn.config.cfg_from_file', 'cfg_from_file', (['cfg_file'], {}), '(cfg_file)\n', (2425, 2435), False, 'from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list\n'), ((2527, 2553), 'os.path.exists', 'os.path.exists', (['caffemodel'], {}), '(caffemodel)\n', (2541, 2553), False, 'import time, os, sys\n'), ((2626, 2640), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (2636, 2640), False, 'import time, os, sys\n'), ((3612, 3639), 'os.path.join', 'os.path.join', (['dir', 'res_file'], {}), '(dir, res_file)\n', (3624, 3639), False, 'import time, os, sys\n'), ((2782, 2810), 'os.path.basename', 'os.path.basename', (['caffemodel'], {}), '(caffemodel)\n', (2798, 2810), False, 'import time, os, sys\n'), ((3399, 3423), 'os.path.join', 'os.path.join', (['dir', 'model'], {}), '(dir, model)\n', (3411, 3423), False, 'import time, os, sys\n'), ((3270, 3300), 'os.path.join', 'os.path.join', (['dir', 'model_files'], {}), '(dir, model_files)\n', (3282, 3300), False, 'import time, os, sys\n')]
|
import cv2
import numpy as np
from pycocotools.coco import COCO
import os
from ..dataloading import get_yolox_datadir
from .datasets_wrapper import Dataset
class MOTDataset(Dataset):
"""
COCO dataset class.
"""
def __init__( # This function is called in the exps yolox_x_mot17_half.py in this way: dataset = MOTDataset(
# data_dir=os.path.join(get_yolox_datadir(), "mot"),
# json_file=self.train_ann,
# name='train',
# img_size=self.input_size,
# preproc=TrainTransform(rgb_means=(0.485, 0.456, 0.406),
# std=(0.229, 0.224, 0.225),
# max_labels=500,),)
self,
data_dir=None,
json_file="train_half.json",
name="train",
img_size=(608, 1088),
preproc=None,
):
"""
COCO dataset initialization. Annotation data are read into memory by COCO API.
Args:
data_dir (str): dataset root directory
json_file (str): COCO json file name
name (str): COCO data name (e.g. 'train2017' or 'val2017')
img_size (int): target image size after pre-processing
preproc: data augmentation strategy
"""
super().__init__(img_size)
if data_dir is None:
data_dir = os.path.join(get_yolox_datadir(), "mot")
self.data_dir = data_dir
self.json_file = json_file
self.coco = COCO(os.path.join(self.data_dir, "annotations", self.json_file))
self.ids = self.coco.getImgIds()
self.class_ids = sorted(self.coco.getCatIds())
cats = self.coco.loadCats(self.coco.getCatIds())
self._classes = tuple([c["name"] for c in cats])
self.annotations = self._load_coco_annotations()
self.name = name
self.img_size = img_size
self.preproc = preproc
def __len__(self):
return len(self.ids)
def _load_coco_annotations(self):
return [self.load_anno_from_ids(_ids) for _ids in self.ids]
def load_anno_from_ids(self, id_):
im_ann = self.coco.loadImgs(id_)[0]
width = im_ann["width"]
height = im_ann["height"]
#frame_id = im_ann["frame_id"] : the default value '1' avoid to break augmentation & evaluation processes
frame_id = 1
#video_id = im_ann["video_id"] : the default value '1' avoid to break augmentation & evaluation processes
video_id = 1
anno_ids = self.coco.getAnnIds(imgIds=[int(id_)], iscrowd=False)
annotations = self.coco.loadAnns(anno_ids)
objs = []
for obj in annotations:
x1 = obj["bbox"][0]
y1 = obj["bbox"][1]
x2 = x1 + obj["bbox"][2]
y2 = y1 + obj["bbox"][3]
if obj["area"] > 0 and x2 >= x1 and y2 >= y1:
obj["clean_bbox"] = [x1, y1, x2, y2]
objs.append(obj)
num_objs = len(objs)
res = np.zeros((num_objs, 6))
for ix, obj in enumerate(objs):
cls = self.class_ids.index(obj["category_id"])
res[ix, 0:4] = obj["clean_bbox"]
res[ix, 4] = cls
#res[ix, 5] = obj["track_id"] # See comment line 66; same comment for the default value 1
res[ix, 5] = 1
file_name = im_ann["file_name"] if "file_name" in im_ann else "{:012}".format(id_) + ".jpg"
img_info = (height, width, frame_id, video_id, file_name)
del im_ann, annotations
return (res, img_info, file_name)
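    # Added note (not part of the original code): each row of `res` returned
    # above is laid out as [x1, y1, x2, y2, class_index, track_id], with
    # track_id hard-coded to 1 here (see the comments above).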
def load_anno(self, index):
return self.annotations[index][0]
def pull_item(self, index):
id_ = self.ids[index]
res, img_info, file_name = self.annotations[index]
# load image and preprocess
img_file = os.path.join(
self.data_dir, self.name, file_name
)
img = cv2.imread(img_file)
assert img is not None
return img, res.copy(), img_info, np.array([id_])
@Dataset.resize_getitem
def __getitem__(self, index):
"""
One image / label pair for the given index is picked up and pre-processed.
Args:
index (int): data index
Returns:
img (numpy.ndarray): pre-processed image
padded_labels (torch.Tensor): pre-processed label data.
The shape is :math:`[max_labels, 5]`.
each label consists of [class, xc, yc, w, h]:
class (float): class index.
xc, yc (float) : center of bbox whose values range from 0 to 1.
w, h (float) : size of bbox whose values range from 0 to 1.
info_img : tuple of h, w, nh, nw, dx, dy.
h, w (int): original shape of the image
nh, nw (int): shape of the resized image without padding
dx, dy (int): pad size
img_id (int): same as the input index. Used for evaluation.
"""
img, target, img_info, img_id = self.pull_item(index)
if self.preproc is not None:
img, target = self.preproc(img, target, self.input_dim)
return img, target, img_info, img_id
|
[
"numpy.array",
"numpy.zeros",
"os.path.join",
"cv2.imread"
] |
[((3616, 3639), 'numpy.zeros', 'np.zeros', (['(num_objs, 6)'], {}), '((num_objs, 6))\n', (3624, 3639), True, 'import numpy as np\n'), ((4439, 4488), 'os.path.join', 'os.path.join', (['self.data_dir', 'self.name', 'file_name'], {}), '(self.data_dir, self.name, file_name)\n', (4451, 4488), False, 'import os\n'), ((4525, 4545), 'cv2.imread', 'cv2.imread', (['img_file'], {}), '(img_file)\n', (4535, 4545), False, 'import cv2\n'), ((2119, 2177), 'os.path.join', 'os.path.join', (['self.data_dir', '"""annotations"""', 'self.json_file'], {}), "(self.data_dir, 'annotations', self.json_file)\n", (2131, 2177), False, 'import os\n'), ((4620, 4635), 'numpy.array', 'np.array', (['[id_]'], {}), '([id_])\n', (4628, 4635), True, 'import numpy as np\n')]
|
import argparse
import yaml
from subprocess import call
from train import train_bichrom
if __name__ == '__main__':
# parsing
parser = argparse.ArgumentParser(description='Train and Evaluate Bichrom')
parser.add_argument('-training_schema_yaml', required=True,
help='YAML file with paths to train, test and val data')
parser.add_argument('-len', help='Size of genomic windows',
required=True, type=int)
parser.add_argument('-outdir', required=True, help='Output directory')
parser.add_argument('-nbins', type=int, required=True, help='Number of bins')
args = parser.parse_args()
# load the yaml file with input data paths:
with open(args.training_schema_yaml, 'r') as f:
try:
data_paths = yaml.safe_load(f)
except yaml.YAMLError as exc:
print(exc)
# create the output directory:
outdir = args.outdir
call(['mkdir', outdir])
train_bichrom(data_paths=data_paths, outdir=outdir, seq_len=args.len,
bin_size=int(args.len/args.nbins))
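    # Illustrative sketch only (key names are assumptions, not taken from the
    # project): the YAML is expected to parse into the `data_paths` dict used
    # above, e.g. something shaped like
    #   train: {seq: ..., labels: ...}
    #   val:   {seq: ..., labels: ...}
    #   test:  {seq: ..., labels: ...}
    # holding the paths to train, validation and test data referenced by the
    # -training_schema_yaml help text.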
|
[
"yaml.safe_load",
"subprocess.call",
"argparse.ArgumentParser"
] |
[((143, 208), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train and Evaluate Bichrom"""'}), "(description='Train and Evaluate Bichrom')\n", (166, 208), False, 'import argparse\n'), ((937, 960), 'subprocess.call', 'call', (["['mkdir', outdir]"], {}), "(['mkdir', outdir])\n", (941, 960), False, 'from subprocess import call\n'), ((794, 811), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (808, 811), False, 'import yaml\n')]
|
# -*- coding: utf-8 -*-
"""\
This is a Python port of "Goose" originally licensed to Gravity.com
under one or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.
Python port was written by <NAME>
Gravity.com licenses this file
to you under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import lxml.html as lxmlhtml
from lxml.html import soupparser
from lxml import etree
from copy import deepcopy
from goose.text import innerTrim
from goose.text import encodeValue
class Parser(object):
@classmethod
def xpath_re(self, node, expression):
regexp_namespace = "http://exslt.org/regular-expressions"
items = node.xpath(expression, namespaces={'re': regexp_namespace})
return items
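    # Illustrative usage (not from the original source): xpath_re enables
    # EXSLT regular expressions inside selectors, e.g.
    #   Parser.xpath_re(doc, '//div[re:test(@class, "^article", "i")]')
    # returns the <div> elements whose class starts with "article",
    # case-insensitively.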
@classmethod
def drop_tag(self, nodes):
if isinstance(nodes, list):
for node in nodes:
node.drop_tag()
else:
nodes.drop_tag()
@classmethod
def css_select(self, node, selector):
return node.cssselect(selector)
@classmethod
def fromstring(self, html):
html = encodeValue(html)
self.doc = lxmlhtml.fromstring(html)
return self.doc
@classmethod
def nodeToString(self, node):
return etree.tostring(node)
@classmethod
def replaceTag(self, node, tag):
node.tag = tag
@classmethod
def stripTags(self, node, *tags):
etree.strip_tags(node, *tags)
@classmethod
def getElementById(self, node, idd):
selector = '//*[@id="%s"]' % idd
elems = node.xpath(selector)
if elems:
return elems[0]
return None
@classmethod
def getElementsByTag(self, node, tag=None, attr=None, value=None, childs=False):
NS = "http://exslt.org/regular-expressions"
# selector = tag or '*'
selector = 'descendant-or-self::%s' % (tag or '*')
if attr and value:
selector = '%s[re:test(@%s, "%s", "i")]' % (selector, attr, value)
elems = node.xpath(selector, namespaces={"re": NS})
# remove the root node
# if we have a selection tag
if node in elems and (tag or childs):
elems.remove(node)
return elems
@classmethod
def appendChild(self, node, child):
node.append(child)
@classmethod
def childNodes(self, node):
return list(node)
@classmethod
def childNodesWithText(self, node):
root = node
# create the first text node
# if we have some text in the node
if root.text:
t = lxmlhtml.HtmlElement()
t.text = root.text
t.tag = 'text'
root.text = None
root.insert(0, t)
# loop childs
for c, n in enumerate(list(root)):
idx = root.index(n)
            # don't process text nodes
if n.tag == 'text':
continue
# create a text node for tail
if n.tail:
t = self.createElement(tag='text', text=n.tail, tail=None)
root.insert(idx + 1, t)
return list(root)
@classmethod
def textToPara(self, text):
return self.fromstring(text)
@classmethod
def getChildren(self, node):
return node.getchildren()
@classmethod
def getElementsByTags(self, node, tags):
selector = ','.join(tags)
elems = self.css_select(node, selector)
# remove the root node
# if we have a selection tag
if node in elems:
elems.remove(node)
return elems
@classmethod
def createElement(self, tag='p', text=None, tail=None):
t = lxmlhtml.HtmlElement()
t.tag = tag
t.text = text
t.tail = tail
return t
@classmethod
def getComments(self, node):
return node.xpath('//comment()')
@classmethod
def getParent(self, node):
return node.getparent()
@classmethod
def remove(self, node):
parent = node.getparent()
if parent is not None:
if node.tail:
prev = node.getprevious()
if prev is None:
if not parent.text:
parent.text = ''
parent.text += u' ' + node.tail
else:
if not prev.tail:
prev.tail = ''
prev.tail += u' ' + node.tail
node.clear()
parent.remove(node)
@classmethod
def getTag(self, node):
return node.tag
@classmethod
def getText(self, node):
txts = [i for i in node.itertext()]
return innerTrim(u' '.join(txts).strip())
@classmethod
def previousSiblings(self, node):
nodes = []
for c, n in enumerate(node.itersiblings(preceding=True)):
nodes.append(n)
return nodes
@classmethod
def previousSibling(self, node):
nodes = []
for c, n in enumerate(node.itersiblings(preceding=True)):
nodes.append(n)
if c == 0:
break
return nodes[0] if nodes else None
@classmethod
def nextSibling(self, node):
nodes = []
for c, n in enumerate(node.itersiblings(preceding=False)):
nodes.append(n)
if c == 0:
break
return nodes[0] if nodes else None
@classmethod
def isTextNode(self, node):
return True if node.tag == 'text' else False
@classmethod
def getAttribute(self, node, attr=None):
if attr:
return node.attrib.get(attr, None)
return attr
@classmethod
def delAttribute(self, node, attr=None):
if attr:
_attr = node.attrib.get(attr, None)
if _attr:
del node.attrib[attr]
@classmethod
def setAttribute(self, node, attr=None, value=None):
if attr and value:
node.set(attr, value)
@classmethod
def outerHtml(self, node):
e0 = node
if e0.tail:
e0 = deepcopy(e0)
e0.tail = None
return self.nodeToString(e0)
class ParserSoup(Parser):
@classmethod
def fromstring(self, html):
html = encodeValue(html)
self.doc = soupparser.fromstring(html)
return self.doc
|
[
"lxml.etree.strip_tags",
"lxml.html.fromstring",
"lxml.html.soupparser.fromstring",
"copy.deepcopy",
"goose.text.encodeValue",
"lxml.etree.tostring",
"lxml.html.HtmlElement"
] |
[((1624, 1641), 'goose.text.encodeValue', 'encodeValue', (['html'], {}), '(html)\n', (1635, 1641), False, 'from goose.text import encodeValue\n'), ((1661, 1686), 'lxml.html.fromstring', 'lxmlhtml.fromstring', (['html'], {}), '(html)\n', (1680, 1686), True, 'import lxml.html as lxmlhtml\n'), ((1778, 1798), 'lxml.etree.tostring', 'etree.tostring', (['node'], {}), '(node)\n', (1792, 1798), False, 'from lxml import etree\n'), ((1941, 1970), 'lxml.etree.strip_tags', 'etree.strip_tags', (['node', '*tags'], {}), '(node, *tags)\n', (1957, 1970), False, 'from lxml import etree\n'), ((4202, 4224), 'lxml.html.HtmlElement', 'lxmlhtml.HtmlElement', ([], {}), '()\n', (4222, 4224), True, 'import lxml.html as lxmlhtml\n'), ((6788, 6805), 'goose.text.encodeValue', 'encodeValue', (['html'], {}), '(html)\n', (6799, 6805), False, 'from goose.text import encodeValue\n'), ((6825, 6852), 'lxml.html.soupparser.fromstring', 'soupparser.fromstring', (['html'], {}), '(html)\n', (6846, 6852), False, 'from lxml.html import soupparser\n'), ((3109, 3131), 'lxml.html.HtmlElement', 'lxmlhtml.HtmlElement', ([], {}), '()\n', (3129, 3131), True, 'import lxml.html as lxmlhtml\n'), ((6618, 6630), 'copy.deepcopy', 'deepcopy', (['e0'], {}), '(e0)\n', (6626, 6630), False, 'from copy import deepcopy\n')]
|
#
# Created on Thu Apr 22 2021
# <NAME>
#
import boto3
from botocore.exceptions import ClientError
import logging
logging.basicConfig(filename="rps.log", level=logging.INFO)
iam_resource = boto3.resource("iam")
sts_client = boto3.client("sts")
def create_role(
iam_role_name: str, assume_role_policy_json: str, policy_arns: list
) -> iam_resource.Role:
"""
Create an IAM role with a given policy.
:param assume_role_policy_json: A json string that represents the assume
role policy defining what resources are allowed to assume the role.
:param policy_arns: a list of strings representing existing policy arns to
also attach to the role
:return: IAM role object
This method was adapted from the create_iam_role_for_lambda() method found here:
https://docs.aws.amazon.com/code-samples/latest/catalog/python-lambda-boto_client_examples-lambda_basics.py.html
"""
try:
role = iam_resource.create_role(
RoleName=iam_role_name,
AssumeRolePolicyDocument=assume_role_policy_json,
)
# wait for the creation to complete
iam_resource.meta.client.get_waiter("role_exists").wait(RoleName=iam_role_name)
# attach the additional supplied policies
for arn in policy_arns:
role.attach_policy(PolicyArn=arn)
except ClientError as error:
if error.response["Error"]["Code"] == "EntityAlreadyExists":
role = iam_resource.Role(iam_role_name)
logging.warning("The role %s already exists. Using it.", iam_role_name)
return role
else:
logging.error(error.response["Error"]["Message"])
logging.exception(
"Couldn't create role %s or attach policy %s.",
iam_role_name,
str(policy_arns),
)
raise
else:
logging.info("Created IAM role %s.", role.name)
logging.info("Attached policies %s to role %s.", policy_arns, role.name)
return role
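# Illustrative example (not part of the original module): a minimal trust
# (assume-role) policy that would let AWS Lambda assume a role created above.
# The role name is hypothetical and `json` would need to be imported.
#   lambda_trust = json.dumps({
#       "Version": "2012-10-17",
#       "Statement": [{
#           "Effect": "Allow",
#           "Principal": {"Service": "lambda.amazonaws.com"},
#           "Action": "sts:AssumeRole",
#       }],
#   })
#   role = create_role("example-lambda-role", lambda_trust, [])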
def create_policy(policy_name: str, policy_json: str) -> iam_resource.Policy:
"""
Create an IAM policy of given name and json description.
Policies define permissions in AWS and can be associated with IAM roles.
    :param policy_json: must be a valid policy JSON string
:return: IAM Policy object
"""
try:
policy = iam_resource.create_policy(
PolicyName=policy_name, PolicyDocument=policy_json
)
except ClientError as error:
if error.response["Error"]["Code"] == "EntityAlreadyExists":
policy = get_policy_by_name(policy_name)
logging.warning("The policy %s already exists. Using it.", policy.arn)
return policy
else:
logging.error(error.response["Error"]["Message"])
logging.exception("Couldn't create policy %s", policy_name)
raise
else:
logging.info("Created Policy '%s'", policy_name)
return policy
def get_policy_by_name(policy_name: str) -> iam_resource.Policy:
"""
Get an existing policy by name.
:return: IAM Policy object
"""
# sts provides the account number of the current credentials
account_id = sts_client.get_caller_identity()["Account"]
# policy arns consist of an account id and policy name
policy_arn = f"arn:aws:iam::{account_id}:policy/{policy_name}"
# policies are created in the Python SDK via their arn
policy = iam_resource.Policy(policy_arn)
return policy
def delete_role(iam_role) -> dict:
"""
Delete a role.
:param iam_role: this parameter is an IAM role object, such as returned
by create_role()
"""
try:
# remove all policies before deleting role
for policy in iam_role.attached_policies.all():
policy.detach_role(RoleName=iam_role.name)
response = iam_role.delete()
except ClientError as error:
logging.error(error.response["Error"]["Message"])
logging.error("Couldn't delete role %s", iam_role.name)
else:
logging.info("Deleted role '%s'", iam_role.name)
return response
def delete_policy(iam_policy) -> dict:
"""
    Delete a policy.
:param iam_policy: this parameter is an IAM policy object, such as returned
by create_policy()
"""
try:
response = iam_policy.delete()
except ClientError as error:
logging.error(error.response["Error"]["Message"])
logging.error("Couldn't delete policy %s", iam_policy.arn)
else:
logging.info("Deleted policy '%s'", iam_policy.arn)
return response
if __name__ == "__main__":
# brief functionality test with delete() cleanup at end
policy_json_file = "./policy/lambda_policy.json"
with open(policy_json_file) as file:
policy_json = file.read()
policy_name = "test_policy"
policy = create_policy(policy_name, policy_json)
print("new policy arn: ", policy.arn)
policy.delete()
|
[
"logging.basicConfig",
"boto3.client",
"logging.warning",
"logging.exception",
"boto3.resource",
"logging.info",
"logging.error"
] |
[((115, 174), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': '"""rps.log"""', 'level': 'logging.INFO'}), "(filename='rps.log', level=logging.INFO)\n", (134, 174), False, 'import logging\n'), ((191, 212), 'boto3.resource', 'boto3.resource', (['"""iam"""'], {}), "('iam')\n", (205, 212), False, 'import boto3\n'), ((226, 245), 'boto3.client', 'boto3.client', (['"""sts"""'], {}), "('sts')\n", (238, 245), False, 'import boto3\n'), ((1877, 1924), 'logging.info', 'logging.info', (['"""Created IAM role %s."""', 'role.name'], {}), "('Created IAM role %s.', role.name)\n", (1889, 1924), False, 'import logging\n'), ((1933, 2005), 'logging.info', 'logging.info', (['"""Attached policies %s to role %s."""', 'policy_arns', 'role.name'], {}), "('Attached policies %s to role %s.', policy_arns, role.name)\n", (1945, 2005), False, 'import logging\n'), ((2925, 2973), 'logging.info', 'logging.info', (['"""Created Policy \'%s\'"""', 'policy_name'], {}), '("Created Policy \'%s\'", policy_name)\n', (2937, 2973), False, 'import logging\n'), ((4070, 4118), 'logging.info', 'logging.info', (['"""Deleted role \'%s\'"""', 'iam_role.name'], {}), '("Deleted role \'%s\'", iam_role.name)\n', (4082, 4118), False, 'import logging\n'), ((4546, 4597), 'logging.info', 'logging.info', (['"""Deleted policy \'%s\'"""', 'iam_policy.arn'], {}), '("Deleted policy \'%s\'", iam_policy.arn)\n', (4558, 4597), False, 'import logging\n'), ((3938, 3987), 'logging.error', 'logging.error', (["error.response['Error']['Message']"], {}), "(error.response['Error']['Message'])\n", (3951, 3987), False, 'import logging\n'), ((3996, 4051), 'logging.error', 'logging.error', (['"""Couldn\'t delete role %s"""', 'iam_role.name'], {}), '("Couldn\'t delete role %s", iam_role.name)\n', (4009, 4051), False, 'import logging\n'), ((4411, 4460), 'logging.error', 'logging.error', (["error.response['Error']['Message']"], {}), "(error.response['Error']['Message'])\n", (4424, 4460), False, 'import logging\n'), ((4469, 4527), 'logging.error', 'logging.error', (['"""Couldn\'t delete policy %s"""', 'iam_policy.arn'], {}), '("Couldn\'t delete policy %s", iam_policy.arn)\n', (4482, 4527), False, 'import logging\n'), ((1495, 1566), 'logging.warning', 'logging.warning', (['"""The role %s already exists. Using it."""', 'iam_role_name'], {}), "('The role %s already exists. Using it.', iam_role_name)\n", (1510, 1566), False, 'import logging\n'), ((1617, 1666), 'logging.error', 'logging.error', (["error.response['Error']['Message']"], {}), "(error.response['Error']['Message'])\n", (1630, 1666), False, 'import logging\n'), ((2644, 2714), 'logging.warning', 'logging.warning', (['"""The policy %s already exists. Using it."""', 'policy.arn'], {}), "('The policy %s already exists. Using it.', policy.arn)\n", (2659, 2714), False, 'import logging\n'), ((2767, 2816), 'logging.error', 'logging.error', (["error.response['Error']['Message']"], {}), "(error.response['Error']['Message'])\n", (2780, 2816), False, 'import logging\n'), ((2829, 2888), 'logging.exception', 'logging.exception', (['"""Couldn\'t create policy %s"""', 'policy_name'], {}), '("Couldn\'t create policy %s", policy_name)\n', (2846, 2888), False, 'import logging\n')]
|
import collections
import copy
import intervaltree
from .label import Label
class LabelList:
"""
Represents a list of labels which describe an utterance.
An utterance can have multiple label-lists.
Args:
idx (str): An unique identifier for the label-list
within a corpus for one utterance.
labels (list): The list containing the
:py:class:`audiomate.annotations.Label`.
Attributes:
utterance (Utterance): The utterance this label-list is belonging to.
label_tree (IntervalTree): The interval-tree storing the labels.
Example:
>>> label_list = LabelList(idx='transcription', labels=[
>>> Label('this', 0, 2),
>>> Label('is', 2, 4),
>>> Label('timmy', 4, 8)
>>> ])
"""
__slots__ = ['idx', 'label_tree', 'utterance']
def __init__(self, idx='default', labels=None):
self.idx = idx
self.utterance = None
self.label_tree = intervaltree.IntervalTree()
if labels is not None:
self.update(labels)
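        # (Note added for clarity: labels are stored as intervals keyed by
        # their (start, end) span, which is what makes the overlap queries in
        # merge_overlaps() efficient.)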
def __eq__(self, other):
data_this = (self.idx, self.label_tree)
data_other = (other.idx, other.label_tree)
return data_this == data_other
def __iter__(self):
for interval in self.label_tree:
yield interval.data
def __len__(self):
return self.label_tree.__len__()
def __copy__(self):
# utterance is ignored intentionally,
# since it is kind of a weak ref
return LabelList(
idx=self.idx,
labels=[iv.data for iv in self.label_tree]
)
def __deepcopy__(self, memo):
# utterance is ignored intentionally,
# since it is kind of a weak ref
return LabelList(
idx=self.idx,
labels=copy.deepcopy([iv.data for iv in self.label_tree], memo)
)
@property
def labels(self):
""" Return list of labels. """
return list(self)
@property
def start(self):
""" Return start of the earliest starting label (lower bound). """
return self.label_tree.begin()
@property
def end(self):
""" Return end of the lastly ending label (upper bound). """
return self.label_tree.end()
@property
def total_length(self):
"""
Return the cumulative length of all labels
(Number of characters).
"""
return sum(label.length for label in self.labels)
#
# Alteration
#
def add(self, label):
"""
Add a label to the end of the list.
Args:
label (Label): The label to add.
"""
label.label_list = self
self.label_tree.addi(label.start, label.end, label)
def addl(self, value, start=0.0, end=float('inf')):
""" Shortcut for ``add(Label(value, start, end))``. """
self.add(Label(value, start=start, end=end))
def update(self, labels):
"""
Add a list of labels to the end of the list.
Args:
labels (list): Labels to add.
"""
ivs = []
for label in labels:
label.label_list = self
ivs.append(intervaltree.Interval(label.start, label.end, label))
self.label_tree.update(ivs)
def apply(self, fn):
"""
Apply the given function `fn` to every label in this label list.
`fn` is a function of one argument that receives the current label
which can then be edited in place.
Args:
fn (func): Function to apply to every label
Example:
>>> ll = LabelList(labels=[
... Label('a_label', 1.0, 2.0),
... Label('another_label', 2.0, 3.0)
... ])
>>> def shift_labels(label):
... label.start += 1.0
... label.end += 1.0
...
>>> ll.apply(shift_labels)
>>> ll.labels
[Label(a_label, 2.0, 3.0), Label(another_label, 3.0, 4.0)]
"""
for label in self.labels:
fn(label)
def merge_overlaps(self, threshold=0.0):
"""
Merge overlapping labels with the same value.
        Two labels are considered overlapping
if ``l2.start - l1.end < threshold``.
Args:
threshold (float): Maximal distance between two labels
to be considered as overlapping.
(default: 0.0)
Example:
>>> ll = LabelList(labels=[
... Label('a_label', 1.0, 2.0),
... Label('a_label', 1.5, 2.7),
... Label('b_label', 1.0, 2.0),
... ])
            >>> ll.merge_overlaps()
>>> ll.labels
[
Label('a_label', 1.0, 2.7),
Label('b_label', 1.0, 2.0),
]
"""
updated_labels = []
all_intervals = self.label_tree.copy()
        # recursively find a group of overlapping labels with the same value
def recursive_overlaps(interval):
range_start = interval.begin - threshold
range_end = interval.end + threshold
direct_overlaps = all_intervals.overlap(range_start, range_end)
all_overlaps = [interval]
all_intervals.discard(interval)
for overlap in direct_overlaps:
if overlap.data.value == interval.data.value:
all_overlaps.extend(recursive_overlaps(overlap))
return all_overlaps
# For every remaining interval
# - Find overlapping intervals recursively
# - Remove them
# - Create a concatenated new label
while not all_intervals.is_empty():
next_interval = list(all_intervals)[0]
overlapping = recursive_overlaps(next_interval)
ov_start = float('inf')
ov_end = 0.0
ov_value = next_interval.data.value
for overlap in overlapping:
ov_start = min(ov_start, overlap.begin)
ov_end = max(ov_end, overlap.end)
all_intervals.discard(overlap)
updated_labels.append(Label(
ov_value,
ov_start,
ov_end
))
# Replace the old labels with the updated ones
self.label_tree.clear()
self.update(updated_labels)
#
# Statistics
#
def label_total_duration(self):
"""
Return for each distinct label value the total duration of
all occurrences.
Returns:
dict: A dictionary containing for every label-value (key)
the total duration in seconds (value).
Example:
>>> ll = LabelList(labels=[
>>> Label('a', 3, 5),
>>> Label('b', 5, 8),
>>> Label('a', 8, 10),
>>> Label('b', 10, 14),
>>> Label('a', 15, 18.5)
>>> ])
>>> ll.label_total_duration()
            {'a': 7.5, 'b': 7.0}
"""
durations = collections.defaultdict(float)
for label in self:
durations[label.value] += label.duration
return durations
def label_values(self):
"""
        Return a list of all occurring label values.
Returns:
list: Lexicographically sorted list (str) of label values.
Example:
>>> ll = LabelList(labels=[
>>> Label('a', 3.2, 4.5),
>>> Label('b', 5.1, 8.9),
>>> Label('c', 7.2, 10.5),
>>> Label('d', 10.5, 14),
>>> Label('d', 15, 18)
>>> ])
>>> ll.label_values()
['a', 'b', 'c', 'd']
"""
all_labels = {l.value for l in self}
return sorted(all_labels)
def label_count(self):
"""
Return for each label the number of occurrences within the list.
Returns:
dict: A dictionary containing for every label-value (key)
the number of occurrences (value).
Example:
>>> ll = LabelList(labels=[
>>> Label('a', 3.2, 4.5),
>>> Label('b', 5.1, 8.9),
>>> Label('a', 7.2, 10.5),
>>> Label('b', 10.5, 14),
>>> Label('a', 15, 18)
>>> ])
>>> ll.label_count()
            {'a': 3, 'b': 2}
"""
occurrences = collections.defaultdict(int)
for label in self:
occurrences[label.value] += 1
return occurrences
def all_tokens(self, delimiter=' '):
"""
Return a list of all tokens occurring in the label-list.
Args:
delimiter (str): The delimiter used to split labels into tokens.
See :meth:`audiomate.annotations.Label.tokenized`
Returns:
:class:`set`: A set of distinct tokens.
"""
tokens = set()
for label in self:
tokens = tokens.union(set(label.tokenized(delimiter=delimiter)))
return tokens
#
# Query Label Values
#
def join(self, delimiter=' ', overlap_threshold=0.1):
"""
Return a string with all labels concatenated together.
The order of the labels is defined by the start of the label.
If the overlapping between two labels is greater than
``overlap_threshold``, an Exception is thrown.
Args:
delimiter (str): A string to join two consecutive labels.
overlap_threshold (float): Maximum overlap between two
consecutive labels.
Returns:
str: A string with all labels concatenated together.
Example:
>>> ll = LabelList(idx='some', labels=[
>>> Label('a', start=0, end=4),
>>> Label('b', start=3.95, end=6.0),
>>> Label('c', start=7.0, end=10.2),
>>> Label('d', start=10.3, end=14.0)
>>> ])
>>> ll.join(' - ')
'a - b - c - d'
"""
sorted_by_start = sorted(self.labels)
concat_values = []
last_label_end = None
for label in sorted_by_start:
if last_label_end is None or (last_label_end - label.start < overlap_threshold and last_label_end > 0):
concat_values.append(label.value)
last_label_end = label.end
else:
raise ValueError('Labels overlap, not able to define the correct order')
return delimiter.join(concat_values)
def tokenized(self, delimiter=' ', overlap_threshold=0.1):
"""
        Return an ordered list of tokens based on all labels.
        Joins all tokens from all labels (``label.tokenized()``).
If the overlapping between two labels is greater than
``overlap_threshold``, an Exception is thrown.
Args:
delimiter (str): The delimiter used to split labels into tokens.
(default: space)
overlap_threshold (float): Maximum overlap between two
consecutive labels.
Returns:
str: A list containing tokens of all labels ordered according
to the label order.
Example:
>>> ll = LabelList(idx='some', labels=[
>>> Label('a d q', start=0, end=4),
>>> Label('b', start=3.95, end=6.0),
>>> Label('c a', start=7.0, end=10.2),
>>> Label('f g', start=10.3, end=14.0)
>>> ])
>>> ll.tokenized(delimiter=' ', overlap_threshold=0.1)
['a', 'd', 'q', 'b', 'c', 'a', 'f', 'g']
"""
sorted_by_start = sorted(self.labels)
tokens = []
last_label_end = None
for label in sorted_by_start:
if last_label_end is None or (last_label_end - label.start < overlap_threshold and last_label_end > 0):
tokens.extend(label.tokenized(delimiter=delimiter))
last_label_end = label.end
else:
raise ValueError('Labels overlap, not able to define the correct order')
return tokens
#
# Restructuring
#
def separated(self):
"""
Create a separate Label-List for every distinct label-value.
Returns:
dict: A dictionary with distinct label-values as keys. Every value
is a LabelList containing only labels with the same value.
Example:
>>> ll = LabelList(idx='some', labels=[
>>> Label('a', start=0, end=4),
>>> Label('b', start=3.95, end=6.0),
>>> Label('a', start=7.0, end=10.2),
>>> Label('b', start=10.3, end=14.0)
>>> ])
            >>> s = ll.separated()
>>> s['a'].labels
[Label('a', start=0, end=4), Label('a', start=7.0, end=10.2)]
>>> s['b'].labels
[Label('b', start=3.95, end=6.0), Label('b', start=10.3, end=14.0)]
"""
separated_lls = collections.defaultdict(LabelList)
for label in self.labels:
separated_lls[label.value].add(label)
for ll in separated_lls.values():
ll.idx = self.idx
return separated_lls
def labels_in_range(self, start, end, fully_included=False):
"""
        Return a list of labels that are within the given range.
Also labels that only overlap are included.
Args:
start(float): Start-time in seconds.
end(float): End-time in seconds.
fully_included(bool): If ``True``, only labels fully included
in the range are returned. Otherwise
also overlapping ones are returned.
(default ``False``)
Returns:
list: List of labels in the range.
Example:
>>> ll = LabelList(labels=[
>>> Label('a', 3.2, 4.5),
>>> Label('b', 5.1, 8.9),
>>> Label('c', 7.2, 10.5),
>>> Label('d', 10.5, 14)
>>>])
>>> ll.labels_in_range(6.2, 10.1)
[Label('b', 5.1, 8.9), Label('c', 7.2, 10.5)]
"""
if fully_included:
intervals = self.label_tree.envelop(start, end)
else:
intervals = self.label_tree.overlap(start, end)
return [iv.data for iv in intervals]
def ranges(self, yield_ranges_without_labels=False, include_labels=None):
"""
Generate all ranges of the label-list. A range is defined
as a part of the label-list for which the same labels are defined.
Args:
yield_ranges_without_labels(bool): If True also yields ranges for
which no labels are defined.
include_labels(list): If not empty, only the label values in
the list will be considered.
Returns:
generator: A generator which yields one range
(tuple start/end/list-of-labels) at a time.
Example:
>>> ll = LabelList(labels=[
>>> Label('a', 3.2, 4.5),
>>> Label('b', 5.1, 8.9),
>>> Label('c', 7.2, 10.5),
>>> Label('d', 10.5, 14)
>>>])
>>> ranges = ll.ranges()
>>> next(ranges)
            (3.2, 4.5, [<audiomate.annotations.Label at 0x1090527c8>])
>>> next(ranges)
(4.5, 5.1, [])
>>> next(ranges)
            (5.1, 7.2, [<audiomate.annotations.label.Label at 0x1090484c8>])
"""
tree_copy = self.label_tree.copy()
# Remove labels not included
if include_labels is not None:
for iv in list(tree_copy):
if iv.data.value not in include_labels:
tree_copy.remove(iv)
def reduce(x, y):
x.append(y)
return x
# Split labels when overlapping and merge equal ranges to a list of labels
tree_copy.split_overlaps()
tree_copy.merge_equals(data_reducer=reduce, data_initializer=[])
intervals = sorted(tree_copy)
last_end = intervals[0].begin
# yield range by range
for iv in intervals:
# yield an empty range if necessary
if yield_ranges_without_labels and iv.begin > last_end:
yield (last_end, iv.begin, [])
yield (iv.begin, iv.end, iv.data)
last_end = iv.end
def split(self, cutting_points, shift_times=False, overlap=0.0):
"""
Split the label-list into x parts and return them as new label-lists.
x is defined by the number of cutting-points
(``x == len(cutting_points) + 1``).
The result is a list of label-lists corresponding to each part.
Label-list 0 contains labels between ``0`` and ``cutting_points[0]``.
Label-list 1 contains labels between ``cutting_points[0]`` and
``cutting_points[1]``. And so on.
Args:
            cutting_points(list): List of floats defining the points in seconds,
                                  where the label-list is split.
            shift_times(bool): If True, start and end-time are shifted in
                               split label-lists. So the start is relative
                               to the cutting point and not to the beginning
                               of the original label-list.
            overlap(float): Amount of overlap in seconds. This amount is
                            subtracted from a start-cutting-point, and added
                            to an end-cutting-point.
Returns:
            list: A list of :class:`audiomate.annotations.LabelList`.
Example:
>>> ll = LabelList(labels=[
>>> Label('a', 0, 5),
>>> Label('b', 5, 10),
>>> Label('c', 11, 15),
>>>])
>>>
>>> res = ll.split([4.1, 8.9, 12.0])
>>> len(res)
4
>>> res[0].labels
[Label('a', 0.0, 4.1)]
>>> res[1].labels
[
Label('a', 4.1, 5.0),
Label('b', 5.0, 8.9)
]
>>> res[2].labels
[
Label('b', 8.9, 10.0),
Label('c', 11.0, 12.0)
]
>>> res[3].labels
[Label('c', 12.0, 15.0)]
If ``shift_times = True``, the times are adjusted to be relative
to the cutting-points for every label-list but the first.
>>> ll = LabelList(labels=[
>>> Label('a', 0, 5),
>>> Label('b', 5, 10),
>>>])
>>>
            >>> res = ll.split([4.6], shift_times=True)
>>> len(res)
            2
>>> res[0].labels
[Label('a', 0.0, 4.6)]
>>> res[1].labels
[
Label('a', 0.0, 0.4),
Label('b', 0.4, 5.4)
]
"""
if len(cutting_points) == 0:
raise ValueError('At least one cutting-point is needed!')
# we have to loop in sorted order
cutting_points = sorted(cutting_points)
splits = []
iv_start = 0.0
for i in range(len(cutting_points) + 1):
if i < len(cutting_points):
iv_end = cutting_points[i]
else:
iv_end = float('inf')
# get all intervals intersecting range
intervals = self.label_tree.overlap(
iv_start - overlap,
iv_end + overlap
)
cp_splits = LabelList(idx=self.idx)
# Extract labels from intervals with updated times
for iv in intervals:
label = copy.deepcopy(iv.data)
label.start = max(0, iv_start - overlap, label.start)
label.end = min(iv_end + overlap, label.end)
if shift_times:
orig_start = max(0, iv_start - overlap)
label.start -= orig_start
label.end -= orig_start
cp_splits.add(label)
splits.append(cp_splits)
iv_start = iv_end
return splits
#
# Convenience Constructors
#
@classmethod
def create_single(cls, value, idx='default'):
"""
Create a label-list with a single label
containing the given value.
"""
return LabelList(idx=idx, labels=[
Label(value=value)
])
@classmethod
def with_label_values(cls, values, idx='default'):
"""
Create a new label-list containing labels with the given values.
All labels will have default start/end values of 0 and ``inf``.
Args:
values(list): List of values(str) that should be created and
appended to the label-list.
idx(str): The idx of the label-list.
Returns:
(LabelList): New label-list.
Example:
>>> ll = LabelList.with_label_values(['a', 'x', 'z'], idx='letters')
>>> ll.idx
'letters'
>>> ll.labels
[
Label('a', 0, inf),
Label('x', 0, inf),
Label('z', 0, inf),
]
"""
ll = LabelList(idx=idx)
for label_value in values:
ll.add(Label(label_value))
return ll
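# A minimal usage sketch, assuming the Label class imported at the top of this
# module and the LabelList class defined above; the label values and times
# below are illustrative only, not part of the original library code.
if __name__ == '__main__':
    ll = LabelList(idx='speech', labels=[
        Label('hello', 0.0, 2.0),
        Label('hello', 1.5, 3.0),   # overlaps the previous 'hello' label
        Label('world', 4.0, 6.0),
    ])
    ll.merge_overlaps()                      # the two overlapping 'hello' labels become one
    print(dict(ll.label_count()))            # expected: {'hello': 1, 'world': 1}
    print(dict(ll.label_total_duration()))   # expected: {'hello': 3.0, 'world': 2.0}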
|
[
"intervaltree.IntervalTree",
"intervaltree.Interval",
"collections.defaultdict",
"copy.deepcopy"
] |
[((1013, 1040), 'intervaltree.IntervalTree', 'intervaltree.IntervalTree', ([], {}), '()\n', (1038, 1040), False, 'import intervaltree\n'), ((7210, 7240), 'collections.defaultdict', 'collections.defaultdict', (['float'], {}), '(float)\n', (7233, 7240), False, 'import collections\n'), ((8603, 8631), 'collections.defaultdict', 'collections.defaultdict', (['int'], {}), '(int)\n', (8626, 8631), False, 'import collections\n'), ((13319, 13353), 'collections.defaultdict', 'collections.defaultdict', (['LabelList'], {}), '(LabelList)\n', (13342, 13353), False, 'import collections\n'), ((1858, 1914), 'copy.deepcopy', 'copy.deepcopy', (['[iv.data for iv in self.label_tree]', 'memo'], {}), '([iv.data for iv in self.label_tree], memo)\n', (1871, 1914), False, 'import copy\n'), ((3249, 3301), 'intervaltree.Interval', 'intervaltree.Interval', (['label.start', 'label.end', 'label'], {}), '(label.start, label.end, label)\n', (3270, 3301), False, 'import intervaltree\n'), ((20218, 20240), 'copy.deepcopy', 'copy.deepcopy', (['iv.data'], {}), '(iv.data)\n', (20231, 20240), False, 'import copy\n')]
|
import pandas as pd
from OCAES import ocaes
# ----------------------
# create and run model
# ----------------------
data = pd.read_csv('timeseries_inputs_2019.csv')
inputs = ocaes.get_default_inputs()
# inputs['C_well'] = 5000.0
# inputs['X_well'] = 50.0
# inputs['L_well'] = 50.0
# inputs['X_cmp'] = 0
# inputs['X_exp'] = 0
model = ocaes(data, inputs)
df, s = model.get_full_results()
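# df: full time-series results, s: scalar summary values (an assumption based
# on the CSV file names used below)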
revenue, LCOE, COVE, avoided_emissions = model.post_process(s)
s['revenue'] = revenue
s['LCOE'] = LCOE
s['COVE'] = COVE
s['avoided_emissions'] = avoided_emissions
df.to_csv('results_timeseries.csv')
s.to_csv('results_values.csv')
print(model.calculate_LCOE(s))
# ----------------------
# create plots using built-in functions
# ----------------------
model.plot_overview()
model.plot_power_energy()
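# ----------------------
# reload and inspect the saved scalar results
# ----------------------
# A minimal follow-up sketch, assuming the CSV files written above exist on disk.
summary = pd.read_csv('results_values.csv', index_col=0)
print(summary.head())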
|
[
"OCAES.ocaes.get_default_inputs",
"OCAES.ocaes",
"pandas.read_csv"
] |
[((125, 166), 'pandas.read_csv', 'pd.read_csv', (['"""timeseries_inputs_2019.csv"""'], {}), "('timeseries_inputs_2019.csv')\n", (136, 166), True, 'import pandas as pd\n'), ((176, 202), 'OCAES.ocaes.get_default_inputs', 'ocaes.get_default_inputs', ([], {}), '()\n', (200, 202), False, 'from OCAES import ocaes\n'), ((335, 354), 'OCAES.ocaes', 'ocaes', (['data', 'inputs'], {}), '(data, inputs)\n', (340, 354), False, 'from OCAES import ocaes\n')]
|