code
stringlengths 22
1.05M
| apis
listlengths 1
3.31k
| extract_api
stringlengths 75
3.25M
|
---|---|---|
"""
"""
import sys
import itertools
from climate.lib import mapper
from climate.lib import utilities
from climate.lib import inquirers
from climate.lib.inquirers import INQUIRER_TABLE
from climate.lib.converters import CONVERSION_TABLE
from climate.lib.converters import map_int, map_float, map_bool, map_list
from . import Parsing
from . import Help
class Menu(object):
"""Class For Handling Application Menu Navigation
Will be disabled if the setting 'use_menu' is set to false
Parameters
----------
cli_data: dict
Cli data passed through from main CliMate class.
"""
current_local = []
locations = []
help_mapper = {
"Show Commands": "display_help",
"Show Documentation": "show_docs"
}
standard_option_mapper = {
"Help": "open_help_menu",
"Exit": "exit_application"
}
def __init__(self, cli_data, settings):
self.cli_data = cli_data
self.settings = settings
def open_main_menu(self):
if "menu" not in self.cli_data["general"]:
self.standard_navigation()
else:
self.locations = self.cli_data["general"]["menu"]
self.menued_navigation()
def standard_navigation(self):
commands = self.cli_data["commands"]
command_keys = [key for key in commands]
command_names = [commands[key]["name"] for key in commands]
menu_names = command_names.copy()
menu_names += self.add_menu_options()
app_name = self.settings["app_name"]
menu_message = app_name if app_name is not None else "Main Menu"
command_menu_name = inquirers.inquirer_list(
menu_names, menu_message)
if command_menu_name in command_names:
command_name_index = command_names.index(command_menu_name)
command_key = command_keys[command_name_index]
command_args = commands[command_key]["arguments"]
parsed_command_args = \
Parsing.resolve_command_arguments(
command_args, self.cli_data)
command_target = commands[command_key]["target"]
command_arguments = self.menu_arguments(parsed_command_args)
Parsing.call_target(
command_key, command_target, command_arguments, self.settings)
else:
# standard application option was chosen (i.e one not in cli file)
method_string = self.standard_option_mapper[command_menu_name]
getattr(self, method_string)()
def menued_navigation(self):
while True:
command_found = False
if not self.current_local:
local = self.locations
else:
local = self.resolve_local(self.current_local)
if isinstance(local, dict):
local_func, local_args = inquirers.get_inquirer("list")
local_args["choices"] = local
if self.current_local:
local["Back"] = "navigate_back"
local_args["message"] = self.current_local[-1]
else:
# add buttons to main menu
for key in self.standard_option_mapper:
if self.settings[f"menu_{key.lower()}"]:
local[key] = self.standard_option_mapper[key]
app_name = self.settings["app_name"]
local_args["message"] = \
app_name if app_name is not None else "Main Menu"
nav_point = local_func(**local_args)
self.current_local += [nav_point]
elif isinstance(local, str):
try:
self.navigate_back()
if local not in [*self.cli_data["commands"]]:
command_found = False
getattr(self, local)()
else:
command_found = True
chosen_comamnd = self.cli_data["commands"][local]
command_target = chosen_comamnd["target"]
args = chosen_comamnd["arguments"]
resolved_arguments = \
Parsing.resolve_command_arguments(args, self.cli_data)
arguments = self.menu_arguments(resolved_arguments)
Parsing.call_target(command_target, arguments)
except KeyError:
TypeError("Error in chosen command.")
else:
raise TypeError("Invalid Datatype Found For Menu Navigation.")
if command_found:
if self.settings["exit_upon_command"]:
sys.exit()
def open_help_menu(self):
help_func, help_args = inquirers.get_inquirer("choices")
help_args["choices"] = [key for key in self.help_mapper]
message = self.settings["help_menu_message"]
help_args["message"] = message if message is not None else "Help"
help_choice = help_func(**help_args)
help_handler = Help(self.cli_data, self.settings)
help_method_string = self.help_mapper[help_choice]
getattr(help_handler, help_method_string)()
def exit_application(self):
print("Exiting Application")
sys.exit(0)
def add_menu_options(self):
navigations = []
for key in self.settings:
if "menu" == key.split("_")[0]:
navigations.append(
[self.settings[key], key.split("_")[1].capitalize()])
return [nav[1] for nav in navigations if nav[0]]
def resolve_local(self, keys):
local = self.locations
for key in keys:
local = local[key]
return local
def navigate_back(self):
del self.current_local[-1]
@staticmethod
def menu_arguments(command_args):
"""Uses Pyinquirer to get desired arguments through MenuHandler.
Parameters
----------
command_args: dict
Dictionary containing the command arguments.
Returns
-------
arguments: dict
Dictionary containing desired and chosen arguments.
"""
try:
arguments = {}
for arg in command_args:
inquirer_function, inquirer_args = \
inquirers.get_inquirer(command_args[arg]["type"])
inquirer_args["message"] = command_args[arg]["name"]
if "default" in command_args[arg]:
inquirer_args["message"] = "{} ({})".format(inquirer_args["message"], command_args[arg]["default"])
if command_args[arg]["type"] == "choices":
if "map" in command_args[arg]:
inquirer_args["choices"] = \
mapper.map_string(
command_args[arg]["map"], arguments)
else:
inquirer_args["choices"] = \
[c for c in command_args[arg]["choices"].values()]
if "fallback" in command_args[arg]:
fallback_option = command_args[arg]["fallback"]
inquirer_args["choices"] += [fallback_option]
def fallback(x):
if x == command_args[arg]["fallback"]:
if "default" not in command_args[arg]:
return None
else:
return command_args[arg]["default"]
else:
choices = command_args[arg]["choices"]
return list(choices.keys())[
list(choices.values()).index(x)]
inquirer_args["lambda_filter"] = fallback
else:
if "default" in command_args[arg]:
if "lambda_filter" in inquirer_args:
def full_conversion(x):
x = command_args[arg]["default"] if x.strip() is "" else x
if command_args[arg]["type"] == "float":
return float(x)
elif command_args[arg]["type"] == "int":
return int(x)
else:
return x
inquirer_args["lambda_filter"] = full_conversion
else:
inquirer_args["lambda_filter"] = lambda x: command_args[arg]["default"] if x.strip() is "" else x
arguments[arg] = inquirer_function(**inquirer_args)
except KeyError:
raise KeyError(f"Invalid Command argument '{arg}'")
return arguments
|
[
"climate.lib.mapper.map_string",
"sys.exit",
"climate.lib.inquirers.inquirer_list",
"climate.lib.inquirers.get_inquirer"
] |
[((1644, 1693), 'climate.lib.inquirers.inquirer_list', 'inquirers.inquirer_list', (['menu_names', 'menu_message'], {}), '(menu_names, menu_message)\n', (1667, 1693), False, 'from climate.lib import inquirers\n'), ((4833, 4866), 'climate.lib.inquirers.get_inquirer', 'inquirers.get_inquirer', (['"""choices"""'], {}), "('choices')\n", (4855, 4866), False, 'from climate.lib import inquirers\n'), ((5354, 5365), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (5362, 5365), False, 'import sys\n'), ((2872, 2902), 'climate.lib.inquirers.get_inquirer', 'inquirers.get_inquirer', (['"""list"""'], {}), "('list')\n", (2894, 2902), False, 'from climate.lib import inquirers\n'), ((6415, 6464), 'climate.lib.inquirers.get_inquirer', 'inquirers.get_inquirer', (["command_args[arg]['type']"], {}), "(command_args[arg]['type'])\n", (6437, 6464), False, 'from climate.lib import inquirers\n'), ((4760, 4770), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4768, 4770), False, 'import sys\n'), ((6898, 6952), 'climate.lib.mapper.map_string', 'mapper.map_string', (["command_args[arg]['map']", 'arguments'], {}), "(command_args[arg]['map'], arguments)\n", (6915, 6952), False, 'from climate.lib import mapper\n')]
|
from django.conf import settings
from django.utils.decorators import method_decorator
from django.views.decorators.cache import cache_page
from django.core.cache.backends.base import DEFAULT_TIMEOUT
from rest_framework import viewsets
from rest_framework.permissions import IsAuthenticatedOrReadOnly, IsAdminUser
from rest_framework_extensions.mixins import NestedViewSetMixin
from common.permissions import IsAuthorOrReadOnly
from .models import Comment, Post, Tag
from .serializers import CommentSerializer, PostSerializer, TagSerializer
CACHE_TIMEOUT = getattr(settings, 'CACHE_TIMEOUT', DEFAULT_TIMEOUT)
class PostViewSet(NestedViewSetMixin, viewsets.ModelViewSet):
queryset = Post.objects.all()
serializer_class = PostSerializer
lookup_field = 'slug'
permission_classes = [IsAuthenticatedOrReadOnly, IsAuthorOrReadOnly]
@method_decorator(cache_page(CACHE_TIMEOUT))
def list(self, request, *args, **kwargs):
return super().list(request, *args, **kwargs)
class CommentViewSet(NestedViewSetMixin, viewsets.ModelViewSet):
queryset = Comment.objects.all()
serializer_class = CommentSerializer
permission_classes = [IsAuthenticatedOrReadOnly, IsAuthorOrReadOnly]
class TagViewSet(NestedViewSetMixin, viewsets.ModelViewSet):
queryset = Tag.objects.all()
serializer_class = TagSerializer
lookup_field = 'slug'
permission_classes = [IsAuthenticatedOrReadOnly]
@method_decorator(cache_page(CACHE_TIMEOUT))
def list(self, request, *args, **kwargs):
return super().list(request, *args, **kwargs)
|
[
"django.views.decorators.cache.cache_page"
] |
[((868, 893), 'django.views.decorators.cache.cache_page', 'cache_page', (['CACHE_TIMEOUT'], {}), '(CACHE_TIMEOUT)\n', (878, 893), False, 'from django.views.decorators.cache import cache_page\n'), ((1448, 1473), 'django.views.decorators.cache.cache_page', 'cache_page', (['CACHE_TIMEOUT'], {}), '(CACHE_TIMEOUT)\n', (1458, 1473), False, 'from django.views.decorators.cache import cache_page\n')]
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
import random
import math
import logging
import itertools
from fairseq import utils
from fairseq.data import FairseqDataset, LanguagePairDataset
from .noise_util import apply_span_mask, apply_random_mask, apply_entity_mask_for_mlm
from fairseq.data import data_utils
logger = logging.getLogger(__name__)
def collate(
samples,
pad_idx,
eos_idx,
left_pad_source=False,
left_pad_target=False,
input_feeding=True,
pad_to_length=None,
):
if len(samples) == 0:
return {}
def merge(key, left_pad, move_eos_to_beginning=False, pad_to_length=None):
return data_utils.collate_tokens(
[s[key] for s in samples],
pad_idx, eos_idx, left_pad, move_eos_to_beginning,
pad_to_length=pad_to_length,
)
# sort by descending source length
src_lengths = torch.LongTensor([s['source'].ne(pad_idx).long().sum() for s in samples])
src_lengths, sort_order = src_lengths.sort(descending=True)
id = torch.LongTensor([s['id'] for s in samples]).index_select(0, sort_order)
src_tokens = merge('source', left_pad=left_pad_source).index_select(0, sort_order)
# sentence classification
cls_target = merge('cls_target', left_pad=left_pad_target).index_select(0, sort_order).view(-1)
# masked language model
mlm_target = merge('mlm_target', left_pad=left_pad_target).index_select(0, sort_order)
# causal language model
prev_output_tokens = merge('prev_output_tokens', left_pad=left_pad_target).index_select(0, sort_order)
prev_output_positions = merge('prev_output_positions', left_pad=left_pad_target).index_select(0, sort_order)
clm_target = merge('clm_target', left_pad=left_pad_target).index_select(0, sort_order)
# sequence tagging
tag_target = merge('tag_target', left_pad=left_pad_target).index_select(0, sort_order)
ntokens = src_lengths.sum().item()
batch = {
'id': id,
'nsentences': len(samples),
'ntokens': ntokens,
'net_input': {
'src_tokens': src_tokens,
'src_lengths': src_lengths,
'prev_output_tokens': prev_output_tokens,
'prev_output_positions': prev_output_positions,
},
'cls_target': cls_target,
'mlm_target': mlm_target,
'clm_target': clm_target,
'tag_target': tag_target,
}
return batch
class KnowledgeLanguagePairDataset(LanguagePairDataset):
@classmethod
def apply_mask(cls, dataset: torch.utils.data.Dataset, *args, **kwargs):
"""Return the source and target datasets for masked LM training."""
return cls(dataset, *args, **kwargs)
def __init__(
self, src, src_sizes, src_dict,
tgt=None, tgt_sizes=None, tgt_dict=None,
meta=None, meta_sizes=None, meta_dict=None,
left_pad_source=True, left_pad_target=False,
max_source_positions=1024, max_target_positions=1024,
shuffle=True,
mask_idx=None,
mask_prob=0.15, leave_unmasked_prob=0.1, random_token_prob=0.1,
mask_whole_words=None,
block_size=64,
sub_task=None,
):
super().__init__(src, src_sizes, src_dict,
tgt=tgt, tgt_sizes=tgt_sizes, tgt_dict=tgt_dict,
left_pad_source=left_pad_source, left_pad_target=left_pad_target,
shuffle=shuffle)
self.meta = meta
self.meta_sizes = meta_sizes
self.meta_dict = meta_dict
self.mask_idx = mask_idx
self.mask_prob = mask_prob
assert len(meta_sizes) == len(src_sizes)
self.sub_task = sub_task
self.cls_pad = self.src_dict.pad() # 0 in bert_dict, 1 in fairseq_dict
self.block_size = block_size
self.max_source_positions = max_source_positions
self.max_target_positions = max_target_positions
self.pred_probs = torch.FloatTensor(
[1 - leave_unmasked_prob - random_token_prob, leave_unmasked_prob, random_token_prob])
self.debug_size_for_mlm = 0
self.debug_size_for_clm = 0
self.debug_size_for_tag = 0
self.debug_size_for_cls = 0
self.debug_size_for_titlegen = 0
def _parse_ocr_data(self, src_item):
"""
Args:
src_item:
- title [SEP] content [SEP] title [SEP] content.
- used for title generation
- file: discovery_all.ocr
"""
def _get_title_and_content(sep_idx):
title_pos = []
content_pos = []
for i, pos in enumerate(sep_idx):
last_pos = sep_idx[i - 1] if i > 0 else 1
pos_range = np.arange(last_pos + 1,
pos) if pos > last_pos + 1 else None
if i % 2 == 0:
title_pos.append(pos_range)
else:
content_pos.append(pos_range)
if len(content_pos) < len(title_pos):
content_pos.append(None)
return title_pos, content_pos
src_item_np = np.array(src_item)
sep_idx = np.where(src_item_np == self.src_dict.eos())[0]
title_positions, content_positions = _get_title_and_content(sep_idx)
source = src_item[:1]
clm_target = np.array([], dtype=src_item_np.dtype)
prev_output_positions_list = []
sep_positions_list = []
for title_position, content_position in zip(title_positions, content_positions):
if title_position is not None:
old_len = len(source)
source = np.append(source, src_item[title_position])
clm_target = np.append(clm_target, src_item[title_position])
prev_output_positions_list = prev_output_positions_list + list(range(old_len, len(source)))
if content_position is not None:
source = np.append(source, src_item[content_position])
sep_positions_list.append(len(source) - 1)
sep_positions_list = [v for v in sep_positions_list if v != 0 and v != len(source) - 1]
source = torch.LongTensor(np.append(source, self.src_dict.eos()))
clm_target = torch.LongTensor(clm_target)
return source, clm_target, prev_output_positions_list, sep_positions_list
def _get_example_for_boundary_detection(self, index, src_item):
""" TokenClassification
Task: sequence tagging
"""
source, _, _, sep_positions_list = self._parse_ocr_data(src_item)
tag_target = torch.from_numpy(np.full(len(source), 1)) # 0: pad 1: negative 2: positive
tag_target[0] = self.cls_pad
tag_target[-1] = self.cls_pad
tag_target[sep_positions_list] = 2
if self.debug_size_for_tag < 2:
self.debug_size_for_tag += 1
logger.info('========= index: {} == boundary detection ======='.format(str(index)))
logger.info('src_raw: ' + ''.join([self.src_dict[ii] for ii in src_item]))
logger.info('src: ' + ''.join([self.src_dict[ii] for ii in source]))
logger.info('tag_target: ' + ''.join([str(ii.item()) for ii in tag_target]))
example = {
'id': index,
'source': source,
'cls_target': torch.LongTensor([self.cls_pad]),
'mlm_target': torch.from_numpy(np.full(len(source), self.src_dict.pad())),
'clm_target': torch.from_numpy(np.full(1, self.src_dict.pad())),
'tag_target': tag_target,
'prev_output_tokens': torch.from_numpy(np.full(1, 1)),
'prev_output_positions': torch.LongTensor([1]),
}
return example
def _create_dummy_data(self, task, **kwargs):
if task == 'cls':
src_label = torch.LongTensor([-1])
return src_label
if task == 'mlm':
mlm_target = torch.from_numpy(np.full(kwargs['src_sz'], self.src_dict.pad()))
return mlm_target
if task == 'clm':
prev_output_positions = torch.LongTensor([1])
prev_output_tokens = torch.from_numpy(np.full(1, 1))
clm_target = torch.from_numpy(np.full(1, self.src_dict.pad()))
return prev_output_positions, prev_output_tokens, clm_target
def _get_example_for_title_generation(self, index, src_item):
""" title generation
Task: CLM + MLM
"""
source, clm_target, prev_output_positions_list, _ = self._parse_ocr_data(src_item)
# build data for MLM (random mask)
mlm_positions = apply_random_mask(len(source), ignore_index=set(prev_output_positions_list))
masked_pos = sorted(list(set(prev_output_positions_list + mlm_positions)))
mlm_target = torch.from_numpy(np.full(len(source), self.src_dict.pad()))
mlm_target[mlm_positions] = source[mlm_positions]
# build data for CLM (mask all title)
prev_output_positions = np.array(prev_output_positions_list)
prev_output_tokens = source[prev_output_positions - 1].clone()
prev_output_positions = torch.LongTensor(prev_output_positions)
if self.debug_size_for_titlegen < 2:
logger.info('========= index: {} == title generation ======='.format(str(index)))
logger.info('src_raw: ' + ''.join([self.src_dict[ii] for ii in src_item]))
logger.info('src: ' + ''.join([self.src_dict[ii] for ii in source]))
source[masked_pos] = self.replace(source[masked_pos])
if self.debug_size_for_titlegen < 2:
self.debug_size_for_titlegen += 1
logger.info('src_mask: ' + ''.join([self.src_dict[ii] for ii in source]))
logger.info('clm_pos: ' + ' '.join([str(v) for v in prev_output_positions_list]))
logger.info('clm_input: ' + ''.join([self.src_dict[ii] for ii in prev_output_tokens]))
logger.info('clm_target: ' + ''.join([self.src_dict[ii] for ii in clm_target]))
logger.info(
'mlm_target:' + ''.join([self.src_dict[ii] for ii in mlm_target if ii != self.src_dict.pad_index]))
if prev_output_tokens.numel() == 0:
prev_output_positions, prev_output_tokens, clm_target = self._create_dummy_data('clm')
example = {
'id': index,
'source': source,
'cls_target': self._create_dummy_data('cls'),
'mlm_target': mlm_target,
'clm_target': clm_target,
'tag_target': torch.from_numpy(np.full(len(source), self.cls_pad)),
'prev_output_tokens': prev_output_tokens,
'prev_output_positions': prev_output_positions,
}
return example
def _get_example_for_cls(self, index, src_item, src_meta):
assert 'cls' in self.sub_task
src_meta = np.array([int(self.meta_dict[k]) if k != self.meta_dict.unk() else 10000 for k in src_meta])
src_sz = len(src_item)
assert len(src_meta) % 2 == 1
src_label, src_entity = torch.LongTensor(src_meta[:1]), src_meta[1:]
# build data for MLM & CLM
mlm_target = torch.from_numpy(np.full(src_sz, self.src_dict.pad()))
prev_output_positions, prev_output_tokens, clm_target = self._create_dummy_data('clm')
if self.debug_size_for_cls < 2:
logger.info('========= index: {} ==== MLM and CLM mask ====='.format(str(index)))
logger.info('src: ' + ''.join([self.src_dict[ii] for ii in src_item]))
if self.debug_size_for_cls < 2:
self.debug_size_for_cls += 1
example = {
'id': index,
'source': src_item,
'cls_target': src_label,
'mlm_target': mlm_target,
'clm_target': clm_target,
'tag_target': torch.from_numpy(np.full(len(src_item), self.cls_pad)),
'prev_output_tokens': prev_output_tokens,
'prev_output_positions': prev_output_positions,
}
return example
def _get_example_for_mlm(self, index, src_item, src_meta):
assert 'mlm' in self.sub_task
src_sz = len(src_item)
src_label = src_meta[0]
src_entity = src_meta[1:]
src_label = torch.LongTensor([int(self.meta_dict[src_label])]) \
if src_label >= self.meta_dict.nspecial else self._create_dummy_data('cls')
src_entity = np.array([int(self.meta_dict[k]) for k in src_entity])
assert len(src_entity) % 2 == 0
src_entity = np.array(src_entity.reshape(-1, 2)) + 1 # offset for [CLS]
# build data for MLM in Encoder
mlm_positions_1 = apply_entity_mask_for_mlm(src_sz, src_entity) # BERT & entity
mlm_positions_2 = apply_random_mask(src_sz, ignore_index=set(mlm_positions_1)) # BERT
mlm_position_list = sorted(list(set(mlm_positions_1 + mlm_positions_2)))
assert len(mlm_positions_1) + len(mlm_positions_2) == len(mlm_position_list)
masked_pos_list = sorted(list(set(mlm_position_list)))
assert masked_pos_list[0] > 0 # no mask in bos
masked_pos = np.array(masked_pos_list)
mlm_target = torch.from_numpy(np.full(src_sz, self.src_dict.pad()))
mlm_target[mlm_position_list] = src_item[mlm_position_list]
# build data for CLM in Decoder
prev_output_positions, prev_output_tokens, clm_target = self._create_dummy_data('clm')
if self.debug_size_for_mlm < 2:
logger.info('========= index: {} ==== MLM mask ====='.format(str(index)))
logger.info('src: ' + ''.join([self.src_dict[ii] for ii in src_item]))
logger.info('src_entity: ' + ' '.join(
[''.join([self.src_dict[src_item[ii]] if ii < src_sz else '' for ii in range(ent[0], ent[1])]) for ent
in src_entity]))
src_item[masked_pos] = self.replace(src_item[masked_pos])
if self.debug_size_for_mlm < 2:
self.debug_size_for_mlm += 1
logger.info('src_mask: ' + ''.join([self.src_dict[ii] for ii in src_item]))
logger.info('mlm_pos: ' + ' '.join([str(v) for v in mlm_position_list]))
logger.info(
'mlm_target:' + ''.join([self.src_dict[ii] for ii in mlm_target if ii != self.src_dict.pad_index]))
if prev_output_tokens.numel() == 0:
prev_output_positions, prev_output_tokens, clm_target = self._create_dummy_data('clm')
example = {
'id': index,
'source': src_item,
'cls_target': src_label,
'mlm_target': mlm_target,
'clm_target': clm_target,
'tag_target': torch.from_numpy(np.full(len(src_item), self.cls_pad)),
'prev_output_tokens': prev_output_tokens,
'prev_output_positions': prev_output_positions,
}
return example
def _get_example_for_clm(self, index, src_item, src_meta):
assert 'clm' in self.sub_task
src_meta = np.array([int(self.meta_dict[k])
if k < self.meta_dict.nspecial else None for k in src_meta])
src_sz = len(src_item)
assert len(src_meta) % 2 == 1
src_label, src_entity = torch.LongTensor(src_meta[:1]), src_meta[1:]
src_entity = np.array(src_entity.reshape(-1, 2)) + 1
src_label = torch.LongTensor(np.array([None]))
# build data for CLM in Decoder
clm_position_list = np.array(apply_span_mask(src_sz-1) + 1) # start at 1
prev_output_positions = clm_position_list
prev_output_tokens = src_item[prev_output_positions - 1].clone()
clm_target = src_item[prev_output_positions].clone()
prev_output_positions = torch.LongTensor(prev_output_positions)
# build data for MLM in Encoder
mlm_position_list = []
mlm_target = torch.from_numpy(np.full(src_sz, self.src_dict.pad()))
masked_pos = prev_output_positions
if self.debug_size_for_clm < 2:
logger.info('========= index: {} ==== CLM Mask ====='.format(str(index)))
logger.info('src: ' + ''.join([self.src_dict[ii] for ii in src_item]))
logger.info('src_entity: ' + ' '.join(
[''.join([self.src_dict[src_item[ii]] if ii < src_sz else '' for ii in range(ent[0], ent[1])]) for ent
in src_entity]))
src_item[masked_pos] = self.replace(src_item[masked_pos])
if self.debug_size_for_clm < 2:
self.debug_size_for_clm += 1
logger.info('src_mask: ' + ''.join([self.src_dict[ii] for ii in src_item]))
logger.info('clm_pos: ' + ' '.join([str(v) for v in clm_position_list]))
logger.info('clm_input: ' + ''.join([self.src_dict[ii] for ii in prev_output_tokens]))
logger.info('clm_target: ' + ''.join([self.src_dict[ii] for ii in clm_target]))
logger.info('mlm_pos: ' + ' '.join([str(v) for v in mlm_position_list]))
logger.info(
'mlm_target:' + ''.join([self.src_dict[ii] for ii in mlm_target if ii != self.src_dict.pad_index]))
if prev_output_tokens.numel() == 0:
prev_output_positions, prev_output_tokens, clm_target = self._create_dummy_data('clm')
example = {
'id': index,
'source': src_item,
'cls_target': src_label,
'mlm_target': mlm_target,
'clm_target': clm_target,
'tag_target': torch.from_numpy(np.full(len(src_item), self.cls_pad)),
'prev_output_tokens': prev_output_tokens,
'prev_output_positions': prev_output_positions,
}
return example
def _get_example_for_multitask(self, index, src_item, src_meta):
""" multi-task joint training
tasks:
- mlm: masked language model (encoder-only)
- clm: causal language model (encoder-decoder or decoder-only)
- sentcls: sentence classification (encoder-only)
- tokencls: token classification, sequence tagging (encoder-only)
- spancls: token span classification, such as relation classification, entity classification (encoder-only)
"""
assert 'clm' in self.sub_task or 'mlm' in self.sub_task
src_meta = np.array([int(self.meta_dict[k]) if k != self.meta_dict.unk() else 10000 for k in src_meta])
src_sz = len(src_item)
assert len(src_meta) % 2 == 1
src_label, src_entity = torch.LongTensor(src_meta[:1]), src_meta[1:]
src_entity = np.array(src_entity.reshape(-1, 2)) + 1 # offset for [CLS]
if 'sentcls' not in self.sub_task:
src_label = torch.LongTensor([self.cls_pad])
mlm_position_list, clm_position_list = [], []
if 'clm' in self.sub_task:
clm_position_list = apply_span_mask(src_sz)
prev_output_positions = np.array(clm_position_list)
if 'mlm' in self.sub_task:
mlm_positions_1 = apply_entity_mask_for_mlm(src_sz, src_entity,
ignore_index=set(clm_position_list)) # BERT & entity
mlm_positions_2 = apply_random_mask(src_sz, ignore_index=set(clm_position_list + mlm_positions_1)) # BERT
mlm_position_list = sorted(list(set(mlm_positions_1 + mlm_positions_2)))
assert len(mlm_positions_1) + len(mlm_positions_2) == len(mlm_position_list)
masked_pos_list = sorted(list(set(clm_position_list + mlm_position_list)))
assert len(clm_position_list) + len(mlm_position_list) == len(masked_pos_list)
assert masked_pos_list[0] > 0
masked_pos = np.array(masked_pos_list)
# build data for CLM in Decoder
prev_output_tokens = src_item[prev_output_positions - 1].clone()
clm_target = src_item[prev_output_positions].clone()
prev_output_positions = torch.LongTensor(prev_output_positions)
# build data for MLM in Encoder
mlm_target = torch.from_numpy(np.full(src_sz, self.src_dict.pad()))
mlm_target[mlm_position_list] = src_item[mlm_position_list]
if self.debug_size_for_mlm < 2:
logger.info('========= index: {} ==== MLM and CLM mask ====='.format(str(index)))
logger.info('src: ' + ''.join([self.src_dict[ii] for ii in src_item]))
logger.info('src_entity: ' + ' '.join(
[''.join([self.src_dict[src_item[ii]] if ii < src_sz else '' for ii in range(ent[0], ent[1])]) for ent
in src_entity]))
src_item[masked_pos] = self.replace(src_item[masked_pos])
if self.debug_size_for_mlm < 2:
self.debug_size_for_mlm += 1
logger.info('src_mask: ' + ''.join([self.src_dict[ii] for ii in src_item]))
logger.info('clm_pos: ' + ' '.join([str(v) for v in clm_position_list]))
logger.info('clm_input: ' + ''.join([self.src_dict[ii] for ii in prev_output_tokens]))
logger.info('clm_target: ' + ''.join([self.src_dict[ii] for ii in clm_target]))
logger.info('mlm_pos: ' + ' '.join([str(v) for v in mlm_position_list]))
logger.info(
'mlm_target:' + ''.join([self.src_dict[ii] for ii in mlm_target if ii != self.src_dict.pad_index]))
if prev_output_tokens.numel() == 0:
prev_output_positions, prev_output_tokens, clm_target = self._create_dummy_data('clm')
example = {
'id': index,
'source': src_item,
'cls_target': src_label,
'mlm_target': mlm_target,
'clm_target': clm_target,
'tag_target': torch.from_numpy(np.full(len(src_item), self.cls_pad)),
'prev_output_tokens': prev_output_tokens,
'prev_output_positions': prev_output_positions,
}
return example
def __getitem__(self, index):
"""
src: plain text
meta:
- content: cls_label ent1_start ent1_end ent2_start ent2_end
- desc: cls_label 0 represent no label, it should be skipped in cls task.
TODO:
dynamic_span_length, dynamic_total_length
"""
src_item = self.src[index]
src_meta = self.meta[index]
sep_sz = (src_item == self.src_dict.eos()).sum()
if sep_sz > 1: # ocr data tasks: titlegen segcls, sentcls
if 'titlegen' in self.sub_task and 'segcls' in self.sub_task:
task_selector = random.random()
if task_selector > 0.5:
example = self._get_example_for_title_generation(index, src_item)
else:
example = self._get_example_for_title_generation(index, src_item)
# example = self._get_example_for_boundary_detection(index, src_item) # 这个再确认一下
elif 'segcls' in self.sub_task:
example = self._get_example_for_boundary_detection(index, src_item)
elif 'titlegen' in self.sub_task:
example = self._get_example_for_title_generation(index, src_item)
else:
return
return example
else: # product summary data tasks:
task_selector = random.random()
if task_selector > 0:
# if task_selector < 0:
# if task_selector < 0.4:
return self._get_example_for_mlm(index, src_item, src_meta)
elif task_selector < 0.7:
# elif task_selector < 2:
return self._get_example_for_clm(index, src_item, src_meta)
else:
return self._get_example_for_clm(index, src_item, src_meta)
# return self._get_example_for_cls(index, src_item, src_meta) #
return self._get_example_for_multitask(index, src_item, src_meta)
def collater(self, samples):
return collate(samples, self.src_dict.pad(), self.src_dict.eos())
def replace(self, x):
_x_real = x
_x_rand = _x_real.clone().random_(self.src_dict.nspecial, len(self.src_dict))
_x_mask = _x_real.clone().fill_(self.mask_idx)
probs = torch.multinomial(self.pred_probs, len(x), replacement=True)
_x = _x_mask * (probs == 0).long() + \
_x_real * (probs == 1).long() + \
_x_rand * (probs == 2).long()
return _x
|
[
"fairseq.data.data_utils.collate_tokens",
"numpy.full",
"torch.LongTensor",
"torch.FloatTensor",
"random.random",
"numpy.append",
"numpy.array",
"numpy.arange",
"logging.getLogger"
] |
[((488, 515), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (505, 515), False, 'import logging\n'), ((842, 978), 'fairseq.data.data_utils.collate_tokens', 'data_utils.collate_tokens', (['[s[key] for s in samples]', 'pad_idx', 'eos_idx', 'left_pad', 'move_eos_to_beginning'], {'pad_to_length': 'pad_to_length'}), '([s[key] for s in samples], pad_idx, eos_idx,\n left_pad, move_eos_to_beginning, pad_to_length=pad_to_length)\n', (867, 978), False, 'from fairseq.data import data_utils\n'), ((4174, 4282), 'torch.FloatTensor', 'torch.FloatTensor', (['[1 - leave_unmasked_prob - random_token_prob, leave_unmasked_prob,\n random_token_prob]'], {}), '([1 - leave_unmasked_prob - random_token_prob,\n leave_unmasked_prob, random_token_prob])\n', (4191, 4282), False, 'import torch\n'), ((5364, 5382), 'numpy.array', 'np.array', (['src_item'], {}), '(src_item)\n', (5372, 5382), True, 'import numpy as np\n'), ((5577, 5614), 'numpy.array', 'np.array', (['[]'], {'dtype': 'src_item_np.dtype'}), '([], dtype=src_item_np.dtype)\n', (5585, 5614), True, 'import numpy as np\n'), ((6473, 6501), 'torch.LongTensor', 'torch.LongTensor', (['clm_target'], {}), '(clm_target)\n', (6489, 6501), False, 'import torch\n'), ((9216, 9252), 'numpy.array', 'np.array', (['prev_output_positions_list'], {}), '(prev_output_positions_list)\n', (9224, 9252), True, 'import numpy as np\n'), ((9356, 9395), 'torch.LongTensor', 'torch.LongTensor', (['prev_output_positions'], {}), '(prev_output_positions)\n', (9372, 9395), False, 'import torch\n'), ((13328, 13353), 'numpy.array', 'np.array', (['masked_pos_list'], {}), '(masked_pos_list)\n', (13336, 13353), True, 'import numpy as np\n'), ((15920, 15959), 'torch.LongTensor', 'torch.LongTensor', (['prev_output_positions'], {}), '(prev_output_positions)\n', (15936, 15959), False, 'import torch\n'), ((19066, 19093), 'numpy.array', 'np.array', (['clm_position_list'], {}), '(clm_position_list)\n', (19074, 19093), True, 'import numpy as np\n'), ((19839, 19864), 'numpy.array', 'np.array', (['masked_pos_list'], {}), '(masked_pos_list)\n', (19847, 19864), True, 'import numpy as np\n'), ((20072, 20111), 'torch.LongTensor', 'torch.LongTensor', (['prev_output_positions'], {}), '(prev_output_positions)\n', (20088, 20111), False, 'import torch\n'), ((1228, 1272), 'torch.LongTensor', 'torch.LongTensor', (["[s['id'] for s in samples]"], {}), "([s['id'] for s in samples])\n", (1244, 1272), False, 'import torch\n'), ((7555, 7587), 'torch.LongTensor', 'torch.LongTensor', (['[self.cls_pad]'], {}), '([self.cls_pad])\n', (7571, 7587), False, 'import torch\n'), ((7895, 7916), 'torch.LongTensor', 'torch.LongTensor', (['[1]'], {}), '([1])\n', (7911, 7916), False, 'import torch\n'), ((8052, 8074), 'torch.LongTensor', 'torch.LongTensor', (['[-1]'], {}), '([-1])\n', (8068, 8074), False, 'import torch\n'), ((8312, 8333), 'torch.LongTensor', 'torch.LongTensor', (['[1]'], {}), '([1])\n', (8328, 8333), False, 'import torch\n'), ((11267, 11297), 'torch.LongTensor', 'torch.LongTensor', (['src_meta[:1]'], {}), '(src_meta[:1])\n', (11283, 11297), False, 'import torch\n'), ((15420, 15450), 'torch.LongTensor', 'torch.LongTensor', (['src_meta[:1]'], {}), '(src_meta[:1])\n', (15436, 15450), False, 'import torch\n'), ((15563, 15579), 'numpy.array', 'np.array', (['[None]'], {}), '([None])\n', (15571, 15579), True, 'import numpy as np\n'), ((18662, 18692), 'torch.LongTensor', 'torch.LongTensor', (['src_meta[:1]'], {}), '(src_meta[:1])\n', (18678, 18692), False, 'import torch\n'), ((18855, 18887), 'torch.LongTensor', 'torch.LongTensor', (['[self.cls_pad]'], {}), '([self.cls_pad])\n', (18871, 18887), False, 'import torch\n'), ((23391, 23406), 'random.random', 'random.random', ([], {}), '()\n', (23404, 23406), False, 'import random\n'), ((5882, 5925), 'numpy.append', 'np.append', (['source', 'src_item[title_position]'], {}), '(source, src_item[title_position])\n', (5891, 5925), True, 'import numpy as np\n'), ((5955, 6002), 'numpy.append', 'np.append', (['clm_target', 'src_item[title_position]'], {}), '(clm_target, src_item[title_position])\n', (5964, 6002), True, 'import numpy as np\n'), ((6181, 6226), 'numpy.append', 'np.append', (['source', 'src_item[content_position]'], {}), '(source, src_item[content_position])\n', (6190, 6226), True, 'import numpy as np\n'), ((7842, 7855), 'numpy.full', 'np.full', (['(1)', '(1)'], {}), '(1, 1)\n', (7849, 7855), True, 'import numpy as np\n'), ((8384, 8397), 'numpy.full', 'np.full', (['(1)', '(1)'], {}), '(1, 1)\n', (8391, 8397), True, 'import numpy as np\n'), ((22642, 22657), 'random.random', 'random.random', ([], {}), '()\n', (22655, 22657), False, 'import random\n'), ((4958, 4986), 'numpy.arange', 'np.arange', (['(last_pos + 1)', 'pos'], {}), '(last_pos + 1, pos)\n', (4967, 4986), True, 'import numpy as np\n')]
|
#! /usr/bin/python
import numpy as np
import math
from scipy.spatial import KDTree
import openravepy as orpy
import transformations
from robotiqloader import RobotiqHand, InvalidTriangleException
import sys, time, logging, copy
import itertools
from utils import ObjectFileIO, clamp, compute_grasp_stability, normal_distance, position_distance, dist_in_range
import rospy
import scipy.optimize
class PlanningSceneInterface(object):
def __init__(self, or_env, robot_name):
""" Sets scene information for grasp planning that considers the whole robot.
@param or_env OpenRAVE environment containing the whole planning scene and robot
@param robot_name Name of the robot on which the hand is attached (for ik computations)
"""
self._or_env = or_env
self._robot = or_env.GetRobot(robot_name)
self._manip = self._robot.GetActiveManipulator()
self._arm_ik = orpy.databases.inversekinematics.InverseKinematicsModel(self._robot,
iktype=orpy.IkParameterization.Type.Transform6D)
# Make sure we have an ik solver
if not self._arm_ik.load():
rospy.loginfo('No IKFast solver found. Generating new one...')
self._arm_ik.autogenerate()
self._object = None
def set_target_object(self, obj_name):
self._object = self._or_env.GetKinBody(obj_name)
def check_arm_ik(self, hand_pose_object, grasp_conf, seed, open_hand_offset):
with self._or_env:
# compute target pose in world frame
object_pose = self._object.GetTransform()
hand_pose_scene = np.dot(object_pose, hand_pose_object)
# save current state
dof_values = self._robot.GetDOFValues()
# if we have a seed set it
arm_dofs = self._manip.GetArmIndices()
hand_dofs = self._manip.GetGripperIndices()
if seed is not None:
self._robot.SetDOFValues(seed, dofindices=arm_dofs)
# Compute a pre-grasp hand configuration and set it
pre_grasp_conf = np.asarray(grasp_conf) - open_hand_offset
lower_limits, upper_limits = self._robot.GetDOFLimits(hand_dofs)
pre_grasp_conf = np.asarray(clamp(pre_grasp_conf, lower_limits, upper_limits))
self._robot.SetDOFValues(pre_grasp_conf, dofindices=hand_dofs)
# Now find an ik solution for the target pose with the hand in the pre-grasp configuration
sol = self._manip.FindIKSolution(hand_pose_scene, orpy.IkFilterOptions.CheckEnvCollisions)
# sol = self.seven_dof_ik(hand_pose_scene, orpy.IkFilterOptions.CheckEnvCollisions)
# If that didn't work, try to compute a solution that is in collision (may be useful anyways)
if sol is None:
# sol = self.seven_dof_ik(hand_pose_scene, orpy.IkFilterOptions.IgnoreCustomFilters)
sol = self._manip.FindIKSolution(hand_pose_scene, orpy.IkFilterOptions.IgnoreCustomFilters)
b_sol_col_free = False
else:
b_sol_col_free = True
# Restore original dof values
self._robot.SetDOFValues(dof_values)
return b_sol_col_free, sol, pre_grasp_conf
class HFTSSampler:
def __init__(self, object_io_interface, scene_interface=None, verbose=False, num_hops=2, vis=False):
self._verbose = verbose
self._sampler_viewer = vis
self._orEnv = orpy.Environment() # create openrave environment
self._orEnv.SetDebugLevel(orpy.DebugLevel.Fatal)
self._orEnv.GetCollisionChecker().SetCollisionOptions(orpy.CollisionOptions.Contacts)
if vis:
self._orEnv.SetViewer('qtcoin') # attach viewer (optional)
self._or_handles = []
else:
self._or_handles = None
self._scene_or_env = None
self._hand_loaded = False
self._scene_interface = scene_interface
self._obj_loaded = False
self._max_iters = 40
self._reachability_weight = 1.0
self._mu = 2.0
self._min_stability = 0.0
self._b_force_new_hfts = False
self._object_kd_tree = None
self._object_points = None
# self._hops = num_hops
# TODO remove this aga
self._hops = 2
self._robot = None
self._obj = None
self._obj_com = None
self._data_labeled = None
self._hand_manifold = None
self._num_contacts = None
self._contact_combinations = []
self._num_levels = 0
self._branching_factors = []
self._object_io_interface = object_io_interface
def __del__(self):
orpy.RaveDestroy()
def check_arm_grasp_validity(self, grasp_conf, grasp_pose, seed, open_hand_offset=0.1):
if self._scene_interface is None:
#TODO Think about what we should do in this case (planning with free-floating hand)
return True, None, None
object_hfts_pose = self._obj.GetTransform() # pose in environment used for contact planning
hand_pose_object_frame = np.dot(np.linalg.inv(object_hfts_pose), grasp_pose)
# hand_pose_world = np.dot(object_hfts_pose, grasp_pose)
collision_free, arm_conf, pre_grasp_conf = \
self._scene_interface.check_arm_ik(hand_pose_object_frame,
grasp_conf,
seed=seed,
open_hand_offset=open_hand_offset)
return collision_free, arm_conf, pre_grasp_conf
def check_grasp_validity(self):
# Check whether the hand is collision free
if self._robot.CheckSelfCollision():
return False
real_contacts = self.get_real_contacts()
# self.draw_contacts(real_contacts)
stability = compute_grasp_stability(grasp_contacts=real_contacts,
mu=self._mu)
return stability > self._min_stability and self.is_grasp_collision_free()
def create_object_kd_tree(self, points):
self._object_kd_tree = KDTree(points[:, :3])
self._object_points = points
def compute_allowed_contact_combinations(self, depth, label_cache):
# Now, for this parent get all possible contacts
allowed_finger_combos = set(self._contact_combinations[depth])
# Next, we want to filter out contact combinations that are stored in labelCache
forbidden_finger_combos = set()
for grasp_label in label_cache:
finger_combo = tuple([x[-1] for x in grasp_label])
forbidden_finger_combos.add(finger_combo)
# Filter them out
allowed_finger_combos.difference_update(forbidden_finger_combos)
return list(allowed_finger_combos)
def compute_contact_combinations(self):
while len(self._contact_combinations) < self._num_levels:
self._contact_combinations.append([])
for i in range(self._num_levels):
self._contact_combinations[i] = set(itertools.product(range(self._branching_factors[i]),
repeat=self._num_contacts))
def compose_grasp_info(self, contact_labels):
contacts = [] # a list of contact positions and normals
for i in range(self._num_contacts):
p, n = self.get_cluster_repr(contact_labels[i])
contacts.append(list(p) + list(n))
object_contacts = np.asarray(contacts)
code_tmp = self._hand_manifold.encode_grasp(object_contacts)
dummy, grasp_conf = self._hand_manifold.predict_hand_conf(code_tmp)
hand_contacts = self._robot.get_ori_tip_pn(grasp_conf)
return grasp_conf, object_contacts, hand_contacts
def _debug_visualize_quality(self, labels, quality, handles):
grasp_conf, object_contacts, hand_contacts = self.compose_grasp_info(labels)
self._robot.SetVisible(False)
handles.append(self._draw_contacts_quality(object_contacts, quality))
def _draw_contacts_quality(self, object_contacts, quality):
colors = [[1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]]
quality = min(abs(quality), 0.005)
width = 0.003
length = max((1.0 - abs(quality) / 0.005) * 0.05, 0.001)
# Draw planned contacts
arrow_handles = []
for i in range(object_contacts.shape[0]):
arrow_handles.append(self._orEnv.drawarrow(object_contacts[i, :3],
object_contacts[i, :3] - length * object_contacts[i, 3:],
width, colors[i]))
return arrow_handles
def _debug_visualize(self, labels, handle_index=-1):
grasp_conf, object_contacts, hand_contacts = self.compose_grasp_info(labels)
rospy.logwarn('Debug visualize')
# self._robot.SetVisible(False)
# self.draw_contacts(object_contacts, handle_index=handle_index)
# time.sleep(1.0)
# self._robot.SetVisible(True)
def draw_contacts(self, object_contacts, handle_index=-1):
if len(self._or_handles) == 0:
self._or_handles.append(None)
self._or_handles.append(None)
# TODO this is hard coded for three contacts
colors = [[1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]]
if handle_index != 0:
width = 0.003
length = 0.05
else:
width = 0.001
length = 0.1
# Draw planned contacts
arrow_handles = []
for i in range(object_contacts.shape[0]):
arrow_handles.append(self._orEnv.drawarrow(object_contacts[i, :3],
object_contacts[i, :3] - length * object_contacts[i, 3:],
width, colors[i]))
self._or_handles[handle_index] = arrow_handles
def evaluate_grasp(self, contact_label):
contacts = [] # a list of contact positions and normals
for i in range(self._num_contacts):
p, n = self.get_cluster_repr(contact_label[i])
contacts.append(list(p) + list(n))
contacts = np.asarray(contacts)
# self.draw_contacts(contacts)
s_tmp = self._hand_manifold.compute_grasp_quality(self._obj_com, contacts)
code_tmp = self._hand_manifold.encode_grasp(contacts)
r_tmp, dummy = self._hand_manifold.predict_hand_conf(code_tmp)
# TODO: Research topic. This is kind of hack. Another objective function might be better
# o_tmp = s_tmp / (r_tmp + 0.000001)
o_tmp = s_tmp - self._reachability_weight * r_tmp
assert not math.isnan(o_tmp) and not math.isinf(math.fabs(o_tmp))
# o_tmp = s_tmp / (r_tmp + 1.0)
# return s_tmp, r_tmp, o_tmp
return s_tmp, r_tmp, -r_tmp
def extend_hfts_node(self, old_labels, allowed_finger_combos=None):
new_depth = len(old_labels[0]) # a label has length depth + 1
if allowed_finger_combos is not None:
fingertip_assignments = np.random.choice(allowed_finger_combos)
else:
fingertip_assignments = np.random.choice(self._branching_factors[new_depth],
self._num_contacts,
replace=True)
for label, assignment in itertools.izip(old_labels, fingertip_assignments):
label.append(assignment)
s_tmp, r_tmp, o_tmp = self.evaluate_grasp(old_labels)
# self._debug_visualize(old_labels, 0)
return o_tmp, old_labels
def get_branch_information(self, level):
if level < self.get_maximum_depth():
possible_num_children = pow(self._branching_factors[level] + 1, self._num_contacts)
possible_num_leaves = 1
for d in range(level, self.get_maximum_depth()):
possible_num_leaves *= pow(self._branching_factors[level] + 1, self._num_contacts)
else:
possible_num_children = 0
possible_num_leaves = 1
return possible_num_children, possible_num_leaves
def get_cluster_repr(self, label):
level = len(label) - 1 # indexed from 0
idx = np.where((self._data_labeled[:, 6:7 + level] == label).all(axis=1))
points = [self._data_labeled[t, 0:3] for t in idx][0]
normals = [self._data_labeled[t, 3:6] for t in idx][0]
pos = np.sum(points, axis=0) / len(idx[0])
normal = np.sum(normals, axis=0) / len(idx[0])
normal /= np.linalg.norm(normal)
return pos, -normal
def get_maximum_depth(self):
return self._num_levels
def get_or_hand(self):
return self._robot
def get_random_sibling_label(self, label):
ret = []
if len(label) <= self._hops / 2:
for i in range(len(label)):
ret.append(np.random.randint(self._branching_factors[i]))
else:
match_len = len(label) - self._hops / 2
ret = label[:match_len]
for i in range(len(label) - match_len):
ret.append(np.random.randint(self._branching_factors[i + match_len]))
return ret
def get_random_sibling_labels(self, curr_labels, allowed_finger_combos=None):
labels_tmp = []
if allowed_finger_combos is None:
for i in range(self._num_contacts):
tmp = self.get_random_sibling_label(curr_labels[i])
labels_tmp.append(tmp)
else:
finger_combo = np.random.choice(allowed_finger_combos)
for i in range(self._num_contacts):
tmp = list(curr_labels[i])
tmp[-1] = finger_combo[i]
labels_tmp.append(tmp)
return labels_tmp
def get_real_contacts(self):
collision_report = orpy.CollisionReport()
real_contacts = []
# iterate over all fingertip links and determine the contacts
for eel in self._robot.get_fingertip_links():
link = self._robot.GetLink(eel)
self._orEnv.CheckCollision(self._obj, link, report=collision_report)
# self._orEnv.CheckCollision(link, self._obj, report=collision_report)
if len(collision_report.contacts) == 0:
raise ValueError('[HFTSSampler::get_real_contacts] No contacts found')
# TODO the normals reported by the collision check are wrong, so instead we use a nearest
# TODO neighbor lookup. Should see what's wrong with OpenRAVE here...
position = collision_report.contacts[0].pos
normal = self._object_points[self._object_kd_tree.query(position), 3:][1]
# normal = collision_report.contacts[0].norm
real_contacts.append(np.concatenate((position, normal)))
real_contacts = np.asarray(real_contacts)
return real_contacts
def get_root_node(self):
possible_num_children, possible_num_leaves = self.get_branch_information(0)
return HFTSNode(num_possible_children=possible_num_children,
num_possible_leaves=possible_num_leaves)
def is_grasp_collision_free(self):
links = self._robot.get_non_fingertip_links()
for link in links:
if self._orEnv.CheckCollision(self._robot.GetLink(link)):
return False
return True
def load_hand(self, hand_file, hand_cache_file):
if not self._hand_loaded:
# TODO make this Robotiq hand independent (external hand loader)
self._robot = RobotiqHand(hand_cache_file=hand_cache_file,
env=self._orEnv, hand_file=hand_file)
self._hand_manifold = self._robot.get_hand_manifold()
self._hand_manifold.load()
self._num_contacts = self._robot.get_contact_number()
shift = transformations.identity_matrix()
shift[0, -1] = 0.2
self._robot.SetTransform(shift)
rospy.loginfo('Hand loaded in OpenRAVE environment')
self._hand_loaded = True
def load_object(self, obj_id, model_id=None):
if model_id is None:
model_id = obj_id
self._data_labeled, self._branching_factors, self._obj_com = \
self._object_io_interface.get_hfts(model_id, self._b_force_new_hfts)
if self._data_labeled is None:
raise RuntimeError('Could not load HFTS model for model ' + model_id)
self.create_object_kd_tree(self._data_labeled[:, :6])
self._num_levels = len(self._branching_factors)
# First, delete old object if there is any
if self._obj_loaded:
self._orEnv.Remove(self._obj)
or_file_name = self._object_io_interface.get_openrave_file_name(model_id)
self._obj_loaded = self._orEnv.Load(or_file_name)
if not self._obj_loaded:
raise RuntimeError('Could not load object model %s in OpenRAVE' % model_id)
self._obj = self._orEnv.GetKinBody('objectModel')
rospy.loginfo('Object loaded in OpenRAVE environment')
if self._scene_interface is not None:
self._scene_interface.set_target_object(obj_id)
self.compute_contact_combinations()
self._obj_loaded = True
import IPython
IPython.embed()
def sample_grasp(self, node, depth_limit, post_opt=False, label_cache=None, open_hand_offset=0.1):
if depth_limit < 0:
raise ValueError('HFTSSampler::sample_grasp depth limit must be greater or equal to zero.')
if node.get_depth() >= self._num_levels:
raise ValueError('HFTSSampler::sample_grasp input node has an invalid depth')
if node.get_depth() + depth_limit >= self._num_levels:
depth_limit = self._num_levels - node.get_depth() # cap
# In case we using the integrated method, we might have a limitation on what nodes to descend to
# let's compute this set.
allowed_finger_combos = None
if label_cache is not None and depth_limit == 1:
# TODO This currently only works for hops == 2
assert self._hops == 2
allowed_finger_combos = self.compute_allowed_contact_combinations(node.get_depth(), label_cache)
rospy.logdebug('[HFTSSampler::sample_grasp] We have %i allowed contacts' % len(allowed_finger_combos))
if len(allowed_finger_combos) == 0:
rospy.logwarn('[HFTSSampler::sample_grasp] We have no allowed contacts left! Aborting.')
return node
elif label_cache is not None and depth_limit != 1:
raise ValueError('[HFTSSampler::sample_grasp] Label cache only works for depth_limit == 1')
# Now, get a node to start stochastic optimization from
seed_ik = None
if node.get_depth() == 0: # at root
contact_label = self.pick_new_start_node()
best_o = -np.inf # need to also consider non-root nodes
else:
# If we are not at a leaf node, go down in the hierarchy
seed_ik = node.get_arm_configuration()
contact_label = copy.deepcopy(node.get_labels())
best_o, contact_label = self.extend_hfts_node(contact_label,
allowed_finger_combos=allowed_finger_combos)
self.reset_robot()
depth_limit -= 1
rospy.logdebug('[HFTSSampler::sample_grasp] Sampling a grasp; %i number of iterations' % self._max_iters)
# Do stochastic optimization until depth_limit is reached
while depth_limit >= 0:
# Randomly select siblings to optimize the objective function
for iter_now in range(self._max_iters):
labels_tmp = self.get_random_sibling_labels(curr_labels=contact_label,
allowed_finger_combos=allowed_finger_combos)
s_tmp, r_tmp, o_tmp = self.evaluate_grasp(labels_tmp)
if self.shc_evaluation(o_tmp, best_o):
contact_label = labels_tmp
best_o = o_tmp
# self._debug_visualize(labels_tmp, handle_index=0)
# Descend to next level if we iterate at least once more
if depth_limit > 0:
best_o, contact_label = self.extend_hfts_node(contact_label)
depth_limit -= 1
# Evaluate grasp on robot hand
# First, determine a hand configuration and the contact locations
grasp_conf, object_contacts, hand_contacts = self.compose_grasp_info(contact_label)
# Simulate the grasp and do local adjustments
b_robotiq_ok, grasp_conf, grasp_pose = self.simulate_grasp(grasp_conf=grasp_conf,
hand_contacts=hand_contacts,
object_contacts=object_contacts,
post_opt=post_opt,
swap_contacts=label_cache is None)
if b_robotiq_ok:
sample_q = 0
stability = best_o
else:
sample_q = 4
stability = 0.0
# except InvalidTriangleException:
# grasp_conf = None
# sample_q = 4
# stability = 0.0
is_leaf = (len(contact_label[0]) == self._num_levels)
is_goal_sample = (sample_q == 0) and is_leaf
if not is_goal_sample and grasp_conf is not None:
rospy.logdebug('[HFTSSampler::sample_grasp] Approximate has final quality: %i' % sample_q)
b_approximate_feasible = self._robot.avoid_collision_at_fingers(n_step=20)
if b_approximate_feasible:
grasp_conf = self._robot.GetDOFValues()
open_hand_offset = 0.0
logging.debug('[HFTSSampler::sample_grasp] We sampled a grasp on level ' + str(len(contact_label[0])))
if is_goal_sample:
logging.debug('[HFTSSampler::sample_grasp] We sampled a goal grasp (might be in collision)!')
if is_leaf:
logging.debug('[HFTSSampler::sample_grasp] We sampled a leaf')
if grasp_conf is not None and grasp_pose is not None:
collision_free_arm_ik, arm_conf, pre_grasp_conf = \
self.check_arm_grasp_validity(grasp_conf=grasp_conf,
grasp_pose=grasp_pose,
seed=seed_ik, open_hand_offset=open_hand_offset)
else:
collision_free_arm_ik = False
arm_conf = None
pre_grasp_conf = None
depth = len(contact_label[0])
possible_num_children, possible_num_leaves = self.get_branch_information(depth)
return HFTSNode(labels=contact_label, hand_conf=np.asarray(grasp_conf),
pre_grasp_conf=pre_grasp_conf, arm_conf=arm_conf,
is_goal=is_goal_sample, is_leaf=is_leaf, is_valid=collision_free_arm_ik,
num_possible_children=possible_num_children, num_possible_leaves=possible_num_leaves,
hand_transform=self._robot.GetTransform())
def set_max_iter(self, m):
assert m > 0
self._max_iters = m
def set_parameters(self, max_iters=None, reachability_weight=None,
com_center_weight=None, hfts_generation_params=None,
b_force_new_hfts=None):
# TODO some of these parameters are Robotiq hand specific. We probably wanna pass them as dictionary
if max_iters is not None:
self._max_iters = max_iters
assert self._max_iters > 0
if reachability_weight is not None:
self._reachability_weight = reachability_weight
assert self._reachability_weight >= 0.0
# TODO this is Robotiq hand specific, and outdated
self._hand_manifold.set_parameters(com_center_weight)
if hfts_generation_params is not None:
self._object_io_interface.set_hfts_generation_parameters(hfts_generation_params)
if b_force_new_hfts is not None:
self._b_force_new_hfts = b_force_new_hfts
def shc_evaluation(self, o_tmp, best_o):
if best_o < o_tmp:
return True
else:
return False
def _simulate_grasp(self, grasp_conf, hand_contacts, object_contacts, post_opt=False):
# self.draw_contacts(object_contacts)
self._robot.SetDOFValues(grasp_conf)
try:
T = self._robot.hand_obj_transform(hand_contacts[:3, :3], object_contacts[:, :3])
self._robot.SetTransform(T)
except InvalidTriangleException as ite:
logging.warn('[HFTSSampler::simulate_grasp] Caught an InvalidTriangleException: ' + str(ite))
return False, grasp_conf, None
if post_opt:
self._post_optimization(object_contacts)
open_success, tips_in_contact = self._robot.comply_fingertips()
if not open_success or not tips_in_contact:
return False, self._robot.GetDOFValues(), self._robot.GetTransform()
if self.check_grasp_validity():
return True, self._robot.GetDOFValues(), self._robot.GetTransform()
return False, self._robot.GetDOFValues(), self._robot.GetTransform()
def simulate_grasp(self, grasp_conf, hand_contacts, object_contacts, post_opt=False, swap_contacts=True):
# TODO this method as it is right now is only useful for the Robotiq hand.
b_grasp_valid, grasp_conf, grasp_pose = self._simulate_grasp(grasp_conf, hand_contacts, object_contacts, post_opt)
if not b_grasp_valid and swap_contacts:
self.swap_contacts([0, 1], object_contacts)
b_grasp_valid, grasp_conf, grasp_pose = self._simulate_grasp(grasp_conf, hand_contacts, object_contacts, post_opt)
return b_grasp_valid, grasp_conf, grasp_pose
@staticmethod
def swap_contacts(rows, object_contacts):
frm = rows[0]
to = rows[1]
object_contacts[[frm, to], :] = object_contacts[[to, frm], :]
def reset_robot(self):
shift = transformations.identity_matrix()
shift[0, -1] = 0.2
self._robot.SetTransform(shift)
# Set hand to default (mean) configuration
mean_values = map(lambda min_v, max_v: (min_v + max_v) / 2.0,
self._robot.GetDOFLimits()[0],
self._robot.GetDOFLimits()[1])
self._robot.SetDOFValues(mean_values, range(len(mean_values)))
def pick_new_start_node(self):
num_nodes_top_level = self._branching_factors[0]
contact_label = []
for i in range(self._num_contacts):
contact_label.append([np.random.choice(range(num_nodes_top_level + 1))])
return contact_label
def plot_clusters(self, contact_labels):
if not self._sampler_viewer:
return
self.cloud_plot = []
colors = [np.array((1,0,0)), np.array((0,1,0)), np.array((0,0,1))]
for i in range(3):
label = contact_labels[i]
level = len(label) - 1 # indexed from 0
idx = np.where((self._data_labeled[:, 6:7 + level] == label).all(axis=1))
points = [self._data_labeled[t, 0:3] for t in idx][0]
points = np.asarray(points)
self.cloud_plot.append(self._orEnv.plot3(points=points, pointsize=0.006, colors=colors[i], drawstyle=1))
def _post_optimization(self, grasp_contacts):
logging.info('[HFTSSampler::_post_optimization] Performing post optimization.')
transform = self._robot.GetTransform()
angle, axis, point = transformations.rotation_from_matrix(transform)
# further optimize hand configuration and pose
# TODO this is Robotiq hand specific
transform_params = axis.tolist() + [angle] + transform[:3, 3].tolist()
robot_dofs = self._robot.GetDOFValues().tolist()
def joint_limits_constraint(x, *args):
positions, normals, robot = args
lower_limits, upper_limits = robot.GetDOFLimits()
return -dist_in_range(x[0], [lower_limits[0], upper_limits[0]]) - \
dist_in_range(x[1], [lower_limits[1], upper_limits[1]])
def collision_free_constraint(x, *args):
positions, normals, robot = args
config = [x[0], x[1]]
robot.SetDOFValues(config)
env = robot.GetEnv()
links = robot.get_non_fingertip_links()
for link in links:
if env.CheckCollision(robot.GetLink(link)):
return -1.0
return 0.0
x_min = scipy.optimize.fmin_cobyla(self._post_optimization_obj_fn, robot_dofs + transform_params,
[joint_limits_constraint, collision_free_constraint],
rhobeg=.5, rhoend=1e-3,
args=(grasp_contacts[:, :3], grasp_contacts[:, 3:], self._robot),
maxfun=int(1e8), iprint=0)
self._robot.SetDOFValues(x_min[:2])
axis = x_min[2:5]
angle = x_min[5]
position = x_min[6:]
transform = transformations.rotation_matrix(angle, axis)
transform[:3, 3] = position
self._robot.SetTransform(transform)
@staticmethod
def _post_optimization_obj_fn(x, *params):
# TODO this is Robotiq hand specific
desired_contact_points, desired_contact_normals, robot = params
dofs = x[:2]
robot.SetDOFValues(dofs)
axis = x[2:5]
angle = x[5]
position = x[6:]
transform = transformations.rotation_matrix(angle, axis)
transform[:3, 3] = position
robot.SetTransform(transform)
contacts = robot.get_tip_pn()
temp_positions = contacts[:, :3]
temp_normals = contacts[:, 3:]
pos_err = position_distance(desired_contact_points, temp_positions)
normal_err = normal_distance(desired_contact_normals, temp_normals)
return pos_err + normal_err
class HFTSNode:
def __init__(self, labels=None, hand_conf=None, hand_transform=None,
pre_grasp_conf=None, arm_conf=None, is_leaf=False, is_valid=False, is_goal=False,
num_possible_children=0, num_possible_leaves=0, quality=0.0):
# None values represent the root node
if labels is None:
self._depth = 0
else:
self._depth = len(labels[0])
self._labels = labels
self._hand_config = hand_conf
self._hand_transform = hand_transform
self._is_goal = is_goal
self._is_leaf = is_leaf
self._is_valid = is_valid
self._pre_grasp_conf = pre_grasp_conf
self._arm_conf = arm_conf
self._num_possible_children = num_possible_children
self._num_possible_leaves = num_possible_leaves
self._quality = quality
def get_labels(self):
return self._labels
def get_depth(self):
return self._depth
def get_hand_config(self):
return self._hand_config
def get_pre_grasp_config(self):
return self._pre_grasp_conf
def is_goal(self):
return self._is_goal
def get_hand_transform(self):
return self._hand_transform
def get_arm_configuration(self):
return self._arm_conf
def get_unique_label(self):
if self._labels is None:
return 'root'
label = []
for finger_label in self._labels:
label.extend(finger_label)
return str(label)
def is_extendible(self):
return not self._is_leaf
def is_leaf(self):
return self._is_leaf
def is_valid(self):
return self._is_valid
def get_num_possible_children(self):
return self._num_possible_children
def get_num_possible_leaves(self):
return self._num_possible_leaves
def get_quality(self):
return self._quality
|
[
"openravepy.RaveDestroy",
"numpy.sum",
"numpy.random.randint",
"numpy.linalg.norm",
"utils.normal_distance",
"rospy.logwarn",
"openravepy.Environment",
"IPython.embed",
"numpy.random.choice",
"math.isnan",
"transformations.rotation_from_matrix",
"numpy.asarray",
"transformations.rotation_matrix",
"openravepy.CollisionReport",
"rospy.loginfo",
"rospy.logdebug",
"numpy.linalg.inv",
"robotiqloader.RobotiqHand",
"numpy.dot",
"utils.clamp",
"transformations.identity_matrix",
"numpy.concatenate",
"openravepy.databases.inversekinematics.InverseKinematicsModel",
"logging.debug",
"math.fabs",
"logging.info",
"numpy.array",
"itertools.izip",
"scipy.spatial.KDTree",
"utils.position_distance",
"utils.dist_in_range",
"utils.compute_grasp_stability"
] |
[((931, 1053), 'openravepy.databases.inversekinematics.InverseKinematicsModel', 'orpy.databases.inversekinematics.InverseKinematicsModel', (['self._robot'], {'iktype': 'orpy.IkParameterization.Type.Transform6D'}), '(self._robot, iktype\n =orpy.IkParameterization.Type.Transform6D)\n', (986, 1053), True, 'import openravepy as orpy\n'), ((3537, 3555), 'openravepy.Environment', 'orpy.Environment', ([], {}), '()\n', (3553, 3555), True, 'import openravepy as orpy\n'), ((4757, 4775), 'openravepy.RaveDestroy', 'orpy.RaveDestroy', ([], {}), '()\n', (4773, 4775), True, 'import openravepy as orpy\n'), ((5944, 6010), 'utils.compute_grasp_stability', 'compute_grasp_stability', ([], {'grasp_contacts': 'real_contacts', 'mu': 'self._mu'}), '(grasp_contacts=real_contacts, mu=self._mu)\n', (5967, 6010), False, 'from utils import ObjectFileIO, clamp, compute_grasp_stability, normal_distance, position_distance, dist_in_range\n'), ((6214, 6235), 'scipy.spatial.KDTree', 'KDTree', (['points[:, :3]'], {}), '(points[:, :3])\n', (6220, 6235), False, 'from scipy.spatial import KDTree\n'), ((7592, 7612), 'numpy.asarray', 'np.asarray', (['contacts'], {}), '(contacts)\n', (7602, 7612), True, 'import numpy as np\n'), ((8957, 8989), 'rospy.logwarn', 'rospy.logwarn', (['"""Debug visualize"""'], {}), "('Debug visualize')\n", (8970, 8989), False, 'import rospy\n'), ((10324, 10344), 'numpy.asarray', 'np.asarray', (['contacts'], {}), '(contacts)\n', (10334, 10344), True, 'import numpy as np\n'), ((11530, 11579), 'itertools.izip', 'itertools.izip', (['old_labels', 'fingertip_assignments'], {}), '(old_labels, fingertip_assignments)\n', (11544, 11579), False, 'import itertools\n'), ((12709, 12731), 'numpy.linalg.norm', 'np.linalg.norm', (['normal'], {}), '(normal)\n', (12723, 12731), True, 'import numpy as np\n'), ((14004, 14026), 'openravepy.CollisionReport', 'orpy.CollisionReport', ([], {}), '()\n', (14024, 14026), True, 'import openravepy as orpy\n'), ((15001, 15026), 'numpy.asarray', 'np.asarray', (['real_contacts'], {}), '(real_contacts)\n', (15011, 15026), True, 'import numpy as np\n'), ((17208, 17262), 'rospy.loginfo', 'rospy.loginfo', (['"""Object loaded in OpenRAVE environment"""'], {}), "('Object loaded in OpenRAVE environment')\n", (17221, 17262), False, 'import rospy\n'), ((17476, 17491), 'IPython.embed', 'IPython.embed', ([], {}), '()\n', (17489, 17491), False, 'import IPython\n'), ((19585, 19699), 'rospy.logdebug', 'rospy.logdebug', (["('[HFTSSampler::sample_grasp] Sampling a grasp; %i number of iterations' %\n self._max_iters)"], {}), "(\n '[HFTSSampler::sample_grasp] Sampling a grasp; %i number of iterations' %\n self._max_iters)\n", (19599, 19699), False, 'import rospy\n'), ((26450, 26483), 'transformations.identity_matrix', 'transformations.identity_matrix', ([], {}), '()\n', (26481, 26483), False, 'import transformations\n'), ((27826, 27905), 'logging.info', 'logging.info', (['"""[HFTSSampler::_post_optimization] Performing post optimization."""'], {}), "('[HFTSSampler::_post_optimization] Performing post optimization.')\n", (27838, 27905), False, 'import sys, time, logging, copy\n'), ((27982, 28029), 'transformations.rotation_from_matrix', 'transformations.rotation_from_matrix', (['transform'], {}), '(transform)\n', (28018, 28029), False, 'import transformations\n'), ((29569, 29613), 'transformations.rotation_matrix', 'transformations.rotation_matrix', (['angle', 'axis'], {}), '(angle, axis)\n', (29600, 29613), False, 'import transformations\n'), ((30019, 30063), 'transformations.rotation_matrix', 'transformations.rotation_matrix', (['angle', 'axis'], {}), '(angle, axis)\n', (30050, 30063), False, 'import transformations\n'), ((30274, 30331), 'utils.position_distance', 'position_distance', (['desired_contact_points', 'temp_positions'], {}), '(desired_contact_points, temp_positions)\n', (30291, 30331), False, 'from utils import ObjectFileIO, clamp, compute_grasp_stability, normal_distance, position_distance, dist_in_range\n'), ((30353, 30407), 'utils.normal_distance', 'normal_distance', (['desired_contact_normals', 'temp_normals'], {}), '(desired_contact_normals, temp_normals)\n', (30368, 30407), False, 'from utils import ObjectFileIO, clamp, compute_grasp_stability, normal_distance, position_distance, dist_in_range\n'), ((1217, 1279), 'rospy.loginfo', 'rospy.loginfo', (['"""No IKFast solver found. Generating new one..."""'], {}), "('No IKFast solver found. Generating new one...')\n", (1230, 1279), False, 'import rospy\n'), ((1692, 1729), 'numpy.dot', 'np.dot', (['object_pose', 'hand_pose_object'], {}), '(object_pose, hand_pose_object)\n', (1698, 1729), True, 'import numpy as np\n'), ((5184, 5215), 'numpy.linalg.inv', 'np.linalg.inv', (['object_hfts_pose'], {}), '(object_hfts_pose)\n', (5197, 5215), True, 'import numpy as np\n'), ((11213, 11252), 'numpy.random.choice', 'np.random.choice', (['allowed_finger_combos'], {}), '(allowed_finger_combos)\n', (11229, 11252), True, 'import numpy as np\n'), ((11303, 11393), 'numpy.random.choice', 'np.random.choice', (['self._branching_factors[new_depth]', 'self._num_contacts'], {'replace': '(True)'}), '(self._branching_factors[new_depth], self._num_contacts,\n replace=True)\n', (11319, 11393), True, 'import numpy as np\n'), ((12599, 12621), 'numpy.sum', 'np.sum', (['points'], {'axis': '(0)'}), '(points, axis=0)\n', (12605, 12621), True, 'import numpy as np\n'), ((12653, 12676), 'numpy.sum', 'np.sum', (['normals'], {'axis': '(0)'}), '(normals, axis=0)\n', (12659, 12676), True, 'import numpy as np\n'), ((13705, 13744), 'numpy.random.choice', 'np.random.choice', (['allowed_finger_combos'], {}), '(allowed_finger_combos)\n', (13721, 13744), True, 'import numpy as np\n'), ((15735, 15822), 'robotiqloader.RobotiqHand', 'RobotiqHand', ([], {'hand_cache_file': 'hand_cache_file', 'env': 'self._orEnv', 'hand_file': 'hand_file'}), '(hand_cache_file=hand_cache_file, env=self._orEnv, hand_file=\n hand_file)\n', (15746, 15822), False, 'from robotiqloader import RobotiqHand, InvalidTriangleException\n'), ((16047, 16080), 'transformations.identity_matrix', 'transformations.identity_matrix', ([], {}), '()\n', (16078, 16080), False, 'import transformations\n'), ((16168, 16220), 'rospy.loginfo', 'rospy.loginfo', (['"""Hand loaded in OpenRAVE environment"""'], {}), "('Hand loaded in OpenRAVE environment')\n", (16181, 16220), False, 'import rospy\n'), ((21793, 21888), 'rospy.logdebug', 'rospy.logdebug', (["('[HFTSSampler::sample_grasp] Approximate has final quality: %i' % sample_q)"], {}), "(\n '[HFTSSampler::sample_grasp] Approximate has final quality: %i' % sample_q)\n", (21807, 21888), False, 'import rospy\n'), ((22256, 22359), 'logging.debug', 'logging.debug', (['"""[HFTSSampler::sample_grasp] We sampled a goal grasp (might be in collision)!"""'], {}), "(\n '[HFTSSampler::sample_grasp] We sampled a goal grasp (might be in collision)!'\n )\n", (22269, 22359), False, 'import sys, time, logging, copy\n'), ((22382, 22444), 'logging.debug', 'logging.debug', (['"""[HFTSSampler::sample_grasp] We sampled a leaf"""'], {}), "('[HFTSSampler::sample_grasp] We sampled a leaf')\n", (22395, 22444), False, 'import sys, time, logging, copy\n'), ((27284, 27303), 'numpy.array', 'np.array', (['(1, 0, 0)'], {}), '((1, 0, 0))\n', (27292, 27303), True, 'import numpy as np\n'), ((27303, 27322), 'numpy.array', 'np.array', (['(0, 1, 0)'], {}), '((0, 1, 0))\n', (27311, 27322), True, 'import numpy as np\n'), ((27322, 27341), 'numpy.array', 'np.array', (['(0, 0, 1)'], {}), '((0, 0, 1))\n', (27330, 27341), True, 'import numpy as np\n'), ((27631, 27649), 'numpy.asarray', 'np.asarray', (['points'], {}), '(points)\n', (27641, 27649), True, 'import numpy as np\n'), ((2155, 2177), 'numpy.asarray', 'np.asarray', (['grasp_conf'], {}), '(grasp_conf)\n', (2165, 2177), True, 'import numpy as np\n'), ((2314, 2363), 'utils.clamp', 'clamp', (['pre_grasp_conf', 'lower_limits', 'upper_limits'], {}), '(pre_grasp_conf, lower_limits, upper_limits)\n', (2319, 2363), False, 'from utils import ObjectFileIO, clamp, compute_grasp_stability, normal_distance, position_distance, dist_in_range\n'), ((10819, 10836), 'math.isnan', 'math.isnan', (['o_tmp'], {}), '(o_tmp)\n', (10829, 10836), False, 'import math\n'), ((14941, 14975), 'numpy.concatenate', 'np.concatenate', (['(position, normal)'], {}), '((position, normal))\n', (14955, 14975), True, 'import numpy as np\n'), ((18617, 18710), 'rospy.logwarn', 'rospy.logwarn', (['"""[HFTSSampler::sample_grasp] We have no allowed contacts left! Aborting."""'], {}), "(\n '[HFTSSampler::sample_grasp] We have no allowed contacts left! Aborting.')\n", (18630, 18710), False, 'import rospy\n'), ((23106, 23128), 'numpy.asarray', 'np.asarray', (['grasp_conf'], {}), '(grasp_conf)\n', (23116, 23128), True, 'import numpy as np\n'), ((28520, 28575), 'utils.dist_in_range', 'dist_in_range', (['x[1]', '[lower_limits[1], upper_limits[1]]'], {}), '(x[1], [lower_limits[1], upper_limits[1]])\n', (28533, 28575), False, 'from utils import ObjectFileIO, clamp, compute_grasp_stability, normal_distance, position_distance, dist_in_range\n'), ((10856, 10872), 'math.fabs', 'math.fabs', (['o_tmp'], {}), '(o_tmp)\n', (10865, 10872), False, 'import math\n'), ((13054, 13099), 'numpy.random.randint', 'np.random.randint', (['self._branching_factors[i]'], {}), '(self._branching_factors[i])\n', (13071, 13099), True, 'import numpy as np\n'), ((13282, 13339), 'numpy.random.randint', 'np.random.randint', (['self._branching_factors[i + match_len]'], {}), '(self._branching_factors[i + match_len])\n', (13299, 13339), True, 'import numpy as np\n'), ((28441, 28496), 'utils.dist_in_range', 'dist_in_range', (['x[0]', '[lower_limits[0], upper_limits[0]]'], {}), '(x[0], [lower_limits[0], upper_limits[0]])\n', (28454, 28496), False, 'from utils import ObjectFileIO, clamp, compute_grasp_stability, normal_distance, position_distance, dist_in_range\n')]
|
# © 2021 <NAME> (initOS GmbH)
# License Apache-2.0 (http://www.apache.org/licenses/).
import os
from queue import Empty
from unittest import mock
import pytest
from doblib.bootstrap import BootstrapEnvironment, aggregate_repo
def aggregate_exception(repo, args, sem, err_queue):
try:
err_queue.put_nowait("ERROR")
finally:
sem.release()
@pytest.fixture
def env():
cur = os.getcwd()
os.chdir("tests/environment/")
env = BootstrapEnvironment("odoo.local.yaml")
os.chdir(cur)
return env
def test_init(env):
env.generate_config = mock.MagicMock()
env._bootstrap = mock.MagicMock()
env.init(["--no-config"])
env.generate_config.assert_not_called()
env._bootstrap.assert_called_once()
env._bootstrap.reset_mock()
env.init()
env.generate_config.assert_called_once()
env._bootstrap.assert_called_once()
@mock.patch("doblib.bootstrap.match_dir", return_value=False)
def test_aggregate_repo(match_mock):
m = mock.MagicMock()
aggregate_repo(m, m, m, m)
m.put_nowait.assert_not_called()
m.release.assert_called_once()
match_mock.assert_called_once_with(m.cwd, m.dirmatch)
m.aggregate.assert_not_called()
m.reset_mock()
match_mock.return_value = True
aggregate_repo(m, m, m, m)
m.put_nowait.assert_not_called()
m.release.assert_called_once()
m.aggregate.assert_called()
m.reset_mock()
match_mock.side_effect = Exception()
aggregate_repo(m, m, m, m)
m.put_nowait.assert_called()
m.release.assert_called_once()
m.aggregate.assert_not_called()
@mock.patch("doblib.bootstrap.traceback")
@mock.patch("doblib.bootstrap.Repo")
@mock.patch("doblib.bootstrap.aggregate_repo")
@mock.patch("doblib.bootstrap.get_repos", return_value=[{"cwd": "unknown"}])
def test_bootstrap(repos, aggregate, repo, traceback, env):
env.generate_config = mock.MagicMock()
assert not env.init()
repos.assert_called_once()
repo.assert_called()
aggregate.assert_called()
aggregate.reset_mock()
env.init(["-j", "1"])
aggregate.assert_called()
with mock.patch("doblib.bootstrap.Queue") as m:
queue = m.return_value
queue.empty.return_value = False
queue.get_nowait.side_effect = [(1, 42, 37), Empty()]
assert env.init() == 1
queue.empty.assert_called()
queue.get_nowait.assert_called()
traceback.print_exception.assert_called_once_with(1, 42, 37)
|
[
"unittest.mock.MagicMock",
"os.getcwd",
"queue.Empty",
"unittest.mock.patch",
"doblib.bootstrap.aggregate_repo",
"doblib.bootstrap.BootstrapEnvironment",
"os.chdir"
] |
[((889, 949), 'unittest.mock.patch', 'mock.patch', (['"""doblib.bootstrap.match_dir"""'], {'return_value': '(False)'}), "('doblib.bootstrap.match_dir', return_value=False)\n", (899, 949), False, 'from unittest import mock\n'), ((1601, 1641), 'unittest.mock.patch', 'mock.patch', (['"""doblib.bootstrap.traceback"""'], {}), "('doblib.bootstrap.traceback')\n", (1611, 1641), False, 'from unittest import mock\n'), ((1643, 1678), 'unittest.mock.patch', 'mock.patch', (['"""doblib.bootstrap.Repo"""'], {}), "('doblib.bootstrap.Repo')\n", (1653, 1678), False, 'from unittest import mock\n'), ((1680, 1725), 'unittest.mock.patch', 'mock.patch', (['"""doblib.bootstrap.aggregate_repo"""'], {}), "('doblib.bootstrap.aggregate_repo')\n", (1690, 1725), False, 'from unittest import mock\n'), ((1727, 1802), 'unittest.mock.patch', 'mock.patch', (['"""doblib.bootstrap.get_repos"""'], {'return_value': "[{'cwd': 'unknown'}]"}), "('doblib.bootstrap.get_repos', return_value=[{'cwd': 'unknown'}])\n", (1737, 1802), False, 'from unittest import mock\n'), ((405, 416), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (414, 416), False, 'import os\n'), ((421, 451), 'os.chdir', 'os.chdir', (['"""tests/environment/"""'], {}), "('tests/environment/')\n", (429, 451), False, 'import os\n'), ((462, 501), 'doblib.bootstrap.BootstrapEnvironment', 'BootstrapEnvironment', (['"""odoo.local.yaml"""'], {}), "('odoo.local.yaml')\n", (482, 501), False, 'from doblib.bootstrap import BootstrapEnvironment, aggregate_repo\n'), ((506, 519), 'os.chdir', 'os.chdir', (['cur'], {}), '(cur)\n', (514, 519), False, 'import os\n'), ((583, 599), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (597, 599), False, 'from unittest import mock\n'), ((621, 637), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (635, 637), False, 'from unittest import mock\n'), ((995, 1011), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (1009, 1011), False, 'from unittest import mock\n'), ((1016, 1042), 'doblib.bootstrap.aggregate_repo', 'aggregate_repo', (['m', 'm', 'm', 'm'], {}), '(m, m, m, m)\n', (1030, 1042), False, 'from doblib.bootstrap import BootstrapEnvironment, aggregate_repo\n'), ((1269, 1295), 'doblib.bootstrap.aggregate_repo', 'aggregate_repo', (['m', 'm', 'm', 'm'], {}), '(m, m, m, m)\n', (1283, 1295), False, 'from doblib.bootstrap import BootstrapEnvironment, aggregate_repo\n'), ((1466, 1492), 'doblib.bootstrap.aggregate_repo', 'aggregate_repo', (['m', 'm', 'm', 'm'], {}), '(m, m, m, m)\n', (1480, 1492), False, 'from doblib.bootstrap import BootstrapEnvironment, aggregate_repo\n'), ((1889, 1905), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (1903, 1905), False, 'from unittest import mock\n'), ((2114, 2150), 'unittest.mock.patch', 'mock.patch', (['"""doblib.bootstrap.Queue"""'], {}), "('doblib.bootstrap.Queue')\n", (2124, 2150), False, 'from unittest import mock\n'), ((2283, 2290), 'queue.Empty', 'Empty', ([], {}), '()\n', (2288, 2290), False, 'from queue import Empty\n')]
|
from django.conf.urls import url
from django.http import HttpResponseRedirect
from .views import PostCreate, PostUpdate, PostDelete, ProfileView
app_name = 'api'
urlpatterns = [
url(r'^$', lambda r: HttpResponseRedirect('new/'), name='index'),
url(r'^(?P<pk>[0-9]+)/$', PostUpdate.as_view(), name='update'),
url(r'^new/$', PostCreate.as_view(), name='create'),
url(r'^(?P<pk>[0-9]+)/delete/$', PostDelete.as_view(), name='delete'),
# API
url(r'^profile/$', ProfileView.as_view(), name='profile'),
]
|
[
"django.http.HttpResponseRedirect"
] |
[((206, 234), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (['"""new/"""'], {}), "('new/')\n", (226, 234), False, 'from django.http import HttpResponseRedirect\n')]
|
import uuid
from django import forms
from django.conf import settings
from django.core.mail import send_mail
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from spreedly.models import Plan, Gift
from spreedly.functions import subscription_url, check_trial_eligibility, return_url
import spreedly.settings as spreedly_settings
from spreedly.pyspreedly.api import Client
class SubscribeForm(forms.Form):
username = forms.CharField(
max_length=30,
required=True
)
email = forms.EmailField(
required=True
)
password1 = forms.CharField(
label="Password",
required=True,
widget=forms.PasswordInput(),
)
password2 = forms.CharField(
label="Password again",
required=True,
widget=forms.PasswordInput(),
)
subscription = forms.ModelChoiceField(queryset=Plan.objects.filter(enabled=True), empty_label=None)
def clean(self):
username = self.cleaned_data.get("username")
email = self.cleaned_data.get("email")
pass1 = self.cleaned_data.get("password1")
pass2 = self.cleaned_data.get("<PASSWORD>")
plan = self.cleaned_data.get("subscription")
if username and email and pass1 and pass2:
if pass1 != pass2:
raise forms.ValidationError(_("You must type the same password each time."))
if plan.is_free_trial_plan:
existing_users = Subscription.objects.filter(user__email=email, trial_elegible=False).count()
if existing_users:
raise forms.ValidationError(_("A user with this email has already had a free trial."))
user, created = User.objects.get_or_create(username=username.lower(), defaults={
'email': email,
'is_active': False
})
if not created and user.is_active:
raise forms.ValidationError(_("Sorry, This username is already taken."))
elif not created:
user.email = email
user.save()
return self.cleaned_data
def save(self):
user = User.objects.get(username=self.cleaned_data["username"].lower())
user.set_password(self.cleaned_data["password2"])
user.save()
plan = self.cleaned_data["subscription"]
trial = check_trial_eligibility(plan, user)
if trial:
url = return_url(plan.pk, user, trial=True)
else:
url = subscription_url(plan, user)
send_mail(
spreedly_settings.SPREEDLY_CONFIRM_EMAIL_SUBJECT,
render_to_string(spreedly_settings.SPREEDLY_CONFIRM_EMAIL, {
'plan_name': plan.name,
'user': user,
'site': spreedly_settings.SPREEDLY_SITE_URL,
'spreedly_url': url
}),
settings.DEFAULT_FROM_EMAIL,
[user.email,]
)
return reverse('spreedly_email_sent', args=[user.id])
class GiftRegisterForm(forms.Form):
username = forms.CharField(
max_length=30,
required=True
)
email = forms.EmailField(
required=True
)
password1 = forms.CharField(
label="Password",
required=True,
widget=forms.PasswordInput(),
)
password2 = forms.CharField(
label="<PASSWORD>",
required=True,
widget=forms.PasswordInput(),
)
gift_key = forms.CharField(max_length=32, required=True, widget=forms.HiddenInput)
def clean(self):
username = self.cleaned_data.get("username")
email = self.cleaned_data.get("email")
pass1 = self.cleaned_data.get("password1")
pass2 = self.cleaned_data.get("<PASSWORD>")
gift_key = self.cleaned_data.get("gift_key")
if username:
try:
User.objects.get(username=self.cleaned_data['username'], is_active=True)
raise forms.ValidationError(_("Sorry, This username is already taken."))
except User.DoesNotExist:
pass
if username and email and pass1 and pass2:
if pass1 != pass2:
raise forms.ValidationError(_("You must type the same password each time."))
return self.cleaned_data
def save(self):
# remove any inactive users with this same username
try:
old_user = User.objects.get(username=self.cleaned_data['username'], is_active=False)
old_user.delete()
except User.DoesNotExist:
pass
gift = Gift.objects.get(uuid=self.cleaned_data["gift_key"])
user = gift.to_user
user.username = self.cleaned_data['username']
user.email = self.cleaned_data['email']
user.set_password(self.cleaned_data['<PASSWORD>'])
user.is_active=True
user.save()
#update spreedly info
client = Client(settings.SPREEDLY_AUTH_TOKEN, settings.SPREEDLY_SITE_NAME)
client.set_info(user.pk, email=user.email, screen_name=user.username)
gift.delete()
return user
class GiftForm(forms.Form):
subscription = forms.ModelChoiceField(queryset=Plan.objects.filter(plan_type='gift'), empty_label=None)
your_name = forms.CharField(
label="Your Name",
required=True
)
message = forms.CharField(
label="Message",
required=False,
widget=forms.Textarea(attrs={'rows':3, 'cols':55})
)
email = forms.EmailField(
label="Email",
required=True
)
email_again = forms.EmailField(
label="Email Again",
required=True
)
def clean(self):
email = self.cleaned_data.get("email")
email2 = self.cleaned_data.get("email_again")
if email and email2:
if email != email2:
raise forms.ValidationError(_("The two emails don't match. Please make sure both are correct."))
return self.cleaned_data
def save(self, request):
gift_id = str(uuid.uuid4().hex)[:29]
plan = self.cleaned_data["subscription"]
user = User.objects.create(
username=gift_id,
email=self.cleaned_data["email"],
is_active=False,
password='<PASSWORD>'
)
Gift.objects.create(
from_user=request.user,
to_user=user,
uuid = gift_id,
plan_name=plan.name,
message=self.cleaned_data["message"]
)
return (plan, user)
class PlanModelChoiceField(forms.ModelChoiceField):
def label_from_instance(self, obj):
if obj.enabled:
return unicode(obj)
else:
return '*%s' % (obj)
class AdminGiftForm(forms.Form):
plan_name = forms.CharField(
label="Plan Name",
required=True
)
feature_level = forms.ChoiceField(
label="Feature Level",
choices=[(x,x) for x in set(Plan.objects.values_list('feature_level', flat=True))]
)
time = forms.ChoiceField(
label="Time",
choices=[(i,i) for i in range(1,91)]
)
units = forms.ChoiceField(
label="Time Units",
choices=[
('days', 'Day(s)'),
('months', 'Month(s)')
]
)
your_name = forms.CharField(
label="Your Name",
required=True
)
message = forms.CharField(
label="Message",
required=False,
widget=forms.Textarea(attrs={'rows':3, 'cols':55})
)
email = forms.EmailField(
label="Email",
required=True
)
def save(self, request):
gift_id = str(uuid.uuid4().hex)[:29]
user = User.objects.create(
username=gift_id,
email=self.cleaned_data["email"],
is_active=False,
password='<PASSWORD>'
)
Gift.objects.create(
from_user=request.user,
to_user=user,
uuid = gift_id,
message=self.cleaned_data["message"],
plan_name=self.cleaned_data["plan_name"]
)
return user
|
[
"django.forms.EmailField",
"django.core.urlresolvers.reverse",
"django.forms.PasswordInput",
"spreedly.models.Plan.objects.values_list",
"spreedly.models.Gift.objects.create",
"django.utils.translation.ugettext_lazy",
"spreedly.pyspreedly.api.Client",
"django.contrib.auth.models.User.objects.get",
"django.forms.ChoiceField",
"django.contrib.auth.models.User.objects.create",
"spreedly.models.Gift.objects.get",
"django.forms.Textarea",
"spreedly.functions.check_trial_eligibility",
"spreedly.functions.return_url",
"uuid.uuid4",
"spreedly.functions.subscription_url",
"django.template.loader.render_to_string",
"spreedly.models.Plan.objects.filter",
"django.forms.CharField"
] |
[((616, 661), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(30)', 'required': '(True)'}), '(max_length=30, required=True)\n', (631, 661), False, 'from django import forms\n'), ((696, 727), 'django.forms.EmailField', 'forms.EmailField', ([], {'required': '(True)'}), '(required=True)\n', (712, 727), False, 'from django import forms\n'), ((3321, 3366), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(30)', 'required': '(True)'}), '(max_length=30, required=True)\n', (3336, 3366), False, 'from django import forms\n'), ((3401, 3432), 'django.forms.EmailField', 'forms.EmailField', ([], {'required': '(True)'}), '(required=True)\n', (3417, 3432), False, 'from django import forms\n'), ((3716, 3787), 'django.forms.CharField', 'forms.CharField', ([], {'max_length': '(32)', 'required': '(True)', 'widget': 'forms.HiddenInput'}), '(max_length=32, required=True, widget=forms.HiddenInput)\n', (3731, 3787), False, 'from django import forms\n'), ((5543, 5592), 'django.forms.CharField', 'forms.CharField', ([], {'label': '"""Your Name"""', 'required': '(True)'}), "(label='Your Name', required=True)\n", (5558, 5592), False, 'from django import forms\n'), ((5772, 5818), 'django.forms.EmailField', 'forms.EmailField', ([], {'label': '"""Email"""', 'required': '(True)'}), "(label='Email', required=True)\n", (5788, 5818), False, 'from django import forms\n'), ((5859, 5911), 'django.forms.EmailField', 'forms.EmailField', ([], {'label': '"""Email Again"""', 'required': '(True)'}), "(label='Email Again', required=True)\n", (5875, 5911), False, 'from django import forms\n'), ((7108, 7157), 'django.forms.CharField', 'forms.CharField', ([], {'label': '"""Plan Name"""', 'required': '(True)'}), "(label='Plan Name', required=True)\n", (7123, 7157), False, 'from django import forms\n'), ((7462, 7558), 'django.forms.ChoiceField', 'forms.ChoiceField', ([], {'label': '"""Time Units"""', 'choices': "[('days', 'Day(s)'), ('months', 'Month(s)')]"}), "(label='Time Units', choices=[('days', 'Day(s)'), (\n 'months', 'Month(s)')])\n", (7479, 7558), False, 'from django import forms\n'), ((7627, 7676), 'django.forms.CharField', 'forms.CharField', ([], {'label': '"""Your Name"""', 'required': '(True)'}), "(label='Your Name', required=True)\n", (7642, 7676), False, 'from django import forms\n'), ((7856, 7902), 'django.forms.EmailField', 'forms.EmailField', ([], {'label': '"""Email"""', 'required': '(True)'}), "(label='Email', required=True)\n", (7872, 7902), False, 'from django import forms\n'), ((2613, 2648), 'spreedly.functions.check_trial_eligibility', 'check_trial_eligibility', (['plan', 'user'], {}), '(plan, user)\n', (2636, 2648), False, 'from spreedly.functions import subscription_url, check_trial_eligibility, return_url\n'), ((3222, 3268), 'django.core.urlresolvers.reverse', 'reverse', (['"""spreedly_email_sent"""'], {'args': '[user.id]'}), "('spreedly_email_sent', args=[user.id])\n", (3229, 3268), False, 'from django.core.urlresolvers import reverse\n'), ((4867, 4919), 'spreedly.models.Gift.objects.get', 'Gift.objects.get', ([], {'uuid': "self.cleaned_data['gift_key']"}), "(uuid=self.cleaned_data['gift_key'])\n", (4883, 4919), False, 'from spreedly.models import Plan, Gift\n'), ((5204, 5269), 'spreedly.pyspreedly.api.Client', 'Client', (['settings.SPREEDLY_AUTH_TOKEN', 'settings.SPREEDLY_SITE_NAME'], {}), '(settings.SPREEDLY_AUTH_TOKEN, settings.SPREEDLY_SITE_NAME)\n', (5210, 5269), False, 'from spreedly.pyspreedly.api import Client\n'), ((6436, 6551), 'django.contrib.auth.models.User.objects.create', 'User.objects.create', ([], {'username': 'gift_id', 'email': "self.cleaned_data['email']", 'is_active': '(False)', 'password': '"""<PASSWORD>"""'}), "(username=gift_id, email=self.cleaned_data['email'],\n is_active=False, password='<PASSWORD>')\n", (6455, 6551), False, 'from django.contrib.auth.models import User\n'), ((6623, 6757), 'spreedly.models.Gift.objects.create', 'Gift.objects.create', ([], {'from_user': 'request.user', 'to_user': 'user', 'uuid': 'gift_id', 'plan_name': 'plan.name', 'message': "self.cleaned_data['message']"}), "(from_user=request.user, to_user=user, uuid=gift_id,\n plan_name=plan.name, message=self.cleaned_data['message'])\n", (6642, 6757), False, 'from spreedly.models import Plan, Gift\n'), ((8024, 8139), 'django.contrib.auth.models.User.objects.create', 'User.objects.create', ([], {'username': 'gift_id', 'email': "self.cleaned_data['email']", 'is_active': '(False)', 'password': '"""<PASSWORD>"""'}), "(username=gift_id, email=self.cleaned_data['email'],\n is_active=False, password='<PASSWORD>')\n", (8043, 8139), False, 'from django.contrib.auth.models import User\n'), ((8211, 8371), 'spreedly.models.Gift.objects.create', 'Gift.objects.create', ([], {'from_user': 'request.user', 'to_user': 'user', 'uuid': 'gift_id', 'message': "self.cleaned_data['message']", 'plan_name': "self.cleaned_data['plan_name']"}), "(from_user=request.user, to_user=user, uuid=gift_id,\n message=self.cleaned_data['message'], plan_name=self.cleaned_data[\n 'plan_name'])\n", (8230, 8371), False, 'from spreedly.models import Plan, Gift\n'), ((839, 860), 'django.forms.PasswordInput', 'forms.PasswordInput', ([], {}), '()\n', (858, 860), False, 'from django import forms\n'), ((971, 992), 'django.forms.PasswordInput', 'forms.PasswordInput', ([], {}), '()\n', (990, 992), False, 'from django import forms\n'), ((1051, 1084), 'spreedly.models.Plan.objects.filter', 'Plan.objects.filter', ([], {'enabled': '(True)'}), '(enabled=True)\n', (1070, 1084), False, 'from spreedly.models import Plan, Gift\n'), ((2685, 2722), 'spreedly.functions.return_url', 'return_url', (['plan.pk', 'user'], {'trial': '(True)'}), '(plan.pk, user, trial=True)\n', (2695, 2722), False, 'from spreedly.functions import subscription_url, check_trial_eligibility, return_url\n'), ((2755, 2783), 'spreedly.functions.subscription_url', 'subscription_url', (['plan', 'user'], {}), '(plan, user)\n', (2771, 2783), False, 'from spreedly.functions import subscription_url, check_trial_eligibility, return_url\n'), ((2886, 3058), 'django.template.loader.render_to_string', 'render_to_string', (['spreedly_settings.SPREEDLY_CONFIRM_EMAIL', "{'plan_name': plan.name, 'user': user, 'site': spreedly_settings.\n SPREEDLY_SITE_URL, 'spreedly_url': url}"], {}), "(spreedly_settings.SPREEDLY_CONFIRM_EMAIL, {'plan_name':\n plan.name, 'user': user, 'site': spreedly_settings.SPREEDLY_SITE_URL,\n 'spreedly_url': url})\n", (2902, 3058), False, 'from django.template.loader import render_to_string\n'), ((3544, 3565), 'django.forms.PasswordInput', 'forms.PasswordInput', ([], {}), '()\n', (3563, 3565), False, 'from django import forms\n'), ((3672, 3693), 'django.forms.PasswordInput', 'forms.PasswordInput', ([], {}), '()\n', (3691, 3693), False, 'from django import forms\n'), ((4697, 4770), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'username': "self.cleaned_data['username']", 'is_active': '(False)'}), "(username=self.cleaned_data['username'], is_active=False)\n", (4713, 4770), False, 'from django.contrib.auth.models import User\n'), ((5470, 5507), 'spreedly.models.Plan.objects.filter', 'Plan.objects.filter', ([], {'plan_type': '"""gift"""'}), "(plan_type='gift')\n", (5489, 5507), False, 'from spreedly.models import Plan, Gift\n'), ((5710, 5755), 'django.forms.Textarea', 'forms.Textarea', ([], {'attrs': "{'rows': 3, 'cols': 55}"}), "(attrs={'rows': 3, 'cols': 55})\n", (5724, 5755), False, 'from django import forms\n'), ((7794, 7839), 'django.forms.Textarea', 'forms.Textarea', ([], {'attrs': "{'rows': 3, 'cols': 55}"}), "(attrs={'rows': 3, 'cols': 55})\n", (7808, 7839), False, 'from django import forms\n'), ((4147, 4219), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'username': "self.cleaned_data['username']", 'is_active': '(True)'}), "(username=self.cleaned_data['username'], is_active=True)\n", (4163, 4219), False, 'from django.contrib.auth.models import User\n'), ((1539, 1586), 'django.utils.translation.ugettext_lazy', '_', (['"""You must type the same password each time."""'], {}), "('You must type the same password each time.')\n", (1540, 1586), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2185, 2228), 'django.utils.translation.ugettext_lazy', '_', (['"""Sorry, This username is already taken."""'], {}), "('Sorry, This username is already taken.')\n", (2186, 2228), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4264, 4307), 'django.utils.translation.ugettext_lazy', '_', (['"""Sorry, This username is already taken."""'], {}), "('Sorry, This username is already taken.')\n", (4265, 4307), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4494, 4541), 'django.utils.translation.ugettext_lazy', '_', (['"""You must type the same password each time."""'], {}), "('You must type the same password each time.')\n", (4495, 4541), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((6182, 6249), 'django.utils.translation.ugettext_lazy', '_', (['"""The two emails don\'t match. Please make sure both are correct."""'], {}), '("The two emails don\'t match. Please make sure both are correct.")\n', (6183, 6249), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((6340, 6352), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (6350, 6352), False, 'import uuid\n'), ((7977, 7989), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (7987, 7989), False, 'import uuid\n'), ((1834, 1891), 'django.utils.translation.ugettext_lazy', '_', (['"""A user with this email has already had a free trial."""'], {}), "('A user with this email has already had a free trial.')\n", (1835, 1891), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((7286, 7338), 'spreedly.models.Plan.objects.values_list', 'Plan.objects.values_list', (['"""feature_level"""'], {'flat': '(True)'}), "('feature_level', flat=True)\n", (7310, 7338), False, 'from spreedly.models import Plan, Gift\n')]
|
import pytest
from lupin.errors import InvalidIn
from lupin.validators import In
@pytest.fixture
def validator():
return In({1, 2, 3})
class TestCall(object):
def test_raise_error_if_invalid_value(self, validator):
with pytest.raises(InvalidIn):
validator(4, [])
def test_does_nothing_if_valid_value(self, validator):
validator(1, [])
|
[
"pytest.raises",
"lupin.validators.In"
] |
[((128, 141), 'lupin.validators.In', 'In', (['{1, 2, 3}'], {}), '({1, 2, 3})\n', (130, 141), False, 'from lupin.validators import In\n'), ((241, 265), 'pytest.raises', 'pytest.raises', (['InvalidIn'], {}), '(InvalidIn)\n', (254, 265), False, 'import pytest\n')]
|
# -*- coding: utf-8 -*-
import os
import csv
import dirfiles
def trial_order(order_directory):
"""
Reads a specific trial order for n blocks from n csv files and
returns n lists to be used by the object block_list.order_trials()
of Expyriment library
"""
# Define the pathway of the inputs directory
order_path = os.path.abspath(order_directory)
# List csv files with sequence order of the inputs
order_filenames = dirfiles.listdir_csvnohidden(order_path)
order_filenames.sort()
# Read csv files
order_list = [[i for i in csv.reader(open(order_filename))]
for order_filename in order_filenames]
# Remove headers of each block lists
for i in range(len(order_list)):
order_list[i].pop(0)
# Extract the sequence from the second column of the block lists
norder_list = [[order_list[i][j][1] for j in range(len(order_list[i]))]
for i in range(len(order_list))]
# Convert "string" into "int" elements
norder_list = [map(int, norder_list[k]) for k in range(len(norder_list))]
# Return final sequence of trials for every block
return norder_list
|
[
"dirfiles.listdir_csvnohidden",
"os.path.abspath"
] |
[((344, 376), 'os.path.abspath', 'os.path.abspath', (['order_directory'], {}), '(order_directory)\n', (359, 376), False, 'import os\n'), ((454, 494), 'dirfiles.listdir_csvnohidden', 'dirfiles.listdir_csvnohidden', (['order_path'], {}), '(order_path)\n', (482, 494), False, 'import dirfiles\n')]
|
import pct_titles
import os
import cythonmagick
from StringIO import StringIO
def escape(s):
s = s.replace("&", "\&")
s = s.replace("<", "\<")
s = s.replace(">", ">")
s = s.replace('"', """)
s = s.replace("'", ''')
return s
def convert_color(c, alpha):
a = 1.0 - (alpha / 100.0)
r = c[0] / 65535.0
g = c[1] / 65535.0
b = c[2] / 65535.0
c = '#%04X%04X%04X%04X' % ( int(r*65535.0), int(g*65535.0), int(b*65535.0),
int(a*65535.0))
return c
def render_item(pct, img, item ,out_dir):
bbox = item.bbox
img.fill_color = 'white'
img.stroke_color = 'white'
min_x = min(bbox[1], bbox[3])
min_y = min(bbox[0], bbox[2])
max_x = max(bbox[1], bbox[3])
max_y = max(bbox[0], bbox[2])
width = max_x - min_x
height = max_y - min_y
rad_x = width/2.0
rad_y = height/2.0
origin_x = min_x + rad_x
origin_y = min_y + rad_y
fill_color = convert_color(item.fill_color, item.fill_alpha)
stroke_color = convert_color(item.border_color, item.border_alpha)
shadow_color = convert_color(item.shadow_color, item.shadow_alpha)
img.fill_color = fill_color
img.stroke_width = item.border_width
img.stroke_color = stroke_color
if item.border_width:
img.stroke_color = stroke_color
else:
img.stroke_color = fill_color
if isinstance(item, pct_titles.TitleLine):
img.stroke_width = item.line_width
img.stroke_color = 'white'
line = cythonmagick.Line(bbox[1], bbox[0], bbox[3], bbox[2])
img.draw([line])
elif isinstance(item, pct_titles.TitleRectangle):
roundness = item.corner_roundness / 2.0
rect = cythonmagick.RoundRectangle(min_x, min_y, max_x, max_y, roundness,roundness)
img.draw([rect])
elif isinstance(item, pct_titles.TitleOval):
origin_x = min_x + rad_x
origin_y = min_y + rad_y
oval = cythonmagick.Ellipse(origin_x, origin_y, rad_x, rad_y, 0, 360)
img.draw([oval])
elif isinstance(item, pct_titles.TitleText):
font_size = item.text_formating[0].font_size
font_id = item.text_formating[0].font_id
font_style_id = item.text_formating[0].style
font = pct.title_page.fonts[font_id].replace(" ", '-')
style = 'normal'
if font_style_id in (0x0200, 0x0300):
style = 'italic'
caption_size = "%dx%d" % (width, 0) # zero for auto height
caption = cythonmagick.Image(size=caption_size)
caption.font = font
caption.density = "72x72"
caption.font_point_size = font_size
caption.background = 'none'
caption.fill_color = fill_color
caption.stroke_width = item.border_width
caption.stroke_color = stroke_color
caption.font_style = style
# bold
if font_style_id in (0x0100, 0x0300):
caption.font_weight = 1
else:
caption.font_weight = 0
text = item.text
caption.read("caption:{text}".format(text=text))
grow = 200
original_size = caption.size()
caption.extent("%dx%d!" % (width+grow, height+grow), 'center')
offset_x = min_x - (caption.size().width - original_size.width) / 2
offset_y = min_y - (caption.size().height - original_size.height) / 2
position = cythonmagick.Geometry(0, 0, offset_x, offset_y)
if item.shadow_depth or item.shadow_blur:
alpha = caption.channel("alpha")
alpha.negate()
# alpha.write(os.path.join(out_dir, "alpha.png"))
shadow = cythonmagick.Image(size=alpha.size(), color=shadow_color)
shadow.composite(alpha, compose = "copyopacity")
if item.shadow_blur:
shadow.blur(1, item.shadow_blur)
shadow_pos = cythonmagick.Geometry(0, 0, offset_x + item.shadow_dir[1], offset_y + item.shadow_dir[0])
shadow.artifacts["compose:args"] = "%d" % (100-item.shadow_alpha)
img.composite(shadow, "dissolve", shadow_pos)
img.composite(caption, "over", position,)
def render_pct(src, dst):
pct = pct_titles.PctFile()
pct.read(src)
size = "865x485" # this seems to be the base resolution
img = cythonmagick.Image(size=size, color="grey")
#convert -list font
for i, item in enumerate(pct.elements):
render_item(pct, img, item, os.path.dirname(dst))
img.resize("720x486!")
name, ext = os.path.splitext(dst)
if ext and ext.lower() in (".pict", '.pct',):
img.magick = 'pict'
data = StringIO(img.tostring())
f = open(dst, 'wb')
pct.embed(data, f)
else:
img.write(dst)
if __name__ == "__main__":
from optparse import OptionParser
parser = OptionParser()
(options, args) = parser.parse_args()
if len(args) != 2:
parser.error("not enough args")
render_pct(args[0], args[1])
|
[
"cythonmagick.Ellipse",
"optparse.OptionParser",
"os.path.dirname",
"cythonmagick.Line",
"cythonmagick.Image",
"os.path.splitext",
"pct_titles.PctFile",
"cythonmagick.Geometry",
"cythonmagick.RoundRectangle"
] |
[((4201, 4221), 'pct_titles.PctFile', 'pct_titles.PctFile', ([], {}), '()\n', (4219, 4221), False, 'import pct_titles\n'), ((4311, 4354), 'cythonmagick.Image', 'cythonmagick.Image', ([], {'size': 'size', 'color': '"""grey"""'}), "(size=size, color='grey')\n", (4329, 4354), False, 'import cythonmagick\n'), ((4529, 4550), 'os.path.splitext', 'os.path.splitext', (['dst'], {}), '(dst)\n', (4545, 4550), False, 'import os\n'), ((4838, 4852), 'optparse.OptionParser', 'OptionParser', ([], {}), '()\n', (4850, 4852), False, 'from optparse import OptionParser\n'), ((1543, 1596), 'cythonmagick.Line', 'cythonmagick.Line', (['bbox[1]', 'bbox[0]', 'bbox[3]', 'bbox[2]'], {}), '(bbox[1], bbox[0], bbox[3], bbox[2])\n', (1560, 1596), False, 'import cythonmagick\n'), ((1742, 1819), 'cythonmagick.RoundRectangle', 'cythonmagick.RoundRectangle', (['min_x', 'min_y', 'max_x', 'max_y', 'roundness', 'roundness'], {}), '(min_x, min_y, max_x, max_y, roundness, roundness)\n', (1769, 1819), False, 'import cythonmagick\n'), ((4460, 4480), 'os.path.dirname', 'os.path.dirname', (['dst'], {}), '(dst)\n', (4475, 4480), False, 'import os\n'), ((1975, 2037), 'cythonmagick.Ellipse', 'cythonmagick.Ellipse', (['origin_x', 'origin_y', 'rad_x', 'rad_y', '(0)', '(360)'], {}), '(origin_x, origin_y, rad_x, rad_y, 0, 360)\n', (1995, 2037), False, 'import cythonmagick\n'), ((2520, 2557), 'cythonmagick.Image', 'cythonmagick.Image', ([], {'size': 'caption_size'}), '(size=caption_size)\n', (2538, 2557), False, 'import cythonmagick\n'), ((3405, 3452), 'cythonmagick.Geometry', 'cythonmagick.Geometry', (['(0)', '(0)', 'offset_x', 'offset_y'], {}), '(0, 0, offset_x, offset_y)\n', (3426, 3452), False, 'import cythonmagick\n'), ((3887, 3981), 'cythonmagick.Geometry', 'cythonmagick.Geometry', (['(0)', '(0)', '(offset_x + item.shadow_dir[1])', '(offset_y + item.shadow_dir[0])'], {}), '(0, 0, offset_x + item.shadow_dir[1], offset_y + item.\n shadow_dir[0])\n', (3908, 3981), False, 'import cythonmagick\n')]
|
import os
from flask import Flask, request, redirect, url_for, render_template, send_from_directory
from werkzeug import secure_filename
UPLOAD_FOLDER = 'uploads/'
ALLOWED_EXTENSIONS = set(['m'])
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['MAX_CONTENT_LENGTH'] = 16*1024*1024
def allowed_file(filename):
return '.' in filename and filename.rsplit('.',1)[1] in ALLOWED_EXTENSIONS
@app.route('/', methods=['GET','POST'])
def upload_file():
if request.method == 'POST':
file = request.files['file']
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
return redirect(url_for('uploaded_file' , filename=filename))
return render_template('index.html')
@app.route('/uploads/<filename>')
def uploaded_file(filename):
return send_from_directory(app.config['UPLOAD_FOLDER'], filename)
if __name__ == '__main__':
app.run(debug=True)
|
[
"flask.Flask",
"werkzeug.secure_filename",
"flask.url_for",
"flask.render_template",
"flask.send_from_directory",
"os.path.join"
] |
[((204, 219), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (209, 219), False, 'from flask import Flask, request, redirect, url_for, render_template, send_from_directory\n'), ((814, 843), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (829, 843), False, 'from flask import Flask, request, redirect, url_for, render_template, send_from_directory\n'), ((919, 977), 'flask.send_from_directory', 'send_from_directory', (["app.config['UPLOAD_FOLDER']", 'filename'], {}), "(app.config['UPLOAD_FOLDER'], filename)\n", (938, 977), False, 'from flask import Flask, request, redirect, url_for, render_template, send_from_directory\n'), ((623, 653), 'werkzeug.secure_filename', 'secure_filename', (['file.filename'], {}), '(file.filename)\n', (638, 653), False, 'from werkzeug import secure_filename\n'), ((676, 727), 'os.path.join', 'os.path.join', (["app.config['UPLOAD_FOLDER']", 'filename'], {}), "(app.config['UPLOAD_FOLDER'], filename)\n", (688, 727), False, 'import os\n'), ((757, 800), 'flask.url_for', 'url_for', (['"""uploaded_file"""'], {'filename': 'filename'}), "('uploaded_file', filename=filename)\n", (764, 800), False, 'from flask import Flask, request, redirect, url_for, render_template, send_from_directory\n')]
|
from django.db import models
from django.db.models import Min
import requests
import isbnlib
from django.core.files import File
from django.core.files.temp import NamedTemporaryFile
from django.templatetags.static import static
from django.conf import settings
import datetime
from django.utils.timezone import now
class Book(models.Model):
isbn = models.DecimalField(primary_key=True, max_digits=13, decimal_places=0)
title = models.CharField(max_length=255)
original_title = models.CharField(max_length=255)
edition = models.PositiveSmallIntegerField()
year = models.PositiveIntegerField()
avg_price = models.FloatField(default=0, blank=True, null=True)
qty_in_stock = models.IntegerField(default=0)
qty_sold = models.IntegerField(default=0)
publisher = models.ForeignKey('Publisher')
author = models.ManyToManyField('Author')
cover = models.ImageField(upload_to='poylbookexchange/covers')
def sellable(self):
return self.exemplar_set.filter(buyer_id=None).order_by('pk').all()
def sold(self):
return self.exemplar_set.exclude(buyer_id=None).order_by('pk').all()
def used_in_sections(self):
return Section.objects.filter(usedby__book__pk=self.pk).distinct().order_by('pk').all()
def used_in_semesters(self):
return Semester.objects.filter(usedby__book__pk=self.pk).distinct().order_by('pk').all()
def update_metadata(self):
try:
data = isbnlib.meta(str(self.isbn), 'wcat')
self.title = data.get('Title')
self.year = data.get('Year') or 1900
self.publisher, _ = Publisher.objects.get_or_create(name=data.get('Publisher', 'Unknow'))
self.author.clear()
for author in data.get('Authors', []):
for splited_author in author.split(', '):
author_object, _ = Author.objects.get_or_create(name=splited_author)
self.author.add(author_object)
except:
self.title = self.title or '?'
self.year = self.year or 1900
try:
truc = self.publisher
except:
self.publisher, _ = Publisher.objects.get_or_create(name='Unknow')
self.save()
def update_cover(self):
image = requests.get('http://images.amazon.com/images/P/%s.01._SS500_SCLZZZZZZZ_.jpg' % (isbnlib.to_isbn10(str(self.isbn)), ))
if image.status_code == 200 and len(image.content) > 50:
img_temp = NamedTemporaryFile(delete=True)
img_temp.write(image.content)
img_temp.flush()
self.cover.save('%s.jpg' % (self.isbn,), File(img_temp))
else:
self.cover.delete()
def get_current_cover(self):
if self.cover:
return settings.MEDIA_URL + self.cover.name
return static('polybookexchange/default.png')
class Candidate(models.Model):
STATE_CHOICES = (
(u'neuf', u'neuf'),
(u'bon', u'bon'),
(u'acceptable', u'acceptable'),
(u'mauvais', u'mauvais'),
)
isbn = models.DecimalField(max_digits=13, decimal_places=0)
sciper = models.PositiveIntegerField()
annotated = models.BooleanField()
highlighted = models.BooleanField()
state = models.CharField(max_length=10, choices=STATE_CHOICES)
comments = models.TextField()
price = models.FloatField()
creation_date = models.DateTimeField(auto_now_add=True)
def days_left(self):
diff = (self.creation_date + datetime.timedelta(days=16) - now()).days
if diff < 0:
diff = 0
return diff
def days_left_percent(self):
return int(((15 - self.days_left()) * 100.0) / 15.0)
def days_left_color(self):
if self.days_left() < 1:
return 'danger'
if self.days_left() < 5:
return 'warning'
return 'success'
class CandidateUsage(models.Model):
candidate = models.ForeignKey('Candidate')
section = models.ForeignKey('Section')
semester = models.ForeignKey('Semester')
class Exemplar(models.Model):
STATE_CHOICES = (
(u'neuf', u'neuf'),
(u'bon', u'bon'),
(u'acceptable', u'acceptable'),
(u'mauvais', u'mauvais'),
)
book = models.ForeignKey('Book')
price = models.FloatField()
seller_id = models.PositiveIntegerField()
buyer_id = models.PositiveIntegerField(null=True, blank=True)
posted_date = models.DateTimeField(auto_now_add=True)
sold_date = models.DateTimeField(null=True, blank=True)
annotated = models.BooleanField(default=False)
highlighted = models.BooleanField(default=False)
state = models.CharField(max_length=10, choices=STATE_CHOICES)
comments = models.TextField(blank=True, null=True)
def min_price(self):
return Exemplar.objects.filter(book=self.book).exclude(sold_date=None).aggregate(Min('price'))['price__min']
def state_color(self):
mapping = {
'neuf': 'success',
'bon': 'info',
'acceptable': 'warning',
'mauvais': 'danger'
}
return mapping.get(self.state, 'primary')
class Publisher(models.Model):
name = models.CharField(max_length=255)
def __unicode__(self):
return self.name
class Author(models.Model):
name = models.CharField(max_length=255)
def __unicode__(self):
return self.name
class Section(models.Model):
name = models.CharField(max_length=255)
acronym = models.CharField(max_length=10)
class Semester(models.Model):
name = models.CharField(max_length=255)
acronym = models.CharField(max_length=10)
class UsedBy(models.Model):
book = models.ForeignKey('Book')
section = models.ForeignKey('Section')
semester = models.ForeignKey('Semester')
|
[
"django.db.models.TextField",
"django.core.files.File",
"django.db.models.ManyToManyField",
"django.core.files.temp.NamedTemporaryFile",
"django.db.models.Min",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.PositiveIntegerField",
"django.utils.timezone.now",
"django.db.models.FloatField",
"django.db.models.PositiveSmallIntegerField",
"django.db.models.BooleanField",
"django.db.models.ImageField",
"django.db.models.DecimalField",
"django.db.models.IntegerField",
"django.templatetags.static.static",
"datetime.timedelta",
"django.db.models.DateTimeField"
] |
[((355, 425), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'primary_key': '(True)', 'max_digits': '(13)', 'decimal_places': '(0)'}), '(primary_key=True, max_digits=13, decimal_places=0)\n', (374, 425), False, 'from django.db import models\n'), ((438, 470), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (454, 470), False, 'from django.db import models\n'), ((492, 524), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (508, 524), False, 'from django.db import models\n'), ((539, 573), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {}), '()\n', (571, 573), False, 'from django.db import models\n'), ((585, 614), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {}), '()\n', (612, 614), False, 'from django.db import models\n'), ((631, 682), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': '(0)', 'blank': '(True)', 'null': '(True)'}), '(default=0, blank=True, null=True)\n', (648, 682), False, 'from django.db import models\n'), ((702, 732), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (721, 732), False, 'from django.db import models\n'), ((748, 778), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (767, 778), False, 'from django.db import models\n'), ((795, 825), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Publisher"""'], {}), "('Publisher')\n", (812, 825), False, 'from django.db import models\n'), ((839, 871), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['"""Author"""'], {}), "('Author')\n", (861, 871), False, 'from django.db import models\n'), ((884, 938), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""poylbookexchange/covers"""'}), "(upload_to='poylbookexchange/covers')\n", (901, 938), False, 'from django.db import models\n'), ((3104, 3156), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(13)', 'decimal_places': '(0)'}), '(max_digits=13, decimal_places=0)\n', (3123, 3156), False, 'from django.db import models\n'), ((3170, 3199), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {}), '()\n', (3197, 3199), False, 'from django.db import models\n'), ((3216, 3237), 'django.db.models.BooleanField', 'models.BooleanField', ([], {}), '()\n', (3235, 3237), False, 'from django.db import models\n'), ((3256, 3277), 'django.db.models.BooleanField', 'models.BooleanField', ([], {}), '()\n', (3275, 3277), False, 'from django.db import models\n'), ((3290, 3344), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'choices': 'STATE_CHOICES'}), '(max_length=10, choices=STATE_CHOICES)\n', (3306, 3344), False, 'from django.db import models\n'), ((3360, 3378), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (3376, 3378), False, 'from django.db import models\n'), ((3391, 3410), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (3408, 3410), False, 'from django.db import models\n'), ((3431, 3470), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (3451, 3470), False, 'from django.db import models\n'), ((3972, 4002), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Candidate"""'], {}), "('Candidate')\n", (3989, 4002), False, 'from django.db import models\n'), ((4017, 4045), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Section"""'], {}), "('Section')\n", (4034, 4045), False, 'from django.db import models\n'), ((4061, 4090), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Semester"""'], {}), "('Semester')\n", (4078, 4090), False, 'from django.db import models\n'), ((4292, 4317), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Book"""'], {}), "('Book')\n", (4309, 4317), False, 'from django.db import models\n'), ((4330, 4349), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (4347, 4349), False, 'from django.db import models\n'), ((4366, 4395), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {}), '()\n', (4393, 4395), False, 'from django.db import models\n'), ((4411, 4461), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (4438, 4461), False, 'from django.db import models\n'), ((4480, 4519), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (4500, 4519), False, 'from django.db import models\n'), ((4536, 4579), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (4556, 4579), False, 'from django.db import models\n'), ((4596, 4630), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (4615, 4630), False, 'from django.db import models\n'), ((4649, 4683), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (4668, 4683), False, 'from django.db import models\n'), ((4696, 4750), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'choices': 'STATE_CHOICES'}), '(max_length=10, choices=STATE_CHOICES)\n', (4712, 4750), False, 'from django.db import models\n'), ((4766, 4805), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (4782, 4805), False, 'from django.db import models\n'), ((5229, 5261), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (5245, 5261), False, 'from django.db import models\n'), ((5356, 5388), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (5372, 5388), False, 'from django.db import models\n'), ((5484, 5516), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (5500, 5516), False, 'from django.db import models\n'), ((5531, 5562), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)'}), '(max_length=10)\n', (5547, 5562), False, 'from django.db import models\n'), ((5606, 5638), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (5622, 5638), False, 'from django.db import models\n'), ((5653, 5684), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)'}), '(max_length=10)\n', (5669, 5684), False, 'from django.db import models\n'), ((5726, 5751), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Book"""'], {}), "('Book')\n", (5743, 5751), False, 'from django.db import models\n'), ((5766, 5794), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Section"""'], {}), "('Section')\n", (5783, 5794), False, 'from django.db import models\n'), ((5810, 5839), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Semester"""'], {}), "('Semester')\n", (5827, 5839), False, 'from django.db import models\n'), ((2863, 2901), 'django.templatetags.static.static', 'static', (['"""polybookexchange/default.png"""'], {}), "('polybookexchange/default.png')\n", (2869, 2901), False, 'from django.templatetags.static import static\n'), ((2514, 2545), 'django.core.files.temp.NamedTemporaryFile', 'NamedTemporaryFile', ([], {'delete': '(True)'}), '(delete=True)\n', (2532, 2545), False, 'from django.core.files.temp import NamedTemporaryFile\n'), ((2671, 2685), 'django.core.files.File', 'File', (['img_temp'], {}), '(img_temp)\n', (2675, 2685), False, 'from django.core.files import File\n'), ((3564, 3569), 'django.utils.timezone.now', 'now', ([], {}), '()\n', (3567, 3569), False, 'from django.utils.timezone import now\n'), ((4921, 4933), 'django.db.models.Min', 'Min', (['"""price"""'], {}), "('price')\n", (4924, 4933), False, 'from django.db.models import Min\n'), ((3534, 3561), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(16)'}), '(days=16)\n', (3552, 3561), False, 'import datetime\n')]
|
import re
import os
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from urllib import urlencode
import csv
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
HERE = os.path.abspath(os.path.dirname(__file__))
class TakeALotSpider(BaseSpider):
name = 'takealot.com'
allowed_domains = ['takealot.com']
def start_requests(self):
with open(os.path.join(HERE, 'products.csv')) as f:
reader = csv.DictReader(f)
for row in reader:
sku = row['ProdCode']
url = 'http://www.takealot.com/all/?qsearch=%s&order=price&direction=asc'
yield Request(url % sku, meta={'sku': sku})
def parse(self, response):
hxs = HtmlXPathSelector(response)
product = hxs.select('//li[@class="result-item hproduct"]')
if not product:
return
product = product[0]
loader = ProductLoader(item=Product(), selector=product)
loader.add_xpath('name', './/p[@class="p-title fn"]/a/text()')
url = hxs.select('.//p[@class="p-title fn"]/a/@href').extract()[0]
loader.add_value('url', urljoin_rfc(get_base_url(response), url))
loader.add_xpath('price', './/span[@class="amount"]/text()')
loader.add_value('sku', response.meta['sku'])
yield loader.load_item()
|
[
"scrapy.http.Request",
"scrapy.utils.response.get_base_url",
"csv.DictReader",
"os.path.dirname",
"product_spiders.items.Product",
"os.path.join",
"scrapy.selector.HtmlXPathSelector"
] |
[((391, 416), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (406, 416), False, 'import os\n'), ((913, 940), 'scrapy.selector.HtmlXPathSelector', 'HtmlXPathSelector', (['response'], {}), '(response)\n', (930, 940), False, 'from scrapy.selector import HtmlXPathSelector\n'), ((630, 647), 'csv.DictReader', 'csv.DictReader', (['f'], {}), '(f)\n', (644, 647), False, 'import csv\n'), ((567, 601), 'os.path.join', 'os.path.join', (['HERE', '"""products.csv"""'], {}), "(HERE, 'products.csv')\n", (579, 601), False, 'import os\n'), ((1118, 1127), 'product_spiders.items.Product', 'Product', ([], {}), '()\n', (1125, 1127), False, 'from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader\n'), ((1337, 1359), 'scrapy.utils.response.get_base_url', 'get_base_url', (['response'], {}), '(response)\n', (1349, 1359), False, 'from scrapy.utils.response import get_base_url\n'), ((829, 866), 'scrapy.http.Request', 'Request', (['(url % sku)'], {'meta': "{'sku': sku}"}), "(url % sku, meta={'sku': sku})\n", (836, 866), False, 'from scrapy.http import Request, HtmlResponse\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 1 16:33:02 2018
@author: <NAME>
"""
# Calculate the distance map between the C-alpha atoms in a protein. The input
# file is required to be a C_alpha coordinate file
import sys
import re
import numpy as np
import matplotlib.pyplot as plt
def get_ca_coordinates (filename):
# parse the c-alpha coordinates from the PDB records
# pdb_records is a list of lines, each line corresponding to a line entry
# in a pdb file
fh = open(filename, 'r')
all_coords = []; # create a multi-dimensional array to store the coordinates
for line_i in fh:
if re.match('^\s*?$', line_i):
pass
elif re.match('^ATOM', line_i):
line_i = line_i.rstrip()
coords_i = line_i[26:54]
coords_i = coords_i.split() # split by white space into individual elements
# convert into integers
coords_i = list(map(float,coords_i)) # convert from string to numeric
all_coords.append(coords_i)
fh.close()
# convert the multi-dimensional array into numpy array
all_coords_ca = np.array(all_coords)
return all_coords_ca
def calculate_ca_dist(ca_coords):
# calculate c-alpha distances
nres = len(ca_coords)
dist_mat = np.zeros((nres,nres), dtype=float) # declare a 0 x 0 numpy matrix
# to store the values
for i in range(0,nres-1):
for j in range(i+1,nres):
diff_ij = ca_coords[i,:]-ca_coords[j,:];
r_ij = np.linalg.norm(diff_ij)
dist_mat[i,j] = r_ij
dist_mat[j,i] = r_ij
return dist_mat
# The main script which will invoke the functions
filename = sys.argv[1]
all_coords_ca = get_ca_coordinates(filename)
dist_mat = calculate_ca_dist(all_coords_ca)
plt.figure()
plt.imshow(dist_mat, cmap='jet')
plt.show()
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.imshow",
"numpy.zeros",
"re.match",
"matplotlib.pyplot.figure",
"numpy.array",
"numpy.linalg.norm"
] |
[((1873, 1885), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1883, 1885), True, 'import matplotlib.pyplot as plt\n'), ((1886, 1918), 'matplotlib.pyplot.imshow', 'plt.imshow', (['dist_mat'], {'cmap': '"""jet"""'}), "(dist_mat, cmap='jet')\n", (1896, 1918), True, 'import matplotlib.pyplot as plt\n'), ((1919, 1929), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1927, 1929), True, 'import matplotlib.pyplot as plt\n'), ((1150, 1170), 'numpy.array', 'np.array', (['all_coords'], {}), '(all_coords)\n', (1158, 1170), True, 'import numpy as np\n'), ((1315, 1350), 'numpy.zeros', 'np.zeros', (['(nres, nres)'], {'dtype': 'float'}), '((nres, nres), dtype=float)\n', (1323, 1350), True, 'import numpy as np\n'), ((647, 674), 're.match', 're.match', (['"""^\\\\s*?$"""', 'line_i'], {}), "('^\\\\s*?$', line_i)\n", (655, 674), False, 'import re\n'), ((705, 730), 're.match', 're.match', (['"""^ATOM"""', 'line_i'], {}), "('^ATOM', line_i)\n", (713, 730), False, 'import re\n'), ((1590, 1613), 'numpy.linalg.norm', 'np.linalg.norm', (['diff_ij'], {}), '(diff_ij)\n', (1604, 1613), True, 'import numpy as np\n')]
|
from setuptools import setup
setup(
name='discord-exchange',
version='0.0.1',
description='A Discord bot to trade on arbitrary quantities',
url='https://github.com/miltfra/discord-exchange',
author='<NAME>',
author_email='<EMAIL>',
license='Apache License 2.0',
packages=['discord_exchange'],
install_requires=[],
classifiers=[
'Development Status :: 1 - Planning',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache License 2.0',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
)
|
[
"setuptools.setup"
] |
[((30, 662), 'setuptools.setup', 'setup', ([], {'name': '"""discord-exchange"""', 'version': '"""0.0.1"""', 'description': '"""A Discord bot to trade on arbitrary quantities"""', 'url': '"""https://github.com/miltfra/discord-exchange"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'license': '"""Apache License 2.0"""', 'packages': "['discord_exchange']", 'install_requires': '[]', 'classifiers': "['Development Status :: 1 - Planning',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache License 2.0',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5']"}), "(name='discord-exchange', version='0.0.1', description=\n 'A Discord bot to trade on arbitrary quantities', url=\n 'https://github.com/miltfra/discord-exchange', author='<NAME>',\n author_email='<EMAIL>', license='Apache License 2.0', packages=[\n 'discord_exchange'], install_requires=[], classifiers=[\n 'Development Status :: 1 - Planning',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache License 2.0',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5'])\n", (35, 662), False, 'from setuptools import setup\n')]
|
# coding: utf-8
import pprint
import re
import six
class QueryVmrPkgResResultDTO:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'vmr_pkg_id': 'str',
'vmr_name': 'str',
'vmr_pkg_parties': 'int',
'vmr_pkg_count': 'int',
'vmr_pkg_used_count': 'int'
}
attribute_map = {
'vmr_pkg_id': 'vmrPkgId',
'vmr_name': 'vmrName',
'vmr_pkg_parties': 'vmrPkgParties',
'vmr_pkg_count': 'vmrPkgCount',
'vmr_pkg_used_count': 'vmrPkgUsedCount'
}
def __init__(self, vmr_pkg_id=None, vmr_name=None, vmr_pkg_parties=None, vmr_pkg_count=None, vmr_pkg_used_count=None):
"""QueryVmrPkgResResultDTO - a model defined in huaweicloud sdk"""
self._vmr_pkg_id = None
self._vmr_name = None
self._vmr_pkg_parties = None
self._vmr_pkg_count = None
self._vmr_pkg_used_count = None
self.discriminator = None
if vmr_pkg_id is not None:
self.vmr_pkg_id = vmr_pkg_id
if vmr_name is not None:
self.vmr_name = vmr_name
if vmr_pkg_parties is not None:
self.vmr_pkg_parties = vmr_pkg_parties
if vmr_pkg_count is not None:
self.vmr_pkg_count = vmr_pkg_count
if vmr_pkg_used_count is not None:
self.vmr_pkg_used_count = vmr_pkg_used_count
@property
def vmr_pkg_id(self):
"""Gets the vmr_pkg_id of this QueryVmrPkgResResultDTO.
云会议室套餐包id。
:return: The vmr_pkg_id of this QueryVmrPkgResResultDTO.
:rtype: str
"""
return self._vmr_pkg_id
@vmr_pkg_id.setter
def vmr_pkg_id(self, vmr_pkg_id):
"""Sets the vmr_pkg_id of this QueryVmrPkgResResultDTO.
云会议室套餐包id。
:param vmr_pkg_id: The vmr_pkg_id of this QueryVmrPkgResResultDTO.
:type: str
"""
self._vmr_pkg_id = vmr_pkg_id
@property
def vmr_name(self):
"""Gets the vmr_name of this QueryVmrPkgResResultDTO.
云会议室套餐包名称。
:return: The vmr_name of this QueryVmrPkgResResultDTO.
:rtype: str
"""
return self._vmr_name
@vmr_name.setter
def vmr_name(self, vmr_name):
"""Sets the vmr_name of this QueryVmrPkgResResultDTO.
云会议室套餐包名称。
:param vmr_name: The vmr_name of this QueryVmrPkgResResultDTO.
:type: str
"""
self._vmr_name = vmr_name
@property
def vmr_pkg_parties(self):
"""Gets the vmr_pkg_parties of this QueryVmrPkgResResultDTO.
云会议室套餐方数。
:return: The vmr_pkg_parties of this QueryVmrPkgResResultDTO.
:rtype: int
"""
return self._vmr_pkg_parties
@vmr_pkg_parties.setter
def vmr_pkg_parties(self, vmr_pkg_parties):
"""Sets the vmr_pkg_parties of this QueryVmrPkgResResultDTO.
云会议室套餐方数。
:param vmr_pkg_parties: The vmr_pkg_parties of this QueryVmrPkgResResultDTO.
:type: int
"""
self._vmr_pkg_parties = vmr_pkg_parties
@property
def vmr_pkg_count(self):
"""Gets the vmr_pkg_count of this QueryVmrPkgResResultDTO.
该云会议室套餐分配的总数。
:return: The vmr_pkg_count of this QueryVmrPkgResResultDTO.
:rtype: int
"""
return self._vmr_pkg_count
@vmr_pkg_count.setter
def vmr_pkg_count(self, vmr_pkg_count):
"""Sets the vmr_pkg_count of this QueryVmrPkgResResultDTO.
该云会议室套餐分配的总数。
:param vmr_pkg_count: The vmr_pkg_count of this QueryVmrPkgResResultDTO.
:type: int
"""
self._vmr_pkg_count = vmr_pkg_count
@property
def vmr_pkg_used_count(self):
"""Gets the vmr_pkg_used_count of this QueryVmrPkgResResultDTO.
该套餐对应的云会议室已分配数量。
:return: The vmr_pkg_used_count of this QueryVmrPkgResResultDTO.
:rtype: int
"""
return self._vmr_pkg_used_count
@vmr_pkg_used_count.setter
def vmr_pkg_used_count(self, vmr_pkg_used_count):
"""Sets the vmr_pkg_used_count of this QueryVmrPkgResResultDTO.
该套餐对应的云会议室已分配数量。
:param vmr_pkg_used_count: The vmr_pkg_used_count of this QueryVmrPkgResResultDTO.
:type: int
"""
self._vmr_pkg_used_count = vmr_pkg_used_count
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, QueryVmrPkgResResultDTO):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"six.iteritems"
] |
[((4683, 4716), 'six.iteritems', 'six.iteritems', (['self.openapi_types'], {}), '(self.openapi_types)\n', (4696, 4716), False, 'import six\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2019-09-22 00:54:06
# @Author : <NAME> (<EMAIL>)
# @Link : https://github.com/iseesaw
# @Version : $Id$
import os
from EAlib.utils.dataloader import BasicLoader
def BasicLoaderTest():
import os
dirpath = "D:\\ACourse\\2019Fall\\EvolutionaryComputation\\TSP\\tsp"
for file in os.listdir(dirpath):
if file[-4:] == ".tsp":
BasicLoader(os.path.join(dirpath, file)).load()
BasicLoaderTest()
|
[
"os.path.join",
"os.listdir"
] |
[((365, 384), 'os.listdir', 'os.listdir', (['dirpath'], {}), '(dirpath)\n', (375, 384), False, 'import os\n'), ((444, 471), 'os.path.join', 'os.path.join', (['dirpath', 'file'], {}), '(dirpath, file)\n', (456, 471), False, 'import os\n')]
|
from django.conf.urls import patterns, include, url
from pydetector.views import hello
from pydetector.views2 import *
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^logs/$', logs),
# Examples:
url(r'^$', welcome),
# url(r'^pydetector/', include('pydetector.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
)
|
[
"django.contrib.admin.autodiscover",
"django.conf.urls.include",
"django.conf.urls.url"
] |
[((207, 227), 'django.contrib.admin.autodiscover', 'admin.autodiscover', ([], {}), '()\n', (225, 227), False, 'from django.contrib import admin\n'), ((257, 277), 'django.conf.urls.url', 'url', (['"""^logs/$"""', 'logs'], {}), "('^logs/$', logs)\n", (260, 277), False, 'from django.conf.urls import patterns, include, url\n'), ((301, 319), 'django.conf.urls.url', 'url', (['"""^$"""', 'welcome'], {}), "('^$', welcome)\n", (304, 319), False, 'from django.conf.urls import patterns, include, url\n'), ((597, 621), 'django.conf.urls.include', 'include', (['admin.site.urls'], {}), '(admin.site.urls)\n', (604, 621), False, 'from django.conf.urls import patterns, include, url\n')]
|
from keras.models import Model
from keras.optimizers import Adam, SGD
from keras.layers.core import Dropout, Reshape
from keras.layers import PReLU, Conv2DTranspose
from keras.layers import Input, Dense, Dropout, LeakyReLU, BatchNormalization, Conv2D, MaxPooling2D, AveragePooling2D, \
concatenate, Activation, ZeroPadding2D
from keras.layers import add, Flatten
from keras.losses import mean_squared_error, binary_crossentropy, sparse_categorical_crossentropy
from keras import losses
import keras.backend as K
import numpy as np
from keras.regularizers import l2
# Check Keras version - code will switch API if needed.
from keras import __version__ as keras_version
k2 = True if keras_version[0] == '2' else False
# If Keras is v2.x.x, create Keras 1-syntax wrappers.
if not k2:
from keras.layers import merge, Input
from keras.layers.convolutional import (Convolution2D, MaxPooling2D,
UpSampling2D)
else:
from keras.layers import Concatenate, Input
from keras.layers.convolutional import (Conv2D, MaxPooling2D,
UpSampling2D)
def merge(layers, mode=None, concat_axis=None):
"""Wrapper for Keras 2's Concatenate class (`mode` is discarded)."""
return Concatenate(axis=concat_axis)(list(layers))
def Convolution2D(n_filters, FL, FLredundant, activation=None,
init=None, W_regularizer=None, border_mode=None):
"""Wrapper for Keras 2's Conv2D class."""
return Conv2D(n_filters, FL, activation=activation,
kernel_initializer=init,
kernel_regularizer=W_regularizer,
padding=border_mode)
def Conv(x, out_channels, dilation_rate=(1, 1)):
return Conv2D(out_channels, kernel_size=(3, 3), strides=(1, 1), dilation_rate=dilation_rate, padding='same')(x)
def UpConv(x, out_channels):
return Conv2DTranspose(out_channels, kernel_size=(3, 3), strides=(2, 2), padding='same', output_padding=(1, 1))(x)
def BN_Conv_Relu(x, out_channels, dilation_rate=(1, 1)):
x = BatchNormalization(axis=3, momentum=0.01)(x)
x = Conv2D(out_channels, kernel_size=(3, 3), strides=(1, 1), dilation_rate=dilation_rate, padding='same')(x)
x = ReLU()(x)
return x
def BN_UpConv_Relu(x, out_channels):
x = BatchNormalization(axis=3, momentum=0.01)(x)
x = UpConv(x, out_channels)
x = Activation('relu')(x)
return x
def ConvOut(x):
return Conv2D(1, kernel_size=(1, 1), strides=(1, 1), padding='valid')(x)
def unet_pooling_3(dim,start_filter,lr=0.0001):
inpt = Input(batch_shape=(None, dim, dim, 1))
BCR3 = BN_Conv_Relu(inpt, start_filter) # BUCR40
BCR4 = BN_Conv_Relu(BCR3, start_filter)
MP5 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid')(BCR4)
BCR6 = BN_Conv_Relu(MP5, start_filter*2)
BCR7 = BN_Conv_Relu(BCR6, start_filter*2) # BUCR36
BCR8 = BN_Conv_Relu(BCR7, start_filter*2)
MP9 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid')(BCR8)
BCR10 = BN_Conv_Relu(MP9, start_filter*4)
BCR11 = BN_Conv_Relu(BCR10, start_filter*4) # BUCR32
BCR12 = BN_Conv_Relu(BCR11, start_filter*4)
MP13 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid')(BCR12)
BCR30 = BN_Conv_Relu(MP13, start_filter*4)
BCR31 = BN_Conv_Relu(BCR30, start_filter*4)
BUCR32 = BN_UpConv_Relu(BCR31, start_filter*4) # BCR11
Add33 = add([BUCR32, BCR11])
BCR34 = BN_Conv_Relu(Add33, start_filter*4)
BCR35 = BN_Conv_Relu(BCR34, start_filter*4)
BUCR36 = BN_UpConv_Relu(BCR35, start_filter*2) # BCR7
Add37 = add([BUCR36, BCR7])
BCR38 = BN_Conv_Relu(Add37, start_filter*2)
BCR39 = BN_Conv_Relu(BCR38, start_filter*2)
BUCR40 = BN_UpConv_Relu(BCR39, start_filter) # BCR3
Add41 = add([BUCR40, BCR3])
BCR42 = BN_Conv_Relu(Add41, start_filter)
BCR43 = BN_Conv_Relu(BCR42, start_filter)
CO44 = ConvOut(BCR43)
out = Conv2D(1, 1, activation='sigmoid', padding='same')(CO44)
out = Reshape((dim, dim))(out)
model = Model(inputs=inpt, outputs=out) # convd2d
optimizer = Adam(lr=lr)
model.compile(loss='binary_crossentropy', metrics=['binary_accuracy'], optimizer=optimizer)
model.summary()
return model
#<NAME>'s UNet for Carter decter
def unet(dim, learn_rate, lmbda, drop, FL, init, n_filters):
"""Function that builds the (UNET) convolutional neural network.
Parameters
----------
dim : int
Dimension of input images (assumes square).
learn_rate : float
Learning rate.
lmbda : float
Convolution2D regularization parameter.
drop : float
Dropout fraction.
FL : int
Filter length.
init : string
Weight initialization type.
n_filters : int
Number of filters in each layer.
Returns
-------
model : keras model object
Constructed Keras model.
"""
print('Making UNET model...')
img_input = Input(batch_shape=(None, dim, dim, 1))
a1 = Convolution2D(n_filters, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(img_input)
a1 = Convolution2D(n_filters, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(a1)
a1P = MaxPooling2D((2, 2), strides=(2, 2))(a1)
a2 = Convolution2D(n_filters * 2, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(a1P)
a2 = Convolution2D(n_filters * 2, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(a2)
a2P = MaxPooling2D((2, 2), strides=(2, 2))(a2)
a3 = Convolution2D(n_filters * 4, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(a2P)
a3 = Convolution2D(n_filters * 4, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(a3)
a3P = MaxPooling2D((2, 2), strides=(2, 2), )(a3)
u = Convolution2D(n_filters * 4, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(a3P)
u = Convolution2D(n_filters * 4, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = UpSampling2D((2, 2))(u)
u = merge((a3, u), mode='concat', concat_axis=3)
u = Dropout(drop)(u)
u = Convolution2D(n_filters * 2, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = Convolution2D(n_filters * 2, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = UpSampling2D((2, 2))(u)
u = merge((a2, u), mode='concat', concat_axis=3)
u = Dropout(drop)(u)
u = Convolution2D(n_filters, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = Convolution2D(n_filters, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = UpSampling2D((2, 2))(u)
u = merge((a1, u), mode='concat', concat_axis=3)
u = Dropout(drop)(u)
u = Convolution2D(n_filters, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = Convolution2D(n_filters, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
# Final output
final_activation = 'sigmoid'
u = Convolution2D(1, 1, 1, activation=final_activation, init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = Reshape((dim, dim))(u)
if k2:
model = Model(inputs=img_input, outputs=u)
else:
model = Model(input=img_input, output=u)
optimizer = Adam(lr=learn_rate)
model.compile(loss='binary_crossentropy',metrics=['binary_accuracy'], optimizer=optimizer)
print(model.summary())
return model
def unet_ConvT(dim, learn_rate, lmbda, drop, FL, init, n_filters):
"""Function that builds the (UNET) convolutional neural network.
Parameters
----------
dim : int
Dimension of input images (assumes square).
learn_rate : float
Learning rate.
lmbda : float
Convolution2D regularization parameter.
drop : float
Dropout fraction.
FL : int
Filter length.
init : string
Weight initialization type.
n_filters : int
Number of filters in each layer.
Returns
-------
model : keras model object
Constructed Keras model.
"""
print('Making UNET model...')
img_input = Input(batch_shape=(None, dim, dim, 1))
a1 = Convolution2D(n_filters, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(img_input)
a1 = Convolution2D(n_filters, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(a1)
a1P = MaxPooling2D((2, 2), strides=(2, 2))(a1)
a2 = Convolution2D(n_filters * 2, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(a1P)
a2 = Convolution2D(n_filters * 2, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(a2)
a2P = MaxPooling2D((2, 2), strides=(2, 2))(a2)
a3 = Convolution2D(n_filters * 4, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(a2P)
a3 = Convolution2D(n_filters * 4, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(a3)
a3P = MaxPooling2D((2, 2), strides=(2, 2), )(a3)
u = Convolution2D(n_filters * 4, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(a3P)
u = Convolution2D(n_filters * 4, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = Conv2DTranspose(n_filters * 4, (3, 3), strides=(2, 2), padding="same")(u)
u = merge((a3, u), mode='concat', concat_axis=3)
u = Dropout(drop)(u)
u = Convolution2D(n_filters * 2, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = Convolution2D(n_filters * 2, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = Conv2DTranspose(n_filters* 2, (3, 3), strides=(2, 2), padding="same")(u)
u = merge((a2, u), mode='concat', concat_axis=3)
u = Dropout(drop)(u)
u = Convolution2D(n_filters, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = Convolution2D(n_filters, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = Conv2DTranspose(n_filters, (3, 3), strides=(2, 2), padding="same")(u)
u = merge((a1, u), mode='concat', concat_axis=3)
u = Dropout(drop)(u)
u = Convolution2D(n_filters, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = Convolution2D(n_filters, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
# Final output
final_activation = 'sigmoid'
u = Convolution2D(1, 1, 1, activation=final_activation, init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = Reshape((dim, dim))(u)
if k2:
model = Model(inputs=img_input, outputs=u)
else:
model = Model(input=img_input, output=u)
optimizer = Adam(lr=learn_rate)
model.compile(loss='binary_crossentropy',metrics=['binary_accuracy'], optimizer=optimizer)
print(model.summary())
return model
#<NAME>'s UNet deeper
def unet_deeper(dim, learn_rate, lmbda, drop, FL, init, n_filters):
print('Making UNET model...')
img_input = Input(batch_shape=(None, dim, dim, 1))
a1 = Convolution2D(n_filters, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(img_input)
a1 = Convolution2D(n_filters, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(a1)
a1P = MaxPooling2D((2, 2), strides=(2, 2))(a1)
a2 = Convolution2D(n_filters * 2, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(a1P)
a2 = Convolution2D(n_filters * 2, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(a2)
a2P = MaxPooling2D((2, 2), strides=(2, 2))(a2)
a3 = Convolution2D(n_filters * 4, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(a2P)
a3 = Convolution2D(n_filters * 4, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(a3)
a3P = MaxPooling2D((2, 2), strides=(2, 2), )(a3)
a4 = Convolution2D(n_filters * 8, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(a3P)
a4 = Convolution2D(n_filters * 8, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(a4)
a4P = MaxPooling2D((2, 2), strides=(2, 2), )(a4)
u = Convolution2D(n_filters * 8, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(a4P)
u = Convolution2D(n_filters * 8, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = UpSampling2D((2, 2))(u)
u = merge((a4, u), mode='concat', concat_axis=3)
u = Dropout(drop)(u)
u = Convolution2D(n_filters * 4, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = Convolution2D(n_filters * 4, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = UpSampling2D((2, 2))(u)
u = merge((a3, u), mode='concat', concat_axis=3)
u = Dropout(drop)(u)
u = Convolution2D(n_filters * 2, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = Convolution2D(n_filters * 2, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = UpSampling2D((2, 2))(u)
u = merge((a2, u), mode='concat', concat_axis=3)
u = Dropout(drop)(u)
u = Convolution2D(n_filters, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = Convolution2D(n_filters, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = UpSampling2D((2, 2))(u)
u = merge((a1, u), mode='concat', concat_axis=3)
u = Dropout(drop)(u)
u = Convolution2D(n_filters, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = Convolution2D(n_filters, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
# Final output
final_activation = 'sigmoid'
u = Convolution2D(1, 1, 1, activation=final_activation, init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = Reshape((dim, dim))(u)
if k2:
model = Model(inputs=img_input, outputs=u)
else:
model = Model(input=img_input, output=u)
optimizer = Adam(lr=learn_rate)
model.compile(loss='binary_crossentropy',metrics=['binary_accuracy'], optimizer=optimizer)
print(model.summary())
return model
if __name__ == '__main__':
#simple_resunet_upsample(256,112)#21,368,705
unet_deeper(256,0.0001,1e-6,0.15,3,'he_normal',112)
|
[
"keras.regularizers.l2",
"keras.layers.core.Reshape",
"keras.layers.Activation",
"keras.layers.convolutional.UpSampling2D",
"keras.layers.Dropout",
"keras.layers.add",
"keras.layers.convolutional.MaxPooling2D",
"keras.optimizers.Adam",
"keras.layers.Conv2DTranspose",
"keras.models.Model",
"keras.layers.Concatenate",
"keras.layers.BatchNormalization",
"keras.layers.convolutional.Conv2D",
"keras.layers.Input",
"keras.layers.merge"
] |
[((2608, 2646), 'keras.layers.Input', 'Input', ([], {'batch_shape': '(None, dim, dim, 1)'}), '(batch_shape=(None, dim, dim, 1))\n', (2613, 2646), False, 'from keras.layers import Concatenate, Input\n'), ((3455, 3475), 'keras.layers.add', 'add', (['[BUCR32, BCR11]'], {}), '([BUCR32, BCR11])\n', (3458, 3475), False, 'from keras.layers import add, Flatten\n'), ((3644, 3663), 'keras.layers.add', 'add', (['[BUCR36, BCR7]'], {}), '([BUCR36, BCR7])\n', (3647, 3663), False, 'from keras.layers import add, Flatten\n'), ((3830, 3849), 'keras.layers.add', 'add', (['[BUCR40, BCR3]'], {}), '([BUCR40, BCR3])\n', (3833, 3849), False, 'from keras.layers import add, Flatten\n'), ((4083, 4114), 'keras.models.Model', 'Model', ([], {'inputs': 'inpt', 'outputs': 'out'}), '(inputs=inpt, outputs=out)\n', (4088, 4114), False, 'from keras.models import Model\n'), ((4142, 4153), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'lr'}), '(lr=lr)\n', (4146, 4153), False, 'from keras.optimizers import Adam, SGD\n'), ((5001, 5039), 'keras.layers.Input', 'Input', ([], {'batch_shape': '(None, dim, dim, 1)'}), '(batch_shape=(None, dim, dim, 1))\n', (5006, 5039), False, 'from keras.layers import Concatenate, Input\n'), ((6421, 6465), 'keras.layers.merge', 'merge', (['(a3, u)'], {'mode': '"""concat"""', 'concat_axis': '(3)'}), "((a3, u), mode='concat', concat_axis=3)\n", (6426, 6465), False, 'from keras.layers import merge, Input\n'), ((6822, 6866), 'keras.layers.merge', 'merge', (['(a2, u)'], {'mode': '"""concat"""', 'concat_axis': '(3)'}), "((a2, u), mode='concat', concat_axis=3)\n", (6827, 6866), False, 'from keras.layers import merge, Input\n'), ((7215, 7259), 'keras.layers.merge', 'merge', (['(a1, u)'], {'mode': '"""concat"""', 'concat_axis': '(3)'}), "((a1, u), mode='concat', concat_axis=3)\n", (7220, 7259), False, 'from keras.layers import merge, Input\n'), ((7930, 7949), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'learn_rate'}), '(lr=learn_rate)\n', (7934, 7949), False, 'from keras.optimizers import Adam, SGD\n'), ((8775, 8813), 'keras.layers.Input', 'Input', ([], {'batch_shape': '(None, dim, dim, 1)'}), '(batch_shape=(None, dim, dim, 1))\n', (8780, 8813), False, 'from keras.layers import Concatenate, Input\n'), ((10245, 10289), 'keras.layers.merge', 'merge', (['(a3, u)'], {'mode': '"""concat"""', 'concat_axis': '(3)'}), "((a3, u), mode='concat', concat_axis=3)\n", (10250, 10289), False, 'from keras.layers import merge, Input\n'), ((10695, 10739), 'keras.layers.merge', 'merge', (['(a2, u)'], {'mode': '"""concat"""', 'concat_axis': '(3)'}), "((a2, u), mode='concat', concat_axis=3)\n", (10700, 10739), False, 'from keras.layers import merge, Input\n'), ((11134, 11178), 'keras.layers.merge', 'merge', (['(a1, u)'], {'mode': '"""concat"""', 'concat_axis': '(3)'}), "((a1, u), mode='concat', concat_axis=3)\n", (11139, 11178), False, 'from keras.layers import merge, Input\n'), ((11849, 11868), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'learn_rate'}), '(lr=learn_rate)\n', (11853, 11868), False, 'from keras.optimizers import Adam, SGD\n'), ((12148, 12186), 'keras.layers.Input', 'Input', ([], {'batch_shape': '(None, dim, dim, 1)'}), '(batch_shape=(None, dim, dim, 1))\n', (12153, 12186), False, 'from keras.layers import Concatenate, Input\n'), ((13918, 13962), 'keras.layers.merge', 'merge', (['(a4, u)'], {'mode': '"""concat"""', 'concat_axis': '(3)'}), "((a4, u), mode='concat', concat_axis=3)\n", (13923, 13962), False, 'from keras.layers import merge, Input\n'), ((14319, 14363), 'keras.layers.merge', 'merge', (['(a3, u)'], {'mode': '"""concat"""', 'concat_axis': '(3)'}), "((a3, u), mode='concat', concat_axis=3)\n", (14324, 14363), False, 'from keras.layers import merge, Input\n'), ((14720, 14764), 'keras.layers.merge', 'merge', (['(a2, u)'], {'mode': '"""concat"""', 'concat_axis': '(3)'}), "((a2, u), mode='concat', concat_axis=3)\n", (14725, 14764), False, 'from keras.layers import merge, Input\n'), ((15113, 15157), 'keras.layers.merge', 'merge', (['(a1, u)'], {'mode': '"""concat"""', 'concat_axis': '(3)'}), "((a1, u), mode='concat', concat_axis=3)\n", (15118, 15157), False, 'from keras.layers import merge, Input\n'), ((15828, 15847), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'learn_rate'}), '(lr=learn_rate)\n', (15832, 15847), False, 'from keras.optimizers import Adam, SGD\n'), ((1533, 1661), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['n_filters', 'FL'], {'activation': 'activation', 'kernel_initializer': 'init', 'kernel_regularizer': 'W_regularizer', 'padding': 'border_mode'}), '(n_filters, FL, activation=activation, kernel_initializer=init,\n kernel_regularizer=W_regularizer, padding=border_mode)\n', (1539, 1661), False, 'from keras.layers.convolutional import Conv2D, MaxPooling2D, UpSampling2D\n'), ((1784, 1890), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['out_channels'], {'kernel_size': '(3, 3)', 'strides': '(1, 1)', 'dilation_rate': 'dilation_rate', 'padding': '"""same"""'}), "(out_channels, kernel_size=(3, 3), strides=(1, 1), dilation_rate=\n dilation_rate, padding='same')\n", (1790, 1890), False, 'from keras.layers.convolutional import Conv2D, MaxPooling2D, UpSampling2D\n'), ((1929, 2038), 'keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['out_channels'], {'kernel_size': '(3, 3)', 'strides': '(2, 2)', 'padding': '"""same"""', 'output_padding': '(1, 1)'}), "(out_channels, kernel_size=(3, 3), strides=(2, 2), padding=\n 'same', output_padding=(1, 1))\n", (1944, 2038), False, 'from keras.layers import PReLU, Conv2DTranspose\n'), ((2102, 2143), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(3)', 'momentum': '(0.01)'}), '(axis=3, momentum=0.01)\n', (2120, 2143), False, 'from keras.layers import Input, Dense, Dropout, LeakyReLU, BatchNormalization, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, Activation, ZeroPadding2D\n'), ((2155, 2261), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['out_channels'], {'kernel_size': '(3, 3)', 'strides': '(1, 1)', 'dilation_rate': 'dilation_rate', 'padding': '"""same"""'}), "(out_channels, kernel_size=(3, 3), strides=(1, 1), dilation_rate=\n dilation_rate, padding='same')\n", (2161, 2261), False, 'from keras.layers.convolutional import Conv2D, MaxPooling2D, UpSampling2D\n'), ((2336, 2377), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(3)', 'momentum': '(0.01)'}), '(axis=3, momentum=0.01)\n', (2354, 2377), False, 'from keras.layers import Input, Dense, Dropout, LeakyReLU, BatchNormalization, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, Activation, ZeroPadding2D\n'), ((2421, 2439), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2431, 2439), False, 'from keras.layers import Input, Dense, Dropout, LeakyReLU, BatchNormalization, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, Activation, ZeroPadding2D\n'), ((2483, 2545), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(1)'], {'kernel_size': '(1, 1)', 'strides': '(1, 1)', 'padding': '"""valid"""'}), "(1, kernel_size=(1, 1), strides=(1, 1), padding='valid')\n", (2489, 2545), False, 'from keras.layers.convolutional import Conv2D, MaxPooling2D, UpSampling2D\n'), ((2755, 2818), 'keras.layers.convolutional.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)', 'strides': '(2, 2)', 'padding': '"""valid"""'}), "(pool_size=(2, 2), strides=(2, 2), padding='valid')\n", (2767, 2818), False, 'from keras.layers.convolutional import Conv2D, MaxPooling2D, UpSampling2D\n'), ((2982, 3045), 'keras.layers.convolutional.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)', 'strides': '(2, 2)', 'padding': '"""valid"""'}), "(pool_size=(2, 2), strides=(2, 2), padding='valid')\n", (2994, 3045), False, 'from keras.layers.convolutional import Conv2D, MaxPooling2D, UpSampling2D\n'), ((3215, 3278), 'keras.layers.convolutional.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)', 'strides': '(2, 2)', 'padding': '"""valid"""'}), "(pool_size=(2, 2), strides=(2, 2), padding='valid')\n", (3227, 3278), False, 'from keras.layers.convolutional import Conv2D, MaxPooling2D, UpSampling2D\n'), ((3978, 4028), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(1)', '(1)'], {'activation': '"""sigmoid"""', 'padding': '"""same"""'}), "(1, 1, activation='sigmoid', padding='same')\n", (3984, 4028), False, 'from keras.layers.convolutional import Conv2D, MaxPooling2D, UpSampling2D\n'), ((4046, 4065), 'keras.layers.core.Reshape', 'Reshape', (['(dim, dim)'], {}), '((dim, dim))\n', (4053, 4065), False, 'from keras.layers.core import Dropout, Reshape\n'), ((5346, 5382), 'keras.layers.convolutional.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'strides': '(2, 2)'}), '((2, 2), strides=(2, 2))\n', (5358, 5382), False, 'from keras.layers.convolutional import Conv2D, MaxPooling2D, UpSampling2D\n'), ((5695, 5731), 'keras.layers.convolutional.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'strides': '(2, 2)'}), '((2, 2), strides=(2, 2))\n', (5707, 5731), False, 'from keras.layers.convolutional import Conv2D, MaxPooling2D, UpSampling2D\n'), ((6044, 6080), 'keras.layers.convolutional.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'strides': '(2, 2)'}), '((2, 2), strides=(2, 2))\n', (6056, 6080), False, 'from keras.layers.convolutional import Conv2D, MaxPooling2D, UpSampling2D\n'), ((6389, 6409), 'keras.layers.convolutional.UpSampling2D', 'UpSampling2D', (['(2, 2)'], {}), '((2, 2))\n', (6401, 6409), False, 'from keras.layers.convolutional import Conv2D, MaxPooling2D, UpSampling2D\n'), ((6474, 6487), 'keras.layers.Dropout', 'Dropout', (['drop'], {}), '(drop)\n', (6481, 6487), False, 'from keras.layers import Input, Dense, Dropout, LeakyReLU, BatchNormalization, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, Activation, ZeroPadding2D\n'), ((6790, 6810), 'keras.layers.convolutional.UpSampling2D', 'UpSampling2D', (['(2, 2)'], {}), '((2, 2))\n', (6802, 6810), False, 'from keras.layers.convolutional import Conv2D, MaxPooling2D, UpSampling2D\n'), ((6875, 6888), 'keras.layers.Dropout', 'Dropout', (['drop'], {}), '(drop)\n', (6882, 6888), False, 'from keras.layers import Input, Dense, Dropout, LeakyReLU, BatchNormalization, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, Activation, ZeroPadding2D\n'), ((7183, 7203), 'keras.layers.convolutional.UpSampling2D', 'UpSampling2D', (['(2, 2)'], {}), '((2, 2))\n', (7195, 7203), False, 'from keras.layers.convolutional import Conv2D, MaxPooling2D, UpSampling2D\n'), ((7268, 7281), 'keras.layers.Dropout', 'Dropout', (['drop'], {}), '(drop)\n', (7275, 7281), False, 'from keras.layers import Input, Dense, Dropout, LeakyReLU, BatchNormalization, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, Activation, ZeroPadding2D\n'), ((7769, 7788), 'keras.layers.core.Reshape', 'Reshape', (['(dim, dim)'], {}), '((dim, dim))\n', (7776, 7788), False, 'from keras.layers.core import Dropout, Reshape\n'), ((7819, 7853), 'keras.models.Model', 'Model', ([], {'inputs': 'img_input', 'outputs': 'u'}), '(inputs=img_input, outputs=u)\n', (7824, 7853), False, 'from keras.models import Model\n'), ((7880, 7912), 'keras.models.Model', 'Model', ([], {'input': 'img_input', 'output': 'u'}), '(input=img_input, output=u)\n', (7885, 7912), False, 'from keras.models import Model\n'), ((9120, 9156), 'keras.layers.convolutional.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'strides': '(2, 2)'}), '((2, 2), strides=(2, 2))\n', (9132, 9156), False, 'from keras.layers.convolutional import Conv2D, MaxPooling2D, UpSampling2D\n'), ((9469, 9505), 'keras.layers.convolutional.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'strides': '(2, 2)'}), '((2, 2), strides=(2, 2))\n', (9481, 9505), False, 'from keras.layers.convolutional import Conv2D, MaxPooling2D, UpSampling2D\n'), ((9818, 9854), 'keras.layers.convolutional.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'strides': '(2, 2)'}), '((2, 2), strides=(2, 2))\n', (9830, 9854), False, 'from keras.layers.convolutional import Conv2D, MaxPooling2D, UpSampling2D\n'), ((10163, 10233), 'keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['(n_filters * 4)', '(3, 3)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(n_filters * 4, (3, 3), strides=(2, 2), padding='same')\n", (10178, 10233), False, 'from keras.layers import PReLU, Conv2DTranspose\n'), ((10298, 10311), 'keras.layers.Dropout', 'Dropout', (['drop'], {}), '(drop)\n', (10305, 10311), False, 'from keras.layers import Input, Dense, Dropout, LeakyReLU, BatchNormalization, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, Activation, ZeroPadding2D\n'), ((10614, 10684), 'keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['(n_filters * 2)', '(3, 3)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(n_filters * 2, (3, 3), strides=(2, 2), padding='same')\n", (10629, 10684), False, 'from keras.layers import PReLU, Conv2DTranspose\n'), ((10748, 10761), 'keras.layers.Dropout', 'Dropout', (['drop'], {}), '(drop)\n', (10755, 10761), False, 'from keras.layers import Input, Dense, Dropout, LeakyReLU, BatchNormalization, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, Activation, ZeroPadding2D\n'), ((11056, 11122), 'keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['n_filters', '(3, 3)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(n_filters, (3, 3), strides=(2, 2), padding='same')\n", (11071, 11122), False, 'from keras.layers import PReLU, Conv2DTranspose\n'), ((11187, 11200), 'keras.layers.Dropout', 'Dropout', (['drop'], {}), '(drop)\n', (11194, 11200), False, 'from keras.layers import Input, Dense, Dropout, LeakyReLU, BatchNormalization, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, Activation, ZeroPadding2D\n'), ((11688, 11707), 'keras.layers.core.Reshape', 'Reshape', (['(dim, dim)'], {}), '((dim, dim))\n', (11695, 11707), False, 'from keras.layers.core import Dropout, Reshape\n'), ((11738, 11772), 'keras.models.Model', 'Model', ([], {'inputs': 'img_input', 'outputs': 'u'}), '(inputs=img_input, outputs=u)\n', (11743, 11772), False, 'from keras.models import Model\n'), ((11799, 11831), 'keras.models.Model', 'Model', ([], {'input': 'img_input', 'output': 'u'}), '(input=img_input, output=u)\n', (11804, 11831), False, 'from keras.models import Model\n'), ((12493, 12529), 'keras.layers.convolutional.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'strides': '(2, 2)'}), '((2, 2), strides=(2, 2))\n', (12505, 12529), False, 'from keras.layers.convolutional import Conv2D, MaxPooling2D, UpSampling2D\n'), ((12842, 12878), 'keras.layers.convolutional.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'strides': '(2, 2)'}), '((2, 2), strides=(2, 2))\n', (12854, 12878), False, 'from keras.layers.convolutional import Conv2D, MaxPooling2D, UpSampling2D\n'), ((13191, 13227), 'keras.layers.convolutional.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'strides': '(2, 2)'}), '((2, 2), strides=(2, 2))\n', (13203, 13227), False, 'from keras.layers.convolutional import Conv2D, MaxPooling2D, UpSampling2D\n'), ((13542, 13578), 'keras.layers.convolutional.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'strides': '(2, 2)'}), '((2, 2), strides=(2, 2))\n', (13554, 13578), False, 'from keras.layers.convolutional import Conv2D, MaxPooling2D, UpSampling2D\n'), ((13886, 13906), 'keras.layers.convolutional.UpSampling2D', 'UpSampling2D', (['(2, 2)'], {}), '((2, 2))\n', (13898, 13906), False, 'from keras.layers.convolutional import Conv2D, MaxPooling2D, UpSampling2D\n'), ((13971, 13984), 'keras.layers.Dropout', 'Dropout', (['drop'], {}), '(drop)\n', (13978, 13984), False, 'from keras.layers import Input, Dense, Dropout, LeakyReLU, BatchNormalization, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, Activation, ZeroPadding2D\n'), ((14287, 14307), 'keras.layers.convolutional.UpSampling2D', 'UpSampling2D', (['(2, 2)'], {}), '((2, 2))\n', (14299, 14307), False, 'from keras.layers.convolutional import Conv2D, MaxPooling2D, UpSampling2D\n'), ((14372, 14385), 'keras.layers.Dropout', 'Dropout', (['drop'], {}), '(drop)\n', (14379, 14385), False, 'from keras.layers import Input, Dense, Dropout, LeakyReLU, BatchNormalization, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, Activation, ZeroPadding2D\n'), ((14688, 14708), 'keras.layers.convolutional.UpSampling2D', 'UpSampling2D', (['(2, 2)'], {}), '((2, 2))\n', (14700, 14708), False, 'from keras.layers.convolutional import Conv2D, MaxPooling2D, UpSampling2D\n'), ((14773, 14786), 'keras.layers.Dropout', 'Dropout', (['drop'], {}), '(drop)\n', (14780, 14786), False, 'from keras.layers import Input, Dense, Dropout, LeakyReLU, BatchNormalization, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, Activation, ZeroPadding2D\n'), ((15081, 15101), 'keras.layers.convolutional.UpSampling2D', 'UpSampling2D', (['(2, 2)'], {}), '((2, 2))\n', (15093, 15101), False, 'from keras.layers.convolutional import Conv2D, MaxPooling2D, UpSampling2D\n'), ((15166, 15179), 'keras.layers.Dropout', 'Dropout', (['drop'], {}), '(drop)\n', (15173, 15179), False, 'from keras.layers import Input, Dense, Dropout, LeakyReLU, BatchNormalization, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, Activation, ZeroPadding2D\n'), ((15667, 15686), 'keras.layers.core.Reshape', 'Reshape', (['(dim, dim)'], {}), '((dim, dim))\n', (15674, 15686), False, 'from keras.layers.core import Dropout, Reshape\n'), ((15717, 15751), 'keras.models.Model', 'Model', ([], {'inputs': 'img_input', 'outputs': 'u'}), '(inputs=img_input, outputs=u)\n', (15722, 15751), False, 'from keras.models import Model\n'), ((15778, 15810), 'keras.models.Model', 'Model', ([], {'input': 'img_input', 'output': 'u'}), '(input=img_input, output=u)\n', (15783, 15810), False, 'from keras.models import Model\n'), ((1283, 1312), 'keras.layers.Concatenate', 'Concatenate', ([], {'axis': 'concat_axis'}), '(axis=concat_axis)\n', (1294, 1312), False, 'from keras.layers import Concatenate, Input\n'), ((5150, 5159), 'keras.regularizers.l2', 'l2', (['lmbda'], {}), '(lmbda)\n', (5152, 5159), False, 'from keras.regularizers import l2\n'), ((5301, 5310), 'keras.regularizers.l2', 'l2', (['lmbda'], {}), '(lmbda)\n', (5303, 5310), False, 'from keras.regularizers import l2\n'), ((5501, 5510), 'keras.regularizers.l2', 'l2', (['lmbda'], {}), '(lmbda)\n', (5503, 5510), False, 'from keras.regularizers import l2\n'), ((5650, 5659), 'keras.regularizers.l2', 'l2', (['lmbda'], {}), '(lmbda)\n', (5652, 5659), False, 'from keras.regularizers import l2\n'), ((5850, 5859), 'keras.regularizers.l2', 'l2', (['lmbda'], {}), '(lmbda)\n', (5852, 5859), False, 'from keras.regularizers import l2\n'), ((5999, 6008), 'keras.regularizers.l2', 'l2', (['lmbda'], {}), '(lmbda)\n', (6001, 6008), False, 'from keras.regularizers import l2\n'), ((6199, 6208), 'keras.regularizers.l2', 'l2', (['lmbda'], {}), '(lmbda)\n', (6201, 6208), False, 'from keras.regularizers import l2\n'), ((6346, 6355), 'keras.regularizers.l2', 'l2', (['lmbda'], {}), '(lmbda)\n', (6348, 6355), False, 'from keras.regularizers import l2\n'), ((6602, 6611), 'keras.regularizers.l2', 'l2', (['lmbda'], {}), '(lmbda)\n', (6604, 6611), False, 'from keras.regularizers import l2\n'), ((6747, 6756), 'keras.regularizers.l2', 'l2', (['lmbda'], {}), '(lmbda)\n', (6749, 6756), False, 'from keras.regularizers import l2\n'), ((6999, 7008), 'keras.regularizers.l2', 'l2', (['lmbda'], {}), '(lmbda)\n', (7001, 7008), False, 'from keras.regularizers import l2\n'), ((7140, 7149), 'keras.regularizers.l2', 'l2', (['lmbda'], {}), '(lmbda)\n', (7142, 7149), False, 'from keras.regularizers import l2\n'), ((7392, 7401), 'keras.regularizers.l2', 'l2', (['lmbda'], {}), '(lmbda)\n', (7394, 7401), False, 'from keras.regularizers import l2\n'), ((7533, 7542), 'keras.regularizers.l2', 'l2', (['lmbda'], {}), '(lmbda)\n', (7535, 7542), False, 'from keras.regularizers import l2\n'), ((7727, 7736), 'keras.regularizers.l2', 'l2', (['lmbda'], {}), '(lmbda)\n', (7729, 7736), False, 'from keras.regularizers import l2\n'), ((8924, 8933), 'keras.regularizers.l2', 'l2', (['lmbda'], {}), '(lmbda)\n', (8926, 8933), False, 'from keras.regularizers import l2\n'), ((9075, 9084), 'keras.regularizers.l2', 'l2', (['lmbda'], {}), '(lmbda)\n', (9077, 9084), False, 'from keras.regularizers import l2\n'), ((9275, 9284), 'keras.regularizers.l2', 'l2', (['lmbda'], {}), '(lmbda)\n', (9277, 9284), False, 'from keras.regularizers import l2\n'), ((9424, 9433), 'keras.regularizers.l2', 'l2', (['lmbda'], {}), '(lmbda)\n', (9426, 9433), False, 'from keras.regularizers import l2\n'), ((9624, 9633), 'keras.regularizers.l2', 'l2', (['lmbda'], {}), '(lmbda)\n', (9626, 9633), False, 'from keras.regularizers import l2\n'), ((9773, 9782), 'keras.regularizers.l2', 'l2', (['lmbda'], {}), '(lmbda)\n', (9775, 9782), False, 'from keras.regularizers import l2\n'), ((9973, 9982), 'keras.regularizers.l2', 'l2', (['lmbda'], {}), '(lmbda)\n', (9975, 9982), False, 'from keras.regularizers import l2\n'), ((10120, 10129), 'keras.regularizers.l2', 'l2', (['lmbda'], {}), '(lmbda)\n', (10122, 10129), False, 'from keras.regularizers import l2\n'), ((10426, 10435), 'keras.regularizers.l2', 'l2', (['lmbda'], {}), '(lmbda)\n', (10428, 10435), False, 'from keras.regularizers import l2\n'), ((10571, 10580), 'keras.regularizers.l2', 'l2', (['lmbda'], {}), '(lmbda)\n', (10573, 10580), False, 'from keras.regularizers import l2\n'), ((10872, 10881), 'keras.regularizers.l2', 'l2', (['lmbda'], {}), '(lmbda)\n', (10874, 10881), False, 'from keras.regularizers import l2\n'), ((11013, 11022), 'keras.regularizers.l2', 'l2', (['lmbda'], {}), '(lmbda)\n', (11015, 11022), False, 'from keras.regularizers import l2\n'), ((11311, 11320), 'keras.regularizers.l2', 'l2', (['lmbda'], {}), '(lmbda)\n', (11313, 11320), False, 'from keras.regularizers import l2\n'), ((11452, 11461), 'keras.regularizers.l2', 'l2', (['lmbda'], {}), '(lmbda)\n', (11454, 11461), False, 'from keras.regularizers import l2\n'), ((11646, 11655), 'keras.regularizers.l2', 'l2', (['lmbda'], {}), '(lmbda)\n', (11648, 11655), False, 'from keras.regularizers import l2\n'), ((12297, 12306), 'keras.regularizers.l2', 'l2', (['lmbda'], {}), '(lmbda)\n', (12299, 12306), False, 'from keras.regularizers import l2\n'), ((12448, 12457), 'keras.regularizers.l2', 'l2', (['lmbda'], {}), '(lmbda)\n', (12450, 12457), False, 'from keras.regularizers import l2\n'), ((12648, 12657), 'keras.regularizers.l2', 'l2', (['lmbda'], {}), '(lmbda)\n', (12650, 12657), False, 'from keras.regularizers import l2\n'), ((12797, 12806), 'keras.regularizers.l2', 'l2', (['lmbda'], {}), '(lmbda)\n', (12799, 12806), False, 'from keras.regularizers import l2\n'), ((12997, 13006), 'keras.regularizers.l2', 'l2', (['lmbda'], {}), '(lmbda)\n', (12999, 13006), False, 'from keras.regularizers import l2\n'), ((13146, 13155), 'keras.regularizers.l2', 'l2', (['lmbda'], {}), '(lmbda)\n', (13148, 13155), False, 'from keras.regularizers import l2\n'), ((13348, 13357), 'keras.regularizers.l2', 'l2', (['lmbda'], {}), '(lmbda)\n', (13350, 13357), False, 'from keras.regularizers import l2\n'), ((13497, 13506), 'keras.regularizers.l2', 'l2', (['lmbda'], {}), '(lmbda)\n', (13499, 13506), False, 'from keras.regularizers import l2\n'), ((13696, 13705), 'keras.regularizers.l2', 'l2', (['lmbda'], {}), '(lmbda)\n', (13698, 13705), False, 'from keras.regularizers import l2\n'), ((13843, 13852), 'keras.regularizers.l2', 'l2', (['lmbda'], {}), '(lmbda)\n', (13845, 13852), False, 'from keras.regularizers import l2\n'), ((14099, 14108), 'keras.regularizers.l2', 'l2', (['lmbda'], {}), '(lmbda)\n', (14101, 14108), False, 'from keras.regularizers import l2\n'), ((14244, 14253), 'keras.regularizers.l2', 'l2', (['lmbda'], {}), '(lmbda)\n', (14246, 14253), False, 'from keras.regularizers import l2\n'), ((14500, 14509), 'keras.regularizers.l2', 'l2', (['lmbda'], {}), '(lmbda)\n', (14502, 14509), False, 'from keras.regularizers import l2\n'), ((14645, 14654), 'keras.regularizers.l2', 'l2', (['lmbda'], {}), '(lmbda)\n', (14647, 14654), False, 'from keras.regularizers import l2\n'), ((14897, 14906), 'keras.regularizers.l2', 'l2', (['lmbda'], {}), '(lmbda)\n', (14899, 14906), False, 'from keras.regularizers import l2\n'), ((15038, 15047), 'keras.regularizers.l2', 'l2', (['lmbda'], {}), '(lmbda)\n', (15040, 15047), False, 'from keras.regularizers import l2\n'), ((15290, 15299), 'keras.regularizers.l2', 'l2', (['lmbda'], {}), '(lmbda)\n', (15292, 15299), False, 'from keras.regularizers import l2\n'), ((15431, 15440), 'keras.regularizers.l2', 'l2', (['lmbda'], {}), '(lmbda)\n', (15433, 15440), False, 'from keras.regularizers import l2\n'), ((15625, 15634), 'keras.regularizers.l2', 'l2', (['lmbda'], {}), '(lmbda)\n', (15627, 15634), False, 'from keras.regularizers import l2\n')]
|
from datetime import datetime
from app.models import db
__all__ = ['Message']
class Message(db.Model):
"""Message
Messages only have an author. They are either connected to a working
group (AG) or to an individual user (recepient).
:param id:
:param message:
:param time:
:param author_id:
:param ag_id:
:param recepient_id:
Relationships:
- users_messages: status of a message (recepient only)
"""
__tablename__ = 'messages'
id = db.Column(db.Integer, primary_key=True, unique=True, nullable=False)
message = db.Column(db.Text(1000), nullable=False)
time = db.Column(db.DateTime, default=datetime.now())
author_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=True)
ag_id = db.Column(db.Integer, db.ForeignKey('ags.id'), nullable=True)
recepient_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=True)
author = db.relationship(
'User',
back_populates='messages',
primaryjoin='User.id == Message.author_id',)
recepient = db.relationship(
'User',
back_populates='messages',
primaryjoin='User.id == Message.recepient_id',)
ag = db.relationship('AG', back_populates='messages')
|
[
"app.models.db.Column",
"app.models.db.ForeignKey",
"app.models.db.relationship",
"datetime.datetime.now",
"app.models.db.Text"
] |
[((500, 568), 'app.models.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)', 'unique': '(True)', 'nullable': '(False)'}), '(db.Integer, primary_key=True, unique=True, nullable=False)\n', (509, 568), False, 'from app.models import db\n'), ((934, 1033), 'app.models.db.relationship', 'db.relationship', (['"""User"""'], {'back_populates': '"""messages"""', 'primaryjoin': '"""User.id == Message.author_id"""'}), "('User', back_populates='messages', primaryjoin=\n 'User.id == Message.author_id')\n", (949, 1033), False, 'from app.models import db\n'), ((1071, 1173), 'app.models.db.relationship', 'db.relationship', (['"""User"""'], {'back_populates': '"""messages"""', 'primaryjoin': '"""User.id == Message.recepient_id"""'}), "('User', back_populates='messages', primaryjoin=\n 'User.id == Message.recepient_id')\n", (1086, 1173), False, 'from app.models import db\n'), ((1204, 1252), 'app.models.db.relationship', 'db.relationship', (['"""AG"""'], {'back_populates': '"""messages"""'}), "('AG', back_populates='messages')\n", (1219, 1252), False, 'from app.models import db\n'), ((593, 606), 'app.models.db.Text', 'db.Text', (['(1000)'], {}), '(1000)\n', (600, 606), False, 'from app.models import db\n'), ((720, 745), 'app.models.db.ForeignKey', 'db.ForeignKey', (['"""users.id"""'], {}), "('users.id')\n", (733, 745), False, 'from app.models import db\n'), ((797, 820), 'app.models.db.ForeignKey', 'db.ForeignKey', (['"""ags.id"""'], {}), "('ags.id')\n", (810, 820), False, 'from app.models import db\n'), ((878, 903), 'app.models.db.ForeignKey', 'db.ForeignKey', (['"""users.id"""'], {}), "('users.id')\n", (891, 903), False, 'from app.models import db\n'), ((666, 680), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (678, 680), False, 'from datetime import datetime\n')]
|
import datajoint as dj
from djsubject import subject
from djlab import lab
from djephys import ephys
from my_project.utils import get_ephys_probe_data_dir, get_ks_data_dir
# ============== Declare "lab" and "subject" schema ==============
lab.declare('u24_lab')
subject.declare('u24_subject',
dependencies={'Source': lab.Source,
'Lab': lab.Lab,
'Protocol': lab.Protocol,
'User': lab.User})
# ============== Declare Session table ==============
schema = dj.schema('u24_experiment')
@schema
class Session(dj.Manual):
definition = """
-> subject.Subject
session_datetime: datetime
"""
# ============== Declare "ephys" schema ==============
ephys.declare(dj.schema('u24_ephys'),
dependencies={'Subject': subject.Subject,
'Session': Session,
'Location': lab.Location,
'get_npx_data_dir': get_ephys_probe_data_dir,
'get_ks_data_dir': get_ks_data_dir})
# ---- Add neuropixels probes ----
for probe_type in ('neuropixels 1.0 - 3A', 'neuropixels 1.0 - 3B',
'neuropixels 2.0 - SS', 'neuropixels 2.0 - MS'):
ephys.ProbeType.create_neuropixels_probe(probe_type)
|
[
"djephys.ephys.ProbeType.create_neuropixels_probe",
"djlab.lab.declare",
"datajoint.schema",
"djsubject.subject.declare"
] |
[((241, 263), 'djlab.lab.declare', 'lab.declare', (['"""u24_lab"""'], {}), "('u24_lab')\n", (252, 263), False, 'from djlab import lab\n'), ((265, 396), 'djsubject.subject.declare', 'subject.declare', (['"""u24_subject"""'], {'dependencies': "{'Source': lab.Source, 'Lab': lab.Lab, 'Protocol': lab.Protocol, 'User':\n lab.User}"}), "('u24_subject', dependencies={'Source': lab.Source, 'Lab':\n lab.Lab, 'Protocol': lab.Protocol, 'User': lab.User})\n", (280, 396), False, 'from djsubject import subject\n'), ((564, 591), 'datajoint.schema', 'dj.schema', (['"""u24_experiment"""'], {}), "('u24_experiment')\n", (573, 591), True, 'import datajoint as dj\n'), ((783, 805), 'datajoint.schema', 'dj.schema', (['"""u24_ephys"""'], {}), "('u24_ephys')\n", (792, 805), True, 'import datajoint as dj\n'), ((1280, 1332), 'djephys.ephys.ProbeType.create_neuropixels_probe', 'ephys.ProbeType.create_neuropixels_probe', (['probe_type'], {}), '(probe_type)\n', (1320, 1332), False, 'from djephys import ephys\n')]
|
from django.conf.urls import url
from django.utils.translation import ugettext_lazy as _
from aristotle_mdr.contrib.generic.views import GenericAlterOneToManyView, generic_foreign_key_factory_view
from daedalus_data_dictionary.storage import models
urlpatterns = [
url(r'^dictionary/(?P<iid>\d+)?/edit/?$',
GenericAlterOneToManyView.as_view(
model_base=models.DataDictionary,
model_to_add=models.DataDictionaryInclusion,
model_base_field='datadictionaryinclusion_set',
model_to_add_field='dictionary',
#ordering_field='order',
form_add_another_text=_('Add a metadata concept'),
form_title=_('Change dictionary concept entries')
), name='data_dictionary_edit'),
]
|
[
"django.utils.translation.ugettext_lazy"
] |
[((635, 662), 'django.utils.translation.ugettext_lazy', '_', (['"""Add a metadata concept"""'], {}), "('Add a metadata concept')\n", (636, 662), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((687, 725), 'django.utils.translation.ugettext_lazy', '_', (['"""Change dictionary concept entries"""'], {}), "('Change dictionary concept entries')\n", (688, 725), True, 'from django.utils.translation import ugettext_lazy as _\n')]
|
# coding=utf-8
# Author: <NAME> <<EMAIL>>
import numpy as np
from torch import nn
from torch.nn import Parameter
from eeggan.pytorch.modules.conv.multiconv import MultiConv1d
class WeightScale(object):
"""
Implemented for PyTorch using WeightNorm implementation
https://pytorch.org/docs/stable/_modules/torch/nn/utils/weight_norm.html
References
----------
<NAME>., <NAME>., <NAME>., & <NAME>. (2017).
Progressive Growing of GANs for Improved Quality, Stability,
and Variation. Retrieved from http://arxiv.org/abs/1710.10196
"""
def __init__(self, name):
self.name = name
def compute_weight(self, module):
w = getattr(module, self.name + '_unscaled')
c = getattr(module, self.name + '_c')
tmp = c * w
return tmp
@staticmethod
def apply(module, name, gain):
fn = WeightScale(name)
weight = getattr(module, name)
# remove w from parameter list
del module._parameters[name]
# Constant from He et al. 2015
c = gain / np.sqrt(np.prod(list(weight.size())[1:]))
setattr(module, name + '_c', float(c))
module.register_parameter(name + '_unscaled', nn.Parameter(weight.data))
setattr(module, name, fn.compute_weight(module))
# recompute weight before every forward()
module.register_forward_pre_hook(fn)
return fn
def remove(self, module):
weight = self.compute_weight(module)
delattr(module, self.name)
del module._parameters[self.name + '_unscaled']
del module._parameters[self.name + '_c']
module.register_parameter(self.name, Parameter(weight.data))
def __call__(self, module, inputs, **kwargs):
setattr(module, self.name, self.compute_weight(module))
def weight_scale(module, gain=np.sqrt(2), name='weight'):
"""
Applies equalized learning rate to weights
Parameters
----------
module : module
Module scaling should be applied to (Conv/Linear)
gain : float
Gain of following activation layer
See torch.nn.init.calculate_gain
"""
if isinstance(module, MultiConv1d):
for i in range(len(module.convs)):
WeightScale.apply(module.convs[i], name, gain)
else:
WeightScale.apply(module, name, gain)
return module
def remove_weight_scale(module, name='weight'):
for k, hook in module._forward_pre_hooks.items():
if isinstance(hook, WeightScale) and hook.name == name:
hook.remove(module)
del module._forward_pre_hooks[k]
return module
raise ValueError("weight_scale of '{}' not found in {}"
.format(name, module))
|
[
"torch.nn.Parameter",
"numpy.sqrt"
] |
[((1836, 1846), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1843, 1846), True, 'import numpy as np\n'), ((1207, 1232), 'torch.nn.Parameter', 'nn.Parameter', (['weight.data'], {}), '(weight.data)\n', (1219, 1232), False, 'from torch import nn\n'), ((1665, 1687), 'torch.nn.Parameter', 'Parameter', (['weight.data'], {}), '(weight.data)\n', (1674, 1687), False, 'from torch.nn import Parameter\n')]
|
'''
Module containing Tracer metaclass and associated trace decorator.
'''
from types import FunctionType
from functools import wraps
import traceback
from pprint import pprint
import sys
class Tracer(type):
def __new__(cls, name, bases, cls_dct):
wrapped_cls_dct = {}
for attribute_name, attribute in cls_dct.items():
if attribute_name != '__init__':
wrapped_cls_dct[attribute_name] = trace(attribute) if isinstance(attribute, FunctionType) else attribute
else: # overwrite __init__ method to inject instance-level changes
def injected_init(self, *args, **kwargs):
self._trace = []
self.print_trace = lambda: pprint(self._trace, indent=4, depth=3)
cls_dct['__init__'](self, *args, **kwargs) # call existing __init__ after '_trace' attr is added
wrapped_cls_dct['__init__'] = injected_init
return super().__new__(cls, name, bases, wrapped_cls_dct)
def trace(method):
@wraps(method)
def wrapper(self, *args, **kwargs):
self._trace.append((len(self._trace) + 1, method.__name__, args, kwargs))
try:
return method(self, *args, **kwargs)
except:
traceback.print_exc()
print('\n\n ----- ERROR! Execution failed with above traceback. -----\nBelow is the Object\'s method call trace.')
print(self)
pprint(self._trace, indent=4, depth=3)
sys.exit()
return wrapper
|
[
"sys.exit",
"traceback.print_exc",
"pprint.pprint",
"functools.wraps"
] |
[((1038, 1051), 'functools.wraps', 'wraps', (['method'], {}), '(method)\n', (1043, 1051), False, 'from functools import wraps\n'), ((1264, 1285), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (1283, 1285), False, 'import traceback\n'), ((1452, 1490), 'pprint.pprint', 'pprint', (['self._trace'], {'indent': '(4)', 'depth': '(3)'}), '(self._trace, indent=4, depth=3)\n', (1458, 1490), False, 'from pprint import pprint\n'), ((1503, 1513), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1511, 1513), False, 'import sys\n'), ((729, 767), 'pprint.pprint', 'pprint', (['self._trace'], {'indent': '(4)', 'depth': '(3)'}), '(self._trace, indent=4, depth=3)\n', (735, 767), False, 'from pprint import pprint\n')]
|
"""Rest Api views."""
from rest_framework.views import APIView
from rest_framework.response import Response
from .rectotext import rec_to_text
from .search import find
from django.http import HttpResponse, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.utils.decorators import method_decorator
from gtts import gTTS
import os
from askme.settings import BASE_DIR
@method_decorator(csrf_exempt, name='dispatch')
class AskViewApi(APIView):
"""Using apiview."""
def post(self, request):
"""Upload audio file."""
try:
f = request.FILES['file']
uploadedFile = open(os.path.join(BASE_DIR, "askme_api/assets/file.wav", "wb"))
uploadedFile.write(f.read())
uploadedFile.close()
question = rec_to_text()
answer = find(question)
except KeyError:
answer = "Sorry we have some connection problems.\
I didn't catch your request"
return JsonResponse({'answer': answer})
@method_decorator(csrf_exempt, name='dispatch')
class AudioViewApi(APIView):
"""Using apiview."""
def post(self, request):
"""Upload audio file."""
try:
uploadedFile = open(os.path.join(BASE_DIR, 'askme/assets/file.wav'), 'wb')
f = request.FILES['data']
uploadedFile.write(f.read())
uploadedFile.close()
question = rec_to_text()
answer = find(question)
except KeyError:
answer = 'I am sorry. We have some connection issues.\
I couldn\'t get get Your file'
tts = gTTS(text=answer, lang='en')
fname = os.path.join(BASE_DIR, "askme/assets/good.mp3")
tts.save(fname)
f = open(fname, "rb")
response = HttpResponse()
response.write(f.read())
response['Content-Type'] = 'audio/mp3'
response['Content-Length'] = os.path.getsize(fname)
return response
|
[
"django.utils.decorators.method_decorator",
"django.http.HttpResponse",
"gtts.gTTS",
"os.path.getsize",
"django.http.JsonResponse",
"os.path.join"
] |
[((395, 441), 'django.utils.decorators.method_decorator', 'method_decorator', (['csrf_exempt'], {'name': '"""dispatch"""'}), "(csrf_exempt, name='dispatch')\n", (411, 441), False, 'from django.utils.decorators import method_decorator\n'), ((1015, 1061), 'django.utils.decorators.method_decorator', 'method_decorator', (['csrf_exempt'], {'name': '"""dispatch"""'}), "(csrf_exempt, name='dispatch')\n", (1031, 1061), False, 'from django.utils.decorators import method_decorator\n'), ((979, 1011), 'django.http.JsonResponse', 'JsonResponse', (["{'answer': answer}"], {}), "({'answer': answer})\n", (991, 1011), False, 'from django.http import HttpResponse, JsonResponse\n'), ((1602, 1630), 'gtts.gTTS', 'gTTS', ([], {'text': 'answer', 'lang': '"""en"""'}), "(text=answer, lang='en')\n", (1606, 1630), False, 'from gtts import gTTS\n'), ((1647, 1694), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""askme/assets/good.mp3"""'], {}), "(BASE_DIR, 'askme/assets/good.mp3')\n", (1659, 1694), False, 'import os\n'), ((1769, 1783), 'django.http.HttpResponse', 'HttpResponse', ([], {}), '()\n', (1781, 1783), False, 'from django.http import HttpResponse, JsonResponse\n'), ((1901, 1923), 'os.path.getsize', 'os.path.getsize', (['fname'], {}), '(fname)\n', (1916, 1923), False, 'import os\n'), ((640, 697), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""askme_api/assets/file.wav"""', '"""wb"""'], {}), "(BASE_DIR, 'askme_api/assets/file.wav', 'wb')\n", (652, 697), False, 'import os\n'), ((1224, 1271), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""askme/assets/file.wav"""'], {}), "(BASE_DIR, 'askme/assets/file.wav')\n", (1236, 1271), False, 'import os\n')]
|
from pydantic import BaseModel, validator, Field
from typing import List, Dict
from datetime import datetime
class IndicesIn(BaseModel):
index: str
source: str
sourcetype: str
class IndiceList(BaseModel):
indices: List[IndicesIn]
integration: str
execution_date: datetime
@validator('execution_date', pre=True, always=True)
def _get_execution_date(cls, v):
return datetime.strptime(v, '%Y-%m-%dT%H:%M:%S.%f')
|
[
"datetime.datetime.strptime",
"pydantic.validator"
] |
[((305, 355), 'pydantic.validator', 'validator', (['"""execution_date"""'], {'pre': '(True)', 'always': '(True)'}), "('execution_date', pre=True, always=True)\n", (314, 355), False, 'from pydantic import BaseModel, validator, Field\n'), ((408, 452), 'datetime.datetime.strptime', 'datetime.strptime', (['v', '"""%Y-%m-%dT%H:%M:%S.%f"""'], {}), "(v, '%Y-%m-%dT%H:%M:%S.%f')\n", (425, 452), False, 'from datetime import datetime\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import configparser
import os
class Config:
__v2ray_core_path = None
__v2ray_node_path = None
def __init__(self):
self.config = configparser.ConfigParser()
parent_dir = os.path.dirname(os.path.abspath(__file__))
self.config_path = os.path.join(Config.__v2ray_core_path, 'config.json')
self.json_path = os.path.join(parent_dir, 'json_template')
# self.config.read(self.config_path)
def get_path(self, key):
# return self.config.get('path', key)
return self.config_path
def get_data(self, key):
return self.config.get('data', key)
def set_data(self, key, value):
self.config.set('data', key, value)
self.config.write(open(self.config_path, "w"))
@staticmethod
def set_v2ray_core_path(dir: str):
"""设置当前v2ray_core程序的目录"""
Config.__v2ray_core_path = dir
@staticmethod
def get_v2ray_core_path():
"""获取当前v2ray_core程序的目录"""
return Config.__v2ray_core_path
@staticmethod
def set_v2ray_node_path(dir: str):
"""设置当前v2ray保存节点的目录"""
Config.__v2ray_node_path = dir
@staticmethod
def get_v2ray_node_path():
"""获取当前v2ray保存节点的目录"""
return Config.__v2ray_node_path
|
[
"os.path.abspath",
"configparser.ConfigParser",
"os.path.join"
] |
[((199, 226), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (224, 226), False, 'import configparser\n'), ((318, 371), 'os.path.join', 'os.path.join', (['Config.__v2ray_core_path', '"""config.json"""'], {}), "(Config.__v2ray_core_path, 'config.json')\n", (330, 371), False, 'import os\n'), ((397, 438), 'os.path.join', 'os.path.join', (['parent_dir', '"""json_template"""'], {}), "(parent_dir, 'json_template')\n", (409, 438), False, 'import os\n'), ((264, 289), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (279, 289), False, 'import os\n')]
|
import logging
import os
import asyncio
from cryptoxlib.CryptoXLib import CryptoXLib
from cryptoxlib.PeriodicChecker import PeriodicChecker
from cryptoxlib.Pair import Pair
from cryptoxlib.clients.binance.BinanceClient import BinanceClient
from cryptoxlib.clients.binance.BinanceWebsocket import OrderBookSymbolTickerSubscription
from cryptoxlib.version_conversions import async_run
LOG = logging.getLogger("cryptoxlib")
LOG.setLevel(logging.INFO)
LOG.addHandler(logging.StreamHandler())
print(f"Available loggers: {[name for name in logging.root.manager.loggerDict]}\n")
async def order_book_update(response: dict) -> None:
pass
class Subscriptions:
def __init__(self):
self.subscriptions = [
[
OrderBookSymbolTickerSubscription(Pair("BTC", "USDT"), callbacks = [self.call1]),
OrderBookSymbolTickerSubscription(Pair("ETH", "USDT"), callbacks = [self.call1])
],
[
OrderBookSymbolTickerSubscription(Pair("BNB", "USDT"), callbacks = [self.call2]),
OrderBookSymbolTickerSubscription(Pair("XRP", "USDT"), callbacks = [self.call2])
],
[
OrderBookSymbolTickerSubscription(Pair("ADA", "USDT"), callbacks = [self.call3]),
OrderBookSymbolTickerSubscription(Pair("DOT", "USDT"), callbacks = [self.call3])
]
]
self.subscription_set_ids = []
self.timers = [
PeriodicChecker(100),
PeriodicChecker(100),
PeriodicChecker(100)
]
async def call1(self, response : dict):
if self.timers[0].check():
print(response)
async def call2(self, response : dict):
if self.timers[1].check():
print(response)
async def call3(self, response : dict):
if self.timers[2].check():
print(response)
# global container for various subscription compositions
sub = Subscriptions()
async def main_loop(client: BinanceClient) -> None:
i = 0
sleep_sec = 1
while True:
if i == 3:
print("Unsubscribing BTC/USDT")
await client.unsubscribe_subscriptions([sub.subscriptions[0][0]])
if i == 6:
print("Unsubscribing BNB/USDT")
await client.unsubscribe_subscriptions([sub.subscriptions[1][0]])
if i == 9:
print("Unsubscribing ADA/USDT and DOT/USDT")
await client.unsubscribe_subscription_set(sub.subscription_set_ids[2])
if i == 12:
print("Unsubscribing all")
await client.unsubscribe_all()
if i == 15:
print("Subscribe BNB/BTC")
await client.add_subscriptions(sub.subscription_set_ids[0],
[OrderBookSymbolTickerSubscription(Pair("BNB", "BTC"), callbacks = [sub.call1])])
if i == 18:
print("Subscribe ETH/USDT again")
await client.add_subscriptions(sub.subscription_set_ids[0],
[OrderBookSymbolTickerSubscription(Pair("ETH", "USDT"), callbacks = [sub.call1])])
if i == 21:
print("Subscribe ADA/USDT and XRP/USDT again")
await client.add_subscriptions(sub.subscription_set_ids[1],
[OrderBookSymbolTickerSubscription(Pair("ADA", "USDT"), callbacks = [sub.call2]),
OrderBookSymbolTickerSubscription(Pair("XRP", "USDT"), callbacks = [sub.call2])])
if i == 24:
print("Shutting down websockets.")
await client.shutdown_websockets()
if i == 27:
print("Quitting the main loop.")
break
i += 1
await asyncio.sleep(sleep_sec)
async def run():
api_key = os.environ['APIKEY']
sec_key = os.environ['SECKEY']
client = CryptoXLib.create_binance_client(api_key, sec_key)
# initialize three independent websockets
sub.subscription_set_ids.append(client.compose_subscriptions(sub.subscriptions[0]))
sub.subscription_set_ids.append(client.compose_subscriptions(sub.subscriptions[1]))
sub.subscription_set_ids.append(client.compose_subscriptions(sub.subscriptions[2]))
try:
await asyncio.gather(*[
client.start_websockets(),
main_loop(client)
])
except Exception as e:
print(f"Out: {e}")
await client.close()
print("Exiting.")
if __name__ == "__main__":
async_run(run())
|
[
"asyncio.sleep",
"logging.StreamHandler",
"cryptoxlib.PeriodicChecker.PeriodicChecker",
"cryptoxlib.Pair.Pair",
"logging.getLogger",
"cryptoxlib.CryptoXLib.CryptoXLib.create_binance_client"
] |
[((392, 423), 'logging.getLogger', 'logging.getLogger', (['"""cryptoxlib"""'], {}), "('cryptoxlib')\n", (409, 423), False, 'import logging\n'), ((466, 489), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (487, 489), False, 'import logging\n'), ((3904, 3954), 'cryptoxlib.CryptoXLib.CryptoXLib.create_binance_client', 'CryptoXLib.create_binance_client', (['api_key', 'sec_key'], {}), '(api_key, sec_key)\n', (3936, 3954), False, 'from cryptoxlib.CryptoXLib import CryptoXLib\n'), ((1474, 1494), 'cryptoxlib.PeriodicChecker.PeriodicChecker', 'PeriodicChecker', (['(100)'], {}), '(100)\n', (1489, 1494), False, 'from cryptoxlib.PeriodicChecker import PeriodicChecker\n'), ((1508, 1528), 'cryptoxlib.PeriodicChecker.PeriodicChecker', 'PeriodicChecker', (['(100)'], {}), '(100)\n', (1523, 1528), False, 'from cryptoxlib.PeriodicChecker import PeriodicChecker\n'), ((1542, 1562), 'cryptoxlib.PeriodicChecker.PeriodicChecker', 'PeriodicChecker', (['(100)'], {}), '(100)\n', (1557, 1562), False, 'from cryptoxlib.PeriodicChecker import PeriodicChecker\n'), ((3776, 3800), 'asyncio.sleep', 'asyncio.sleep', (['sleep_sec'], {}), '(sleep_sec)\n', (3789, 3800), False, 'import asyncio\n'), ((782, 801), 'cryptoxlib.Pair.Pair', 'Pair', (['"""BTC"""', '"""USDT"""'], {}), "('BTC', 'USDT')\n", (786, 801), False, 'from cryptoxlib.Pair import Pair\n'), ((880, 899), 'cryptoxlib.Pair.Pair', 'Pair', (['"""ETH"""', '"""USDT"""'], {}), "('ETH', 'USDT')\n", (884, 899), False, 'from cryptoxlib.Pair import Pair\n'), ((1006, 1025), 'cryptoxlib.Pair.Pair', 'Pair', (['"""BNB"""', '"""USDT"""'], {}), "('BNB', 'USDT')\n", (1010, 1025), False, 'from cryptoxlib.Pair import Pair\n'), ((1104, 1123), 'cryptoxlib.Pair.Pair', 'Pair', (['"""XRP"""', '"""USDT"""'], {}), "('XRP', 'USDT')\n", (1108, 1123), False, 'from cryptoxlib.Pair import Pair\n'), ((1230, 1249), 'cryptoxlib.Pair.Pair', 'Pair', (['"""ADA"""', '"""USDT"""'], {}), "('ADA', 'USDT')\n", (1234, 1249), False, 'from cryptoxlib.Pair import Pair\n'), ((1328, 1347), 'cryptoxlib.Pair.Pair', 'Pair', (['"""DOT"""', '"""USDT"""'], {}), "('DOT', 'USDT')\n", (1332, 1347), False, 'from cryptoxlib.Pair import Pair\n'), ((2832, 2850), 'cryptoxlib.Pair.Pair', 'Pair', (['"""BNB"""', '"""BTC"""'], {}), "('BNB', 'BTC')\n", (2836, 2850), False, 'from cryptoxlib.Pair import Pair\n'), ((3096, 3115), 'cryptoxlib.Pair.Pair', 'Pair', (['"""ETH"""', '"""USDT"""'], {}), "('ETH', 'USDT')\n", (3100, 3115), False, 'from cryptoxlib.Pair import Pair\n'), ((3374, 3393), 'cryptoxlib.Pair.Pair', 'Pair', (['"""ADA"""', '"""USDT"""'], {}), "('ADA', 'USDT')\n", (3378, 3393), False, 'from cryptoxlib.Pair import Pair\n'), ((3499, 3518), 'cryptoxlib.Pair.Pair', 'Pair', (['"""XRP"""', '"""USDT"""'], {}), "('XRP', 'USDT')\n", (3503, 3518), False, 'from cryptoxlib.Pair import Pair\n')]
|
#!/usr/bin/env python3
"""
Python EKF Planner
@Author: <NAME>, original MATLAB code and Python version
@Author: <NAME>, initial MATLAB port
Based on code by <NAME>, Oxford University,
http://www.robots.ox.ac.uk/~pnewman
"""
from collections import namedtuple
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from spatialmath import base
"""
Monte-carlo based localisation for estimating vehicle pose based on
odometry and observations of known landmarks.
"""
# TODO: refactor this and EKF, RNG, history, common plots, animation, movie
class ParticleFilter:
def __init__(self, robot, sensor, R, L, nparticles=500, seed=0, x0=None,
verbose=False, animate=False, history=True, workspace=None):
"""
Particle filter
:param robot: robot motion model
:type robot: :class:`VehicleBase` subclass,
:param sensor: vehicle mounted sensor model
:type sensor: :class:`SensorBase` subclass
:param R: covariance of the zero-mean Gaussian noise added to the particles at each step (diffusion)
:type R: ndarray(3,3)
:param L: covariance used in the sensor likelihood model
:type L: ndarray(2,2)
:param nparticles: number of particles, defaults to 500
:type nparticles: int, optional
:param seed: random number seed, defaults to 0
:type seed: int, optional
:param x0: initial state, defaults to [0, 0, 0]
:type x0: array_like(3), optional
:param verbose: display extra debug information, defaults to False
:type verbose: bool, optional
:param history: retain step-by-step history, defaults to True
:type history: bool, optional
:param workspace: dimension of workspace, see :func:`~spatialmath.base.graphics.expand_dims`
:type workspace: scalar, array_like(2), array_like(4)
This class implements a Monte-Carlo estimator or particle filter for
vehicle state, based on odometry, a landmark map, and landmark
observations. The state of each particle is a possible vehicle
configuration :math:`(x,y,\theta)`. Bootstrap particle resampling is
used.
The working area is defined by ``workspace`` or inherited from the
landmark map attached to the ``sensor`` (see
:func:`~spatialmath.base.graphics.expand_dims`):
============== ======= =======
``workspace`` x-range y-range
============== ======= =======
A (scalar) -A:A -A:A
[A, B] A:B A:B
[A, B, C, D] A:B C:D
============== ======= =======
Particles are initially distributed uniform randomly over this area.
Example::
V = np.diag([0.02, np.radians(0.5)]) ** 2
robot = Bicycle(covar=V, animation="car", workspace=10)
robot.control = RandomPath(workspace=robot)
map = LandmarkMap(nlandmarks=20, workspace=robot.workspace)
W = np.diag([0.1, np.radians(1)]) ** 2
sensor = RangeBearingSensor(robot, map, covar=W, plot=True)
R = np.diag([0.1, 0.1, np.radians(1)]) ** 2
L = np.diag([0.1, 0.1])
pf = ParticleFilter(robot, sensor, R, L, nparticles=1000)
pf.run(T=10)
map.plot()
robot.plot_xy()
pf.plot_xy()
plt.plot(pf.get_std()[:100,:])
.. note:: Set ``seed=0`` to get different behaviour from run to run.
:seealso: :meth:`run`
"""
self._robot = robot
self._sensor = sensor
self.R = R
self.L = L
self.nparticles = nparticles
self._animate = animate
# self.dim = sensor.map.dim
self._history = []
self.x = ()
self.weight = ()
self.w0 = 0.05
self._x0 = x0
# create a private random number stream if required
self._random = np.random.default_rng(seed)
self._seed = seed
self._keep_history = history # keep history
self._htuple = namedtuple("PFlog", "t odo xest std weights")
if workspace is not None:
self._dim = base.expand_dims(workspace)
else:
self._dim = sensor.map.workspace
self._workspace = self.robot.workspace
self._init()
def __str__(self):
#ParticleFilter.char Convert to string
#
# PF.char() is a string representing the state of the ParticleFilter
# object in human-readable form.
#
# See also ParticleFilter.display.
def indent(s, n=2):
spaces = ' ' * n
return s.replace('\n', '\n' + spaces)
s = f"ParticleFilter object: {self.nparticles} particles"
s += '\nR: ' + base.array2str(self.R)
s += '\nL: ' + base.array2str(self.L)
if self.robot is not None:
s += indent("\nrobot: " + str(self.robot))
if self.sensor is not None:
s += indent("\nsensor: " + str(self.sensor))
return s
@property
def robot(self):
"""
Get robot object
:return: robot used in simulation
:rtype: :class:`VehicleBase` subclass
"""
return self._robot
@property
def sensor(self):
"""
Get sensor object
:return: sensor used in simulation
:rtype: :class:`SensorBase` subclass
"""
return self._sensor
@property
def map(self):
"""
Get map object
:return: map used in simulation
:rtype: :class:`LandmarkMap` subclass
"""
return self._map
@property
def verbose(self):
"""
Get verbosity state
:return: verbosity
:rtype: bool
"""
return self._verbose
@property
def history(self):
"""
Get EKF simulation history
:return: simulation history
:rtype: list of namedtuples
At each simulation timestep a namedtuple of is appended to the history
list. It contains, for that time step, estimated state and covariance,
and sensor observation.
:seealso: :meth:`get_t` :meth:`get_xy` :meth:`get_std`
:meth:`get_Pnorm`
"""
return self._history
@property
def workspace(self):
"""
Size of robot workspace
:return: workspace bounds [xmin, xmax, ymin, ymax]
:rtype: ndarray(4)
Returns the bounds of the workspace as specified by constructor
option ``workspace``
"""
return self._workspace
@property
def random(self):
"""
Get private random number generator
:return: NumPy random number generator
:rtype: :class:`numpy.random.Generator`
Has methods including:
- ``integers(low, high, size, endpoint)``
- ``random(size)``
- ``uniform``
- ``normal(mean, std, size)``
- ``multivariate_normal(mean, covar, size)``
The generator is initialized with the seed provided at constructor
time every time ``init`` is called.
"""
return self._random
def _init(self, x0=None):
#ParticleFilter.init Initialize the particle filter
#
# PF.init() initializes the particle distribution and clears the
# history.
#
# Notes::
# - If initial particle states were given to the constructor the states are
# set to this value, else a random distribution over the map is used.
# - Invoked by the run() method.
self.robot.init()
self.sensor.init()
#clear the history
self._history = []
# create a new private random number generator
if self._seed is not None:
self._random = np.random.default_rng(self._seed)
self._t = 0
# initialize particles
if x0 is None:
x0 = self._x0
if x0 is None:
# create initial particle distribution as uniformly randomly distributed
# over the map workspace and heading angles
x = self.random.uniform(self.workspace[0], self.workspace[1], size=(self.nparticles,))
y = self.random.uniform(self.workspace[2], self.workspace[3], size=(self.nparticles,))
t = self.random.uniform(-np.pi, np.pi, size=(self.nparticles,))
self.x = np.c_[x, y, t]
self.weight = np.ones((self.nparticles,))
def run(self, T=10, x0=None):
"""
Run the particle filter simulation
:param T: maximum simulation time in seconds
:type T: float
:param animate: animate motion of vehicle, defaults to False
:type animate: bool, optional
:param movie: name of movie file to create, defaults to None
:type movie: str, optional
Simulates the motion of a vehicle (under the control of a driving agent)
and the EKF estimator. The steps are:
- initialize the filter, vehicle and vehicle driver agent, sensor
- for each time step:
- step the vehicle and its driver agent, obtain odometry
- take a sensor reading
- execute the EKF
- save information as a namedtuple to the history list for later display
:seealso: :meth:`history` :meth:`landmark` :meth:`landmarks`
:meth:`get_xy` :meth:`get_t` :meth:`get_std`
:meth:`plot_xy`
"""
self._init(x0=x0)
# anim = Animate(opt.movie)
# display the initial particles
if self._animate:
self.h, = plt.plot(self.x[:, 0], self.x[:, 1], 'go', zorder=0, markersize=3, markeredgecolor='none', alpha=0.3, label='particle')
# set(self.h, 'Tag', 'particles')
# self.robot.plot()
# iterate over time
for i in range(round(T / self.robot.dt)):
self._step()
# anim.add()
# anim.close()
def _step(self):
#fprintf('---- step\n')
odo = self.robot.step() # move the robot
# update the particles based on odometry
self._predict(odo)
# get a sensor reading
z, lm_id = self.sensor.reading()
if z is not None:
self._observe(z, lm_id)
#fprintf(' observe beacon #d\n', lm_id)
self._select()
# our estimate is simply the mean of the particles
x_est = self.x.mean(axis=0)
std_est = self.x.std(axis=0)
# std is more complex for angles, need to account for 2pi wrap
std_est[2] = np.sqrt(np.sum(base.angdiff(self.x[:,2], x_est[2]) ** 2)) / (self.nparticles-1)
# display the updated particles
# set(self.h, 'Xdata', self.x(:,1), 'Ydata', self.x(:,2), 'Zdata', self.x(:,3))
if self._animate:
self.h.set_xdata(self.x[:, 0])
self.h.set_ydata(self.x[:, 1])
# if ~isempty(self.anim)
# self.anim.add()
if self._keep_history:
hist = self._htuple(
self.robot._t,
odo.copy(),
x_est,
std_est,
self.weight.copy()
)
self._history.append(hist)
def plot_pdf(self):
"""
Plot particle PDF
Displays a discrete PDF of vehicle position. Creates a 3D plot where
the x- and y-axes are the estimated vehicle position and the z-axis is
the particle weight. Each particle is represented by a a vertical line
segment of height equal to particle weight.
"""
ax = base.plotvol3()
for (x, y, t), weight in zip(self.x, self.weight):
# ax.plot([x, x], [y, y], [0, weight], 'r')
ax.plot([x, x], [y, y], [0, weight], 'skyblue', linewidth=3)
ax.plot(x, y, weight, 'k.', markersize=6)
plt.grid(True)
plt.xlabel('X')
plt.ylabel('Y')
plt.xlim()
ax.set_zlabel('particle weight')
ax.view_init(29, 59)
def _predict(self, odo):
# step 2
# update the particle state based on odometry and a random perturbation
# Straightforward code:
#
# for i=1:self.nparticles
# x = self.robot.f( self.x(i,:), odo)' + sqrt(self.R)*self.randn[2,0]
# x[2] = angdiff(x[2])
# self.x(i,:) = x
#
# Vectorized code:
self.x = self.robot.f(self.x, odo) + \
self.random.multivariate_normal((0, 0, 0), self.R, size=self.nparticles)
self.x[:, 2] = base.angdiff(self.x[:, 2])
def _observe(self, z, lm_id):
# step 3
# predict observation and score the particles
# Straightforward code:
#
# for p = 1:self.nparticles
# # what do we expect observation to be for this particle?
# # use the sensor model h(.)
# z_pred = self.sensor.h( self.x(p,:), lm_id)
#
# # how different is it
# innov[0] = z[0] - z_pred[0]
# innov[1] = angdiff(z[1], z_pred[1])
#
# # get likelihood (new importance). Assume Gaussian but any PDF works!
# # If predicted obs is very different from actual obs this score will be low
# # ie. this particle is not very good at predicting the observation.
# # A lower score means it is less likely to be selected for the next generation...
# # The weight is never zero.
# self.weight(p) = exp(-0.5*innov'*inv(self.L)*innov) + 0.05
# end
#
# Vectorized code:
invL = np.linalg.inv(self.L)
z_pred = self.sensor.h(self.x, lm_id)
z_pred[:, 0] = z[0] - z_pred[:, 0]
z_pred[:, 1] = base.angdiff(z[1], z_pred[:, 1])
LL = -0.5 * np.r_[invL[0,0], invL[1,1], 2*invL[0,1]]
e = np.c_[z_pred[:, 0]**2, z_pred[:, 1]**2, z_pred[:,0] * z_pred[:, 1]] @ LL
self.weight = np.exp(e) + self.w0
def _select(self):
# step 4
# select particles based on their weights
#
# particles with large weights will occupy a greater percentage of the
# y axis in a cummulative plot
cdf = np.cumsum(self.weight) / self.weight.sum()
# so randomly (uniform) choosing y values is more likely to correspond to
# better particles...
iselect = self.random.uniform(0, 1, size=(self.nparticles,))
# find the particle that corresponds to each y value (just a look up)
interpfun = sp.interpolate.interp1d(cdf, np.arange(self.nparticles),
assume_sorted=True, kind='nearest', fill_value='extrapolate')
inextgen = interpfun(iselect).astype(np.int)
# copy selected particles for next generation..
self.x = self.x[inextgen, :]
def get_t(self):
"""
Get time from simulation
:return: simulation time vector
:rtype: ndarray(n)
Return simulation time vector, starts at zero. The timestep is an
attribute of the ``robot`` object.
"""
return np.array([h.t for h in self._history])
def get_xyt(self):
r"""
Get estimated vehicle trajectory
:return: vehicle trajectory where each row is configuration :math:`(x, y, \theta)`
:rtype: ndarray(n,3)
:seealso: :meth:`plot_xy` :meth:`run` :meth:`history`
"""
return np.array([h.xest[:2] for h in self._history])
def get_std(self):
r"""
Get standard deviation of particles
:return: standard deviation of vehicle position estimate
:rtype: ndarray(n,2)
Return the standard deviation :math:`(\sigma_x, \sigma_y)` of the
particle cloud at each time step.
:seealso: :meth:`get_xyt`
"""
return np.array([h.std for h in self._history])
def plot_xy(self, block=False, **kwargs):
r"""
Plot estimated vehicle position
:param args: position arguments passed to :meth:`~matplotlib.axes.Axes.plot`
:param kwargs: keywords arguments passed to :meth:`~matplotlib.axes.Axes.plot`
:param block: hold plot until figure is closed, defaults to False
:type block: bool, optional
Plot the estimated vehicle path in the xy-plane.
:seealso: :meth:`get_xy`
"""
xyt = self.get_xyt()
plt.plot(xyt[:, 0], xyt[:, 1], **kwargs)
# plt.show(block=block)
|
[
"matplotlib.pyplot.xlim",
"spatialmath.base.expand_dims",
"matplotlib.pyplot.plot",
"numpy.ones",
"numpy.random.default_rng",
"numpy.cumsum",
"spatialmath.base.array2str",
"numpy.linalg.inv",
"spatialmath.base.plotvol3",
"collections.namedtuple",
"numpy.array",
"numpy.exp",
"matplotlib.pyplot.ylabel",
"spatialmath.base.angdiff",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid",
"numpy.arange"
] |
[((3961, 3988), 'numpy.random.default_rng', 'np.random.default_rng', (['seed'], {}), '(seed)\n', (3982, 3988), True, 'import numpy as np\n'), ((4096, 4141), 'collections.namedtuple', 'namedtuple', (['"""PFlog"""', '"""t odo xest std weights"""'], {}), "('PFlog', 't odo xest std weights')\n", (4106, 4141), False, 'from collections import namedtuple\n'), ((8522, 8549), 'numpy.ones', 'np.ones', (['(self.nparticles,)'], {}), '((self.nparticles,))\n', (8529, 8549), True, 'import numpy as np\n'), ((11741, 11756), 'spatialmath.base.plotvol3', 'base.plotvol3', ([], {}), '()\n', (11754, 11756), False, 'from spatialmath import base\n'), ((12008, 12022), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (12016, 12022), True, 'import matplotlib.pyplot as plt\n'), ((12031, 12046), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X"""'], {}), "('X')\n", (12041, 12046), True, 'import matplotlib.pyplot as plt\n'), ((12055, 12070), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Y"""'], {}), "('Y')\n", (12065, 12070), True, 'import matplotlib.pyplot as plt\n'), ((12079, 12089), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {}), '()\n', (12087, 12089), True, 'import matplotlib.pyplot as plt\n'), ((12707, 12733), 'spatialmath.base.angdiff', 'base.angdiff', (['self.x[:, 2]'], {}), '(self.x[:, 2])\n', (12719, 12733), False, 'from spatialmath import base\n'), ((13778, 13799), 'numpy.linalg.inv', 'np.linalg.inv', (['self.L'], {}), '(self.L)\n', (13791, 13799), True, 'import numpy as np\n'), ((13912, 13944), 'spatialmath.base.angdiff', 'base.angdiff', (['z[1]', 'z_pred[:, 1]'], {}), '(z[1], z_pred[:, 1])\n', (13924, 13944), False, 'from spatialmath import base\n'), ((15267, 15305), 'numpy.array', 'np.array', (['[h.t for h in self._history]'], {}), '([h.t for h in self._history])\n', (15275, 15305), True, 'import numpy as np\n'), ((15595, 15640), 'numpy.array', 'np.array', (['[h.xest[:2] for h in self._history]'], {}), '([h.xest[:2] for h in self._history])\n', (15603, 15640), True, 'import numpy as np\n'), ((15996, 16036), 'numpy.array', 'np.array', (['[h.std for h in self._history]'], {}), '([h.std for h in self._history])\n', (16004, 16036), True, 'import numpy as np\n'), ((16561, 16601), 'matplotlib.pyplot.plot', 'plt.plot', (['xyt[:, 0]', 'xyt[:, 1]'], {}), '(xyt[:, 0], xyt[:, 1], **kwargs)\n', (16569, 16601), True, 'import matplotlib.pyplot as plt\n'), ((4201, 4228), 'spatialmath.base.expand_dims', 'base.expand_dims', (['workspace'], {}), '(workspace)\n', (4217, 4228), False, 'from spatialmath import base\n'), ((4808, 4830), 'spatialmath.base.array2str', 'base.array2str', (['self.R'], {}), '(self.R)\n', (4822, 4830), False, 'from spatialmath import base\n'), ((4855, 4877), 'spatialmath.base.array2str', 'base.array2str', (['self.L'], {}), '(self.L)\n', (4869, 4877), False, 'from spatialmath import base\n'), ((7888, 7921), 'numpy.random.default_rng', 'np.random.default_rng', (['self._seed'], {}), '(self._seed)\n', (7909, 7921), True, 'import numpy as np\n'), ((9705, 9828), 'matplotlib.pyplot.plot', 'plt.plot', (['self.x[:, 0]', 'self.x[:, 1]', '"""go"""'], {'zorder': '(0)', 'markersize': '(3)', 'markeredgecolor': '"""none"""', 'alpha': '(0.3)', 'label': '"""particle"""'}), "(self.x[:, 0], self.x[:, 1], 'go', zorder=0, markersize=3,\n markeredgecolor='none', alpha=0.3, label='particle')\n", (9713, 9828), True, 'import matplotlib.pyplot as plt\n'), ((14114, 14123), 'numpy.exp', 'np.exp', (['e'], {}), '(e)\n', (14120, 14123), True, 'import numpy as np\n'), ((14378, 14400), 'numpy.cumsum', 'np.cumsum', (['self.weight'], {}), '(self.weight)\n', (14387, 14400), True, 'import numpy as np\n'), ((14732, 14758), 'numpy.arange', 'np.arange', (['self.nparticles'], {}), '(self.nparticles)\n', (14741, 14758), True, 'import numpy as np\n'), ((10723, 10759), 'spatialmath.base.angdiff', 'base.angdiff', (['self.x[:, 2]', 'x_est[2]'], {}), '(self.x[:, 2], x_est[2])\n', (10735, 10759), False, 'from spatialmath import base\n')]
|
# Generated by Django 2.2.13 on 2020-08-26 20:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pipeline', '0063_merge_20200826_2021'),
]
operations = [
migrations.RenameField(
model_name='censussubdivision',
old_name='households_owner_spending_30_pct_income',
new_name='households_owner_pct_spending_30_pct_income',
),
migrations.RenameField(
model_name='censussubdivision',
old_name='households_tenant_spending_30_pct_income',
new_name='households_tenant_pct_spending_30_pct_income',
),
migrations.AddField(
model_name='censussubdivision',
name='households_owner_count_mortgage',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='censussubdivision',
name='households_owner_count_spending_30_pct_income',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='censussubdivision',
name='households_tenant_count_spending_30_pct_income',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='censussubdivision',
name='households_tenant_count_subsidized_housing',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='censussubdivision',
name='pop_count_0_14',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='censussubdivision',
name='pop_count_14_65',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='censussubdivision',
name='pop_count_65',
field=models.IntegerField(null=True),
),
]
|
[
"django.db.models.IntegerField",
"django.db.migrations.RenameField"
] |
[((238, 410), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""censussubdivision"""', 'old_name': '"""households_owner_spending_30_pct_income"""', 'new_name': '"""households_owner_pct_spending_30_pct_income"""'}), "(model_name='censussubdivision', old_name=\n 'households_owner_spending_30_pct_income', new_name=\n 'households_owner_pct_spending_30_pct_income')\n", (260, 410), False, 'from django.db import migrations, models\n'), ((457, 631), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""censussubdivision"""', 'old_name': '"""households_tenant_spending_30_pct_income"""', 'new_name': '"""households_tenant_pct_spending_30_pct_income"""'}), "(model_name='censussubdivision', old_name=\n 'households_tenant_spending_30_pct_income', new_name=\n 'households_tenant_pct_spending_30_pct_income')\n", (479, 631), False, 'from django.db import migrations, models\n'), ((813, 843), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)'}), '(null=True)\n', (832, 843), False, 'from django.db import migrations, models\n'), ((1013, 1043), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)'}), '(null=True)\n', (1032, 1043), False, 'from django.db import migrations, models\n'), ((1214, 1244), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)'}), '(null=True)\n', (1233, 1244), False, 'from django.db import migrations, models\n'), ((1411, 1441), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)'}), '(null=True)\n', (1430, 1441), False, 'from django.db import migrations, models\n'), ((1580, 1610), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)'}), '(null=True)\n', (1599, 1610), False, 'from django.db import migrations, models\n'), ((1750, 1780), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)'}), '(null=True)\n', (1769, 1780), False, 'from django.db import migrations, models\n'), ((1917, 1947), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)'}), '(null=True)\n', (1936, 1947), False, 'from django.db import migrations, models\n')]
|
from datetime import datetime
import logging
from flask import jsonify, request
from app import app, db
from app.models import Player, Chart, Score
from app.ranking import update_pb_for_score, update_player_osmos
from app.lazer import process_lazer_payload
from . import dumb_decryption
@app.route('/versions')
def versions():
return jsonify({
'osu': app.config.get('REQUIRED_OSU_VERSION', 0),
'pusher': app.config.get('REQUIRED_PUSHER_VERSION', 0),
})
@app.route('/lazer', methods=['POST'])
def lazer_score():
return score(process_lazer_payload(request.json), decrypt=False)
@app.route('/score', methods=['POST'])
def score(data=None, decrypt=True):
data = data or request.data
if not data:
logging.warning('Empty request')
return 'No data', 400
print('got a score!!')
print(data)
data = dumb_decryption(data) if decrypt else data
try:
player = Player.query.get(data['player']['id'])
if not player:
player = Player(data['player'])
else:
player.update_fields(data['player'])
chart = Chart.query.get(data['chart']['chart_id'])
if not chart:
chart = Chart(data['chart'])
else:
chart.update_fields(data['chart'])
data['score']['hash'] = data['chart'].get('hash')
score = Score(data['score'], chart)
score.achieved_at = datetime.utcnow()
score.player = player
score.version = 6
if not score.is_supported():
db.session.rollback()
print('score ignored because not supported')
return 'Not OK'
db.session.add_all([player, chart, score])
db.session.commit()
print('pushed to db! ({} played by {})'.format(
chart.name, player.username
))
score.set_osmos()
print('osmos set')
print('updating pb if needed')
if update_pb_for_score(player, score):
print('updated pb returned true')
update_player_osmos(player)
player.playcount += 1
db.session.commit()
except Exception as e:
db.session.rollback()
logging.warning(f'Malformed score payload: \n{data}')
raise logging.warning(e, exc_info=True)
return 'OK'
|
[
"app.app.config.get",
"app.app.route",
"app.db.session.rollback",
"app.models.Player",
"logging.warning",
"app.models.Chart",
"datetime.datetime.utcnow",
"app.ranking.update_pb_for_score",
"app.db.session.commit",
"app.ranking.update_player_osmos",
"app.models.Score",
"app.models.Chart.query.get",
"app.models.Player.query.get",
"app.db.session.add_all",
"app.lazer.process_lazer_payload"
] |
[((291, 313), 'app.app.route', 'app.route', (['"""/versions"""'], {}), "('/versions')\n", (300, 313), False, 'from app import app, db\n'), ((483, 520), 'app.app.route', 'app.route', (['"""/lazer"""'], {'methods': "['POST']"}), "('/lazer', methods=['POST'])\n", (492, 520), False, 'from app import app, db\n'), ((611, 648), 'app.app.route', 'app.route', (['"""/score"""'], {'methods': "['POST']"}), "('/score', methods=['POST'])\n", (620, 648), False, 'from app import app, db\n'), ((557, 592), 'app.lazer.process_lazer_payload', 'process_lazer_payload', (['request.json'], {}), '(request.json)\n', (578, 592), False, 'from app.lazer import process_lazer_payload\n'), ((742, 774), 'logging.warning', 'logging.warning', (['"""Empty request"""'], {}), "('Empty request')\n", (757, 774), False, 'import logging\n'), ((928, 966), 'app.models.Player.query.get', 'Player.query.get', (["data['player']['id']"], {}), "(data['player']['id'])\n", (944, 966), False, 'from app.models import Player, Chart, Score\n'), ((1113, 1155), 'app.models.Chart.query.get', 'Chart.query.get', (["data['chart']['chart_id']"], {}), "(data['chart']['chart_id'])\n", (1128, 1155), False, 'from app.models import Player, Chart, Score\n'), ((1354, 1381), 'app.models.Score', 'Score', (["data['score']", 'chart'], {}), "(data['score'], chart)\n", (1359, 1381), False, 'from app.models import Player, Chart, Score\n'), ((1410, 1427), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1425, 1427), False, 'from datetime import datetime\n'), ((1648, 1690), 'app.db.session.add_all', 'db.session.add_all', (['[player, chart, score]'], {}), '([player, chart, score])\n', (1666, 1690), False, 'from app import app, db\n'), ((1699, 1718), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1716, 1718), False, 'from app import app, db\n'), ((1929, 1963), 'app.ranking.update_pb_for_score', 'update_pb_for_score', (['player', 'score'], {}), '(player, score)\n', (1948, 1963), False, 'from app.ranking import update_pb_for_score, update_player_osmos\n'), ((2089, 2108), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (2106, 2108), False, 'from app import app, db\n'), ((366, 407), 'app.app.config.get', 'app.config.get', (['"""REQUIRED_OSU_VERSION"""', '(0)'], {}), "('REQUIRED_OSU_VERSION', 0)\n", (380, 407), False, 'from app import app, db\n'), ((427, 471), 'app.app.config.get', 'app.config.get', (['"""REQUIRED_PUSHER_VERSION"""', '(0)'], {}), "('REQUIRED_PUSHER_VERSION', 0)\n", (441, 471), False, 'from app import app, db\n'), ((1011, 1033), 'app.models.Player', 'Player', (["data['player']"], {}), "(data['player'])\n", (1017, 1033), False, 'from app.models import Player, Chart, Score\n'), ((1198, 1218), 'app.models.Chart', 'Chart', (["data['chart']"], {}), "(data['chart'])\n", (1203, 1218), False, 'from app.models import Player, Chart, Score\n'), ((1533, 1554), 'app.db.session.rollback', 'db.session.rollback', ([], {}), '()\n', (1552, 1554), False, 'from app import app, db\n'), ((2023, 2050), 'app.ranking.update_player_osmos', 'update_player_osmos', (['player'], {}), '(player)\n', (2042, 2050), False, 'from app.ranking import update_pb_for_score, update_player_osmos\n'), ((2144, 2165), 'app.db.session.rollback', 'db.session.rollback', ([], {}), '()\n', (2163, 2165), False, 'from app import app, db\n'), ((2174, 2230), 'logging.warning', 'logging.warning', (['f"""Malformed score payload: \n{data}"""'], {}), '(f"""Malformed score payload: \n{data}""")\n', (2189, 2230), False, 'import logging\n'), ((2242, 2275), 'logging.warning', 'logging.warning', (['e'], {'exc_info': '(True)'}), '(e, exc_info=True)\n', (2257, 2275), False, 'import logging\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Generate various Similarity matrix
through the MatrixGenerator methods
gen_matrix for synthetic data, and
gen_E_coli_matrix for DNA data.
"""
import numpy as np
# from scipy import sparse as sp
from scipy.linalg import toeplitz
def gen_lambdas(type_matrix, n):
'''
Generates lambdas to define a toeplitz matrix with
diagonal elements t_k = lambdas[k]
'''
array_lambdas = np.zeros(n)
if type_matrix == 'LinearBanded':
# Bandwidth = 10% ?
cov = int(np.floor(n/10))
array_lambdas[:cov] = cov - abs(np.arange(cov))
elif type_matrix == 'LinearStrongDecrease':
alpha = 0.1
array_lambdas = np.exp(-alpha*np.arange(n))
elif type_matrix == 'CircularBanded':
# Bandwidth = 10% ?
cov = int(np.floor(n/10))
array_lambdas[:cov] = cov - abs(np.arange(cov))
array_lambdas[-cov:] = array_lambdas[:cov][::-1]
elif type_matrix == 'CircularStrongDecrease':
alpha = 0.1
array_lambdas = np.exp(-alpha*np.arange(n))
p = int(np.floor(n/2))
array_lambdas[-p:] = array_lambdas[:p][::-1]
else:
raise ValueError("Unrecognized type_matrix !")
return(array_lambdas)
def gen_toeplitz_sim(lambdas):
'''Build Toeplitz strong-R-matrix'''
return(toeplitz(lambdas))
#
#
# def sym_max(X):
# """
# Returns symmetrization of sparse matrix X.
# X_sym = max(X, X.T) rather than X + X.T to avoid adding up values when
# there are duplicates in the overlap file.
# If X is triangular, max(X, X.T) and X + X.T are equal.
#
# TODO : check how many values are not symmetric
# and separate cases where Aij = 0 ...
# """
#
# dif_mat = X - X.T
# dif_mat.data = np.where(dif_mat.data < 0, 1, 0)
# return X - X.multiply(dif_mat) + X.T.multiply(dif_mat)
class MatrixGenerator():
# Apply permutation
def apply_perm(self, perm):
'''
Apply a permutation to the similarity matrix.
perm is given as a numpy array
'''
n_ = self.n
# check size is ok
if np.shape(perm)[0] != n_:
raise ValueError('the size of the permutation matrix does not match that of the\
similarity matrix.')
# check perm is a permutation
if not (np.sort(perm) == np.arange(n_)).all():
raise ValueError('perm is not considered as a'
'permutation matrix of [0; \cdots; n-1]')
self.sim_matrix = self.sim_matrix[perm]
self.sim_matrix = self.sim_matrix.T[perm]
self.sim_matrix = self.sim_matrix.T
return self
# Add additive noise
def add_sparse_noise(self, noise_prop, noise_eps,
law='uniform'):
'''
Create a function that add a symetric sparse noise!
noiseprop controls the support of the sparse noise
noiseeps controls the eps amplitude of the noise
'''
n_ = self.n
# first find a random support
N = np.tril(np.random.rand(n_, n_))
idx = np.where(N > noise_prop)
N[idx] = 0
# allocate value on the support
[ii, jj] = np.where(N != 0)
if law == 'gaussian':
N[np.where(N != 0)] = noise_eps * np.abs(
np.random.normal(0, 1, len(ii)))
elif law == 'uniform':
N[np.where(N != 0)] = noise_eps*np.random.rand(1, len(ii))
# symetrize the noise
N = N + N.T
# Add noise to similarity matrix
self.sim_matrix += N
return self
def gen_matrix(self, n, type_matrix='LinearBanded',
apply_perm=True, perm=None,
noise_prop=1, noise_ampl=0, law='uniform'):
self.n = n
lambdas = gen_lambdas(type_matrix, n)
self.sim_matrix = gen_toeplitz_sim(lambdas)
if apply_perm:
if not perm: # generate permutation if not provided by user
perm = np.random.permutation(n)
self.apply_perm(perm)
self.true_perm = perm
else:
self.true_perm = np.arange(n)
if noise_ampl > 0:
normed_fro = np.sqrt(np.mean(self.sim_matrix**2))
self.add_sparse_noise(noise_prop, noise_ampl*normed_fro, law=law)
return self
#
# def gen_E_coli_matrix(self, apply_perm=False):
# """
# generate similarity matrix from <NAME>i ONT reads [ref Loman et al.]
# TODO :
# - change the path to data folder if this is a package ?
# - recompute reads_pos with minimap2 instead of BWA.
# """
# # Read data matrix
# data_dir = './data/'
# mat_fn = data_dir + 'ecoli_mat.csv'
# pos_fn = data_dir + 'ecoli_ref_pos.csv'
# mat_idxs = np.genfromtxt(mat_fn, delimiter=',')
# reads_pos = np.genfromtxt(pos_fn, delimiter=',')
# n_reads = reads_pos.shape[0]
# sim_mat = sp.coo_matrix((mat_idxs[:, 2],
# (mat_idxs[:, 0]-1, mat_idxs[:, 1]-1)),
# shape=(n_reads, n_reads),
# dtype='float64').tocsr()
# sim_mat = sym_max(sim_mat)
# # Remove unaligned reads (unknown ground turh position)
# in_idx = np.argwhere(reads_pos < 7e6)[:, 0]
# sim_lil = sim_mat.tolil()
# self.n = len(in_idx)
# if apply_perm:
# perm = np.random.permutation(self.n)
# self.true_perm = perm
# in_idx = in_idx[perm]
# else:
# self.true_perm = np.arange(self.n)
# sim_lil = sim_lil[in_idx, :][:, in_idx]
# self.sim_matrix = sim_lil.tocsr()
#
# return self
|
[
"scipy.linalg.toeplitz",
"numpy.floor",
"numpy.zeros",
"numpy.shape",
"numpy.sort",
"numpy.where",
"numpy.arange",
"numpy.mean",
"numpy.random.permutation",
"numpy.random.rand"
] |
[((444, 455), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (452, 455), True, 'import numpy as np\n'), ((1336, 1353), 'scipy.linalg.toeplitz', 'toeplitz', (['lambdas'], {}), '(lambdas)\n', (1344, 1353), False, 'from scipy.linalg import toeplitz\n'), ((3120, 3144), 'numpy.where', 'np.where', (['(N > noise_prop)'], {}), '(N > noise_prop)\n', (3128, 3144), True, 'import numpy as np\n'), ((3223, 3239), 'numpy.where', 'np.where', (['(N != 0)'], {}), '(N != 0)\n', (3231, 3239), True, 'import numpy as np\n'), ((540, 556), 'numpy.floor', 'np.floor', (['(n / 10)'], {}), '(n / 10)\n', (548, 556), True, 'import numpy as np\n'), ((3082, 3104), 'numpy.random.rand', 'np.random.rand', (['n_', 'n_'], {}), '(n_, n_)\n', (3096, 3104), True, 'import numpy as np\n'), ((4155, 4167), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (4164, 4167), True, 'import numpy as np\n'), ((596, 610), 'numpy.arange', 'np.arange', (['cov'], {}), '(cov)\n', (605, 610), True, 'import numpy as np\n'), ((2130, 2144), 'numpy.shape', 'np.shape', (['perm'], {}), '(perm)\n', (2138, 2144), True, 'import numpy as np\n'), ((3284, 3300), 'numpy.where', 'np.where', (['(N != 0)'], {}), '(N != 0)\n', (3292, 3300), True, 'import numpy as np\n'), ((4019, 4043), 'numpy.random.permutation', 'np.random.permutation', (['n'], {}), '(n)\n', (4040, 4043), True, 'import numpy as np\n'), ((4228, 4257), 'numpy.mean', 'np.mean', (['(self.sim_matrix ** 2)'], {}), '(self.sim_matrix ** 2)\n', (4235, 4257), True, 'import numpy as np\n'), ((719, 731), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (728, 731), True, 'import numpy as np\n'), ((822, 838), 'numpy.floor', 'np.floor', (['(n / 10)'], {}), '(n / 10)\n', (830, 838), True, 'import numpy as np\n'), ((3418, 3434), 'numpy.where', 'np.where', (['(N != 0)'], {}), '(N != 0)\n', (3426, 3434), True, 'import numpy as np\n'), ((878, 892), 'numpy.arange', 'np.arange', (['cov'], {}), '(cov)\n', (887, 892), True, 'import numpy as np\n'), ((1090, 1105), 'numpy.floor', 'np.floor', (['(n / 2)'], {}), '(n / 2)\n', (1098, 1105), True, 'import numpy as np\n'), ((2352, 2365), 'numpy.sort', 'np.sort', (['perm'], {}), '(perm)\n', (2359, 2365), True, 'import numpy as np\n'), ((2369, 2382), 'numpy.arange', 'np.arange', (['n_'], {}), '(n_)\n', (2378, 2382), True, 'import numpy as np\n'), ((1060, 1072), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (1069, 1072), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('mapeo', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='organizaciones',
name='slug',
field=models.SlugField(default=1, max_length=450, editable=False),
preserve_default=False,
),
migrations.AlterField(
model_name='organizaciones',
name='tipo',
field=models.IntegerField(choices=[(1, b'Organizaci\xc3\xb3n que apoya y participa con la Campa\xc3\xb1a'), (2, b'Comit\xc3\xa9 comunal'), (3, b'Diplomado de promotor\xc3\xada'), (4, b'Diplomado de comunicaci\xc3\xb3n')]),
),
]
|
[
"django.db.models.IntegerField",
"django.db.models.SlugField"
] |
[((343, 402), 'django.db.models.SlugField', 'models.SlugField', ([], {'default': '(1)', 'max_length': '(450)', 'editable': '(False)'}), '(default=1, max_length=450, editable=False)\n', (359, 402), False, 'from django.db import models, migrations\n'), ((566, 793), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': "[(1, b'Organizaci\\xc3\\xb3n que apoya y participa con la Campa\\xc3\\xb1a'), (\n 2, b'Comit\\xc3\\xa9 comunal'), (3, b'Diplomado de promotor\\xc3\\xada'), (\n 4, b'Diplomado de comunicaci\\xc3\\xb3n')]"}), "(choices=[(1,\n b'Organizaci\\xc3\\xb3n que apoya y participa con la Campa\\xc3\\xb1a'), (2,\n b'Comit\\xc3\\xa9 comunal'), (3, b'Diplomado de promotor\\xc3\\xada'), (4,\n b'Diplomado de comunicaci\\xc3\\xb3n')])\n", (585, 793), False, 'from django.db import models, migrations\n')]
|
# Generated by Django 2.2.13 on 2020-09-10 15:53
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("hierarchy", "0010_auto_20200910_1650")]
operations = [
migrations.AlterUniqueTogether(
name="subheading",
unique_together={("commodity_code", "description", "nomenclature_tree")},
)
]
|
[
"django.db.migrations.AlterUniqueTogether"
] |
[((215, 343), 'django.db.migrations.AlterUniqueTogether', 'migrations.AlterUniqueTogether', ([], {'name': '"""subheading"""', 'unique_together': "{('commodity_code', 'description', 'nomenclature_tree')}"}), "(name='subheading', unique_together={(\n 'commodity_code', 'description', 'nomenclature_tree')})\n", (245, 343), False, 'from django.db import migrations\n')]
|
from contextlib import ExitStack
from functools import wraps
from typing import TYPE_CHECKING, Any, Dict, Optional, TypeVar, overload
import fsspec
from funcy import cached_property
if TYPE_CHECKING:
from typing import BinaryIO, Callable, TextIO, Union
from typing_extensions import ParamSpec
from dvc.progress import Tqdm
from dvc.ui._rich_progress import RichTransferProgress
_P = ParamSpec("_P")
_R = TypeVar("_R")
class FsspecCallback(fsspec.Callback):
"""FsspecCallback usable as a context manager, and a few helper methods."""
@overload
def wrap_attr(self, fobj: "BinaryIO", method: str = "read") -> "BinaryIO":
...
@overload
def wrap_attr(self, fobj: "TextIO", method: str = "read") -> "TextIO":
...
def wrap_attr(
self, fobj: "Union[TextIO, BinaryIO]", method: str = "read"
) -> "Union[TextIO, BinaryIO]":
from tqdm.utils import CallbackIOWrapper
wrapped = CallbackIOWrapper(self.relative_update, fobj, method)
return wrapped
def wrap_fn(self, fn: "Callable[_P, _R]") -> "Callable[_P, _R]":
@wraps(fn)
def wrapped(*args: "_P.args", **kwargs: "_P.kwargs") -> "_R":
res = fn(*args, **kwargs)
self.relative_update()
return res
return wrapped
def wrap_and_branch(self, fn: "Callable") -> "Callable":
"""
Wraps a function, and pass a new child callback to it.
When the function completes, we increment the parent callback by 1.
"""
wrapped = self.wrap_fn(fn)
@wraps(fn)
def func(path1: str, path2: str):
kw: Dict[str, Any] = {}
with self.branch(path1, path2, kw):
return wrapped(path1, path2, **kw)
return func
def __enter__(self):
return self
def __exit__(self, *exc_args):
self.close()
def close(self):
"""Handle here on exit."""
def relative_update(self, inc: int = 1) -> None:
inc = inc if inc is not None else 0
return super().relative_update(inc)
def absolute_update(self, value: int) -> None:
value = value if value is not None else self.value
return super().absolute_update(value)
@classmethod
def as_callback(
cls, maybe_callback: Optional["FsspecCallback"] = None
) -> "FsspecCallback":
if maybe_callback is None:
return DEFAULT_CALLBACK
return maybe_callback
@classmethod
def as_tqdm_callback(
cls,
callback: Optional["FsspecCallback"] = None,
**tqdm_kwargs: Any,
) -> "FsspecCallback":
return callback or TqdmCallback(**tqdm_kwargs)
@classmethod
def as_rich_callback(
cls, callback: Optional["FsspecCallback"] = None, **rich_kwargs
):
return callback or RichCallback(**rich_kwargs)
def branch(
self,
path_1: str,
path_2: str,
kwargs: Dict[str, Any],
child: "FsspecCallback" = None,
) -> "FsspecCallback":
child = kwargs["callback"] = child or DEFAULT_CALLBACK
return child
class NoOpCallback(FsspecCallback, fsspec.callbacks.NoOpCallback):
pass
class TqdmCallback(FsspecCallback):
def __init__(
self,
size: Optional[int] = None,
value: int = 0,
progress_bar: "Tqdm" = None,
**tqdm_kwargs,
):
tqdm_kwargs["total"] = size or -1
self._tqdm_kwargs = tqdm_kwargs
self._progress_bar = progress_bar
self._stack = ExitStack()
super().__init__(size=size, value=value)
@cached_property
def progress_bar(self):
from dvc.progress import Tqdm
progress_bar = (
self._progress_bar
if self._progress_bar is not None
else Tqdm(**self._tqdm_kwargs)
)
return self._stack.enter_context(progress_bar)
def __enter__(self):
return self
def close(self):
self._stack.close()
def set_size(self, size):
# Tqdm tries to be smart when to refresh,
# so we try to force it to re-render.
super().set_size(size)
self.progress_bar.refresh()
def call(self, hook_name=None, **kwargs):
self.progress_bar.update_to(self.value, total=self.size)
def branch(
self,
path_1: str,
path_2: str,
kwargs,
child: Optional[FsspecCallback] = None,
):
child = child or TqdmCallback(bytes=True, desc=path_1)
return super().branch(path_1, path_2, kwargs, child=child)
class RichCallback(FsspecCallback):
def __init__(
self,
size: Optional[int] = None,
value: int = 0,
progress: "RichTransferProgress" = None,
desc: str = None,
bytes: bool = False, # pylint: disable=redefined-builtin
unit: str = None,
disable: bool = False,
) -> None:
self._progress = progress
self.disable = disable
self._task_kwargs = {
"description": desc or "",
"bytes": bytes,
"unit": unit,
"total": size or 0,
"visible": False,
"progress_type": None if bytes else "summary",
}
self._stack = ExitStack()
super().__init__(size=size, value=value)
@cached_property
def progress(self):
from dvc.ui import ui
from dvc.ui._rich_progress import RichTransferProgress
if self._progress is not None:
return self._progress
progress = RichTransferProgress(
transient=True,
disable=self.disable,
console=ui.error_console,
)
return self._stack.enter_context(progress)
@cached_property
def task(self):
return self.progress.add_task(**self._task_kwargs)
def __enter__(self):
return self
def close(self):
self.progress.clear_task(self.task)
self._stack.close()
def call(self, hook_name=None, **kwargs):
self.progress.update(
self.task,
completed=self.value,
total=self.size,
visible=not self.disable,
)
def branch(
self, path_1, path_2, kwargs, child: Optional[FsspecCallback] = None
):
child = child or RichCallback(
progress=self.progress, desc=path_1, bytes=True
)
return super().branch(path_1, path_2, kwargs, child=child)
DEFAULT_CALLBACK = NoOpCallback()
|
[
"typing_extensions.ParamSpec",
"contextlib.ExitStack",
"functools.wraps",
"tqdm.utils.CallbackIOWrapper",
"typing.TypeVar",
"dvc.ui._rich_progress.RichTransferProgress",
"dvc.progress.Tqdm"
] |
[((408, 423), 'typing_extensions.ParamSpec', 'ParamSpec', (['"""_P"""'], {}), "('_P')\n", (417, 423), False, 'from typing_extensions import ParamSpec\n'), ((433, 446), 'typing.TypeVar', 'TypeVar', (['"""_R"""'], {}), "('_R')\n", (440, 446), False, 'from typing import TYPE_CHECKING, Any, Dict, Optional, TypeVar, overload\n'), ((968, 1021), 'tqdm.utils.CallbackIOWrapper', 'CallbackIOWrapper', (['self.relative_update', 'fobj', 'method'], {}), '(self.relative_update, fobj, method)\n', (985, 1021), False, 'from tqdm.utils import CallbackIOWrapper\n'), ((1124, 1133), 'functools.wraps', 'wraps', (['fn'], {}), '(fn)\n', (1129, 1133), False, 'from functools import wraps\n'), ((1594, 1603), 'functools.wraps', 'wraps', (['fn'], {}), '(fn)\n', (1599, 1603), False, 'from functools import wraps\n'), ((3566, 3577), 'contextlib.ExitStack', 'ExitStack', ([], {}), '()\n', (3575, 3577), False, 'from contextlib import ExitStack\n'), ((5286, 5297), 'contextlib.ExitStack', 'ExitStack', ([], {}), '()\n', (5295, 5297), False, 'from contextlib import ExitStack\n'), ((5580, 5669), 'dvc.ui._rich_progress.RichTransferProgress', 'RichTransferProgress', ([], {'transient': '(True)', 'disable': 'self.disable', 'console': 'ui.error_console'}), '(transient=True, disable=self.disable, console=ui.\n error_console)\n', (5600, 5669), False, 'from dvc.ui._rich_progress import RichTransferProgress\n'), ((3835, 3860), 'dvc.progress.Tqdm', 'Tqdm', ([], {}), '(**self._tqdm_kwargs)\n', (3839, 3860), False, 'from dvc.progress import Tqdm\n')]
|
# Copyright (c) 2020, Huawei Technologies.All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import torch
import numpy as np
import copy
from common_utils import TestCase, run_tests
from common_device_type import dtypes, instantiate_device_type_tests
from util_test import create_common_tensor
class TestNe(TestCase):
def cpu_op_exec_scalar(self, input1, other):
output = torch.ne(input1, other)
output = output.numpy()
return output
def npu_op_exec_scalar(self,input1, other):
output = torch.ne(input1, other)
output1 = output.to("cpu")
output2 = output1.numpy()
return output2
def cpu_op_exec(self, input1, other):
output = torch.ne(input1, other)
output = output.numpy()
return output
def npu_op_exec(self,input1, other):
output = torch.ne(input1, other)
output = output.to("cpu")
output = output.numpy()
return output
def cpu_op_exec_(self,input1, other):
torch.ne_(input1,other)
output = input1.numpy()
return output
def npu_op_exec_(self,input1, other):
torch.ne_(input1, other)
output = input1.to("cpu")
output = output.numpy()
return output
def cpu_op_exec_scalar_(self,input1, other):
torch.ne_(input1,other)
output = input1.numpy()
return output
def npu_op_exec_scalar_(self,input1, other):
torch.ne_(input1, other)
output = input1.to("cpu")
output = output.numpy()
return output
def cpu_op_exec_scalar_out(self,input1,other, out):
torch.ne(input1,other, out=out)
output = out.numpy()
return output
def npu_op_exec_scalar_out(self,input1, other, out):
torch.ne(input1, other, out=out)
output = out.to("cpu")
output = output.numpy()
return output
def cpu_op_exec_out(self,input1,other, out):
torch.ne(input1,other, out=out)
output = out.numpy()
return output
def npu_op_exec_out(self,input1, other, out):
torch.ne(input1, other, out=out)
output = out.to("cpu")
output = output.numpy()
return output
def test_ne_scalar_common_shape_format(self, device):
shape_format = [
[[np.float32,0 , (2,4, 3)], 3],
[[np.float32, 3, (2, 3)], 2],
[[np.float32, 0, (3, 2)], 8],
[[np.int8, 0 , (4, 3)],3],
[[np.uint8, -1, (2,4, 3)],3],
[[np.int32, 0, (2, 6)],6]
]
for item in shape_format:
cpu_input1, npu_input1 = create_common_tensor(item[0], 1, 10)
cpu_output = self.cpu_op_exec_scalar(cpu_input1, item[1])
npu_output = self.npu_op_exec_scalar(npu_input1, item[1])
self.assertRtolEqual(cpu_output, npu_output)
def test_ne_common_shape_format(self, device):
shape_format = [
[[np.float32,0 , (2, 4, 3)], [np.float32,0 , (2, 4, 3)]],
[[np.float32, 3, (2, 3)], [np.float32, 3, (2, 3)]],
[[np.float32, 0, (3, 2)], [np.float32, 0, (3, 2)]],
[[np.int8, 0 , (4, 3)], [np.int8, 0 , (4, 3)]],
[[np.uint8, -1, (2,4, 3)], [np.uint8, -1, (2,4, 3)]],
[[np.int32, 0, (2, 6)], [np.int32, 0, (2, 6)]],
]
for item in shape_format:
cpu_input1, npu_input1 = create_common_tensor(item[0], 1, 10)
cpu_input2, npu_input2 = create_common_tensor(item[1], 1, 10)
cpu_output = self.cpu_op_exec(cpu_input1, cpu_input2)
npu_output = self.npu_op_exec(npu_input1, npu_input2)
self.assertRtolEqual(cpu_output, npu_output)
def test_ne_scalar_out_common_shape_format(self, device):
shape_format = [
[[np.float32,0 , (2, 4, 3)], 2, [np.bool, 0 , (2, 4, 3)]],
[[np.float32, 3, (2, 3)], 3, [np.bool, -1, (2, 3)]],
[[np.float32, 0, (3, 2)], 4, [np.bool, 0, (3, 2)]],
[[np.int8, 0 , (4, 3)], 5, [np.bool, 0 , (4, 3)]],
[[np.uint8, -1, (2,4, 3)], 6, [np.bool, -1, (2,4, 3)]],
[[np.int32, 0, (2, 6)], 7, [np.bool, 0, (2, 6)]]
]
for item in shape_format:
cpu_input1, npu_input1 = create_common_tensor(item[0], 1, 10)
cpu_out, npu_out = create_common_tensor(item[2], 1, 10)
cpu_output = self.cpu_op_exec_scalar_out(cpu_input1, item[1], cpu_out)
npu_output = self.npu_op_exec_scalar_out(npu_input1, item[1], npu_out)
self.assertRtolEqual(cpu_output, npu_output)
def test_ne_out_common_shape_format(self, device):
shape_format = [
[[np.float32,0 , (2, 4, 3)], [np.float32,0 , (2, 4, 3)], [np.bool, 0 , (2, 4, 3)]],
[[np.float32, 3, (2, 3)], [np.float32, 3, (2, 3)], [np.bool, -1, (2, 3)]],
[[np.float32, 0, (3, 2)], [np.float32, 0, (3, 2)], [np.bool, 0, (3, 2)]],
[[np.int8, 0 , (4, 3)], [np.int8, 0 , (4, 3)], [np.bool, 0 , (4, 3)]],
[[np.uint8, -1, (2,4, 3)], [np.uint8, -1, (2,4, 3)], [np.bool, -1, (2,4, 3)]],
[[np.int32, 0, (2, 6)], [np.int32, 0, (2, 6)], [np.bool, 0, (2, 6)]]
]
for item in shape_format:
cpu_input1, npu_input1 = create_common_tensor(item[0], 1, 10)
cpu_input2, npu_input2 = create_common_tensor(item[1], 1, 10)
cpu_out, npu_out = create_common_tensor(item[2], 1, 10)
cpu_output = self.cpu_op_exec_out(cpu_input1, cpu_input2, cpu_out)
npu_output = self.npu_op_exec_out(npu_input1, npu_input2, npu_out)
self.assertRtolEqual(cpu_output, npu_output)
instantiate_device_type_tests(TestNe, globals(), except_for="cpu")
if __name__ == "__main__":
run_tests()
|
[
"util_test.create_common_tensor",
"torch.ne",
"torch.ne_",
"common_utils.run_tests"
] |
[((6502, 6513), 'common_utils.run_tests', 'run_tests', ([], {}), '()\n', (6511, 6513), False, 'from common_utils import TestCase, run_tests\n'), ((912, 935), 'torch.ne', 'torch.ne', (['input1', 'other'], {}), '(input1, other)\n', (920, 935), False, 'import torch\n'), ((1056, 1079), 'torch.ne', 'torch.ne', (['input1', 'other'], {}), '(input1, other)\n', (1064, 1079), False, 'import torch\n'), ((1232, 1255), 'torch.ne', 'torch.ne', (['input1', 'other'], {}), '(input1, other)\n', (1240, 1255), False, 'import torch\n'), ((1369, 1392), 'torch.ne', 'torch.ne', (['input1', 'other'], {}), '(input1, other)\n', (1377, 1392), False, 'import torch\n'), ((1532, 1556), 'torch.ne_', 'torch.ne_', (['input1', 'other'], {}), '(input1, other)\n', (1541, 1556), False, 'import torch\n'), ((1661, 1685), 'torch.ne_', 'torch.ne_', (['input1', 'other'], {}), '(input1, other)\n', (1670, 1685), False, 'import torch\n'), ((1832, 1856), 'torch.ne_', 'torch.ne_', (['input1', 'other'], {}), '(input1, other)\n', (1841, 1856), False, 'import torch\n'), ((1968, 1992), 'torch.ne_', 'torch.ne_', (['input1', 'other'], {}), '(input1, other)\n', (1977, 1992), False, 'import torch\n'), ((2146, 2178), 'torch.ne', 'torch.ne', (['input1', 'other'], {'out': 'out'}), '(input1, other, out=out)\n', (2154, 2178), False, 'import torch\n'), ((2295, 2327), 'torch.ne', 'torch.ne', (['input1', 'other'], {'out': 'out'}), '(input1, other, out=out)\n', (2303, 2327), False, 'import torch\n'), ((2471, 2503), 'torch.ne', 'torch.ne', (['input1', 'other'], {'out': 'out'}), '(input1, other, out=out)\n', (2479, 2503), False, 'import torch\n'), ((2613, 2645), 'torch.ne', 'torch.ne', (['input1', 'other'], {'out': 'out'}), '(input1, other, out=out)\n', (2621, 2645), False, 'import torch\n'), ((3187, 3223), 'util_test.create_common_tensor', 'create_common_tensor', (['item[0]', '(1)', '(10)'], {}), '(item[0], 1, 10)\n', (3207, 3223), False, 'from util_test import create_common_tensor\n'), ((4002, 4038), 'util_test.create_common_tensor', 'create_common_tensor', (['item[0]', '(1)', '(10)'], {}), '(item[0], 1, 10)\n', (4022, 4038), False, 'from util_test import create_common_tensor\n'), ((4076, 4112), 'util_test.create_common_tensor', 'create_common_tensor', (['item[1]', '(1)', '(10)'], {}), '(item[1], 1, 10)\n', (4096, 4112), False, 'from util_test import create_common_tensor\n'), ((4918, 4954), 'util_test.create_common_tensor', 'create_common_tensor', (['item[0]', '(1)', '(10)'], {}), '(item[0], 1, 10)\n', (4938, 4954), False, 'from util_test import create_common_tensor\n'), ((4986, 5022), 'util_test.create_common_tensor', 'create_common_tensor', (['item[2]', '(1)', '(10)'], {}), '(item[2], 1, 10)\n', (5006, 5022), False, 'from util_test import create_common_tensor\n'), ((6005, 6041), 'util_test.create_common_tensor', 'create_common_tensor', (['item[0]', '(1)', '(10)'], {}), '(item[0], 1, 10)\n', (6025, 6041), False, 'from util_test import create_common_tensor\n'), ((6079, 6115), 'util_test.create_common_tensor', 'create_common_tensor', (['item[1]', '(1)', '(10)'], {}), '(item[1], 1, 10)\n', (6099, 6115), False, 'from util_test import create_common_tensor\n'), ((6147, 6183), 'util_test.create_common_tensor', 'create_common_tensor', (['item[2]', '(1)', '(10)'], {}), '(item[2], 1, 10)\n', (6167, 6183), False, 'from util_test import create_common_tensor\n')]
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for augmentation_lib."""
from typing import Tuple
from absl.testing import absltest
from assessment_plan_modeling.ap_parsing import ap_parsing_lib
from assessment_plan_modeling.ap_parsing import augmentation_lib as aug_lib
def tuple_fragment(fragment):
return (str(fragment.labeled_char_span), fragment.text, fragment.prefix_delim,
fragment.suffix_delim)
def fragments_tuple(cluster):
return tuple(
set([tuple_fragment(fragment) for fragment in cluster.fragments]))
class StructuredAPTest(absltest.TestCase):
def test_build(self):
ap = "\n".join(
["50 yo m with hx of dm2, copd", "dm2: on insulin", "- RISS"])
labeled_char_spans = [
ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType.PROBLEM_TITLE,
start_char=29,
end_char=32),
ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType.PROBLEM_DESCRIPTION,
start_char=34,
end_char=44),
ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType.ACTION_ITEM,
start_char=47,
end_char=51),
]
expected = aug_lib.StructuredAP(
prefix_text="50 yo m with hx of dm2, copd",
problem_clusters=[
aug_lib.ProblemCluster(fragments=[
aug_lib.ProblemClusterFragment(
labeled_char_span=ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType.PROBLEM_TITLE,
start_char=29,
end_char=32),
text="dm2",
prefix_delim=aug_lib._DefaultDelims.PROBLEM_TITLE_PREFIX,
suffix_delim=aug_lib._DefaultDelims.PROBLEM_TITLE_SUFFIX),
aug_lib.ProblemClusterFragment(
labeled_char_span=ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType
.PROBLEM_DESCRIPTION,
start_char=34,
end_char=44),
text="on insulin",
prefix_delim=aug_lib._DefaultDelims
.PROBLEM_DESCRIPTION_PREFIX,
suffix_delim=""),
aug_lib.ProblemClusterFragment(
labeled_char_span=ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType.ACTION_ITEM,
start_char=47,
end_char=51),
text="RISS",
prefix_delim=aug_lib._DefaultDelims.ACTION_ITEM_PREFIX,
suffix_delim=""),
])
])
structured_ap = aug_lib.StructuredAP.build(ap, labeled_char_spans)
self.assertEqual(structured_ap, expected)
def test_compile(self):
structured_ap = aug_lib.StructuredAP(
prefix_text="50 yo m with hx of dm2, copd",
problem_clusters=[
aug_lib.ProblemCluster(fragments=[
aug_lib.ProblemClusterFragment(
labeled_char_span=ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType.PROBLEM_TITLE,
start_char=29,
end_char=32),
text="dm2",
prefix_delim="\n*. ",
suffix_delim=": "),
aug_lib.ProblemClusterFragment(
labeled_char_span=ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType
.PROBLEM_DESCRIPTION,
start_char=34,
end_char=44),
text="on insulin",
prefix_delim="",
suffix_delim=""),
aug_lib.ProblemClusterFragment(
labeled_char_span=ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType.ACTION_ITEM,
start_char=47,
end_char=51),
text="RISS",
prefix_delim="\n- ",
suffix_delim=""),
])
])
expected = "50 yo m with hx of dm2, copd\n*. dm2: on insulin\n- RISS"
result, _ = structured_ap.compile()
self.assertEqual(result, expected)
def test_compile_with_labels(self):
structured_ap = aug_lib.StructuredAP(
prefix_text="50 yo m with hx of dm2, copd",
problem_clusters=[ # spans are kept from *original* text.
aug_lib.ProblemCluster(fragments=[
aug_lib.ProblemClusterFragment(
labeled_char_span=ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType.PROBLEM_TITLE,
start_char=29,
end_char=32),
text="dm2",
prefix_delim="\n*. ",
suffix_delim=": "),
aug_lib.ProblemClusterFragment(
labeled_char_span=ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType
.PROBLEM_DESCRIPTION,
start_char=34,
end_char=44),
text="on insulin",
prefix_delim="",
suffix_delim=""),
aug_lib.ProblemClusterFragment(
labeled_char_span=ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType.ACTION_ITEM,
start_char=47,
end_char=51),
text="RISS",
prefix_delim="\n- ",
suffix_delim=""),
])
])
expected = (
"50 yo m with hx of dm2, copd\n*. dm2: on insulin\n- RISS",
[
ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType.PROBLEM_TITLE,
start_char=32,
end_char=35), # span_text="dm2"
ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType.PROBLEM_DESCRIPTION,
start_char=37,
end_char=47), # span_text="on insulin"
ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType.ACTION_ITEM,
start_char=50,
end_char=54), # span_text="RISS"
])
result_ap_text, result_labeled_char_spans = structured_ap.compile()
self.assertEqual((result_ap_text, result_labeled_char_spans), expected)
class AugmentationsTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.problem_clusters = [
aug_lib.ProblemCluster(fragments=[
aug_lib.ProblemClusterFragment(
labeled_char_span=ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType.PROBLEM_TITLE,
start_char=29,
end_char=32),
text="dm2",
prefix_delim="",
suffix_delim=""),
aug_lib.ProblemClusterFragment(
labeled_char_span=ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType
.PROBLEM_DESCRIPTION,
start_char=34,
end_char=44),
text="on insulin",
prefix_delim="",
suffix_delim=""),
aug_lib.ProblemClusterFragment(
labeled_char_span=ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType.ACTION_ITEM,
action_item_type=ap_parsing_lib.ActionItemType.MEDICATIONS,
start_char=47,
end_char=51),
text="RISS",
prefix_delim="",
suffix_delim="")
]),
aug_lib.ProblemCluster(fragments=[
aug_lib.ProblemClusterFragment(
labeled_char_span=ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType.PROBLEM_TITLE,
start_char=52,
end_char=58),
text="anemia",
prefix_delim="",
suffix_delim=""),
aug_lib.ProblemClusterFragment(
labeled_char_span=ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType.ACTION_ITEM,
action_item_type=ap_parsing_lib.ActionItemType
.OBSERVATIONS_LABS,
start_char=59,
end_char=64),
text="trend",
prefix_delim="",
suffix_delim="")
]),
aug_lib.ProblemCluster(fragments=[
aug_lib.ProblemClusterFragment(
labeled_char_span=ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType.PROBLEM_TITLE,
start_char=65,
end_char=69),
text="COPD",
prefix_delim="",
suffix_delim=""),
aug_lib.ProblemClusterFragment(
labeled_char_span=ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType.ACTION_ITEM,
action_item_type=ap_parsing_lib.ActionItemType.MEDICATIONS,
start_char=70,
end_char=74),
text="nebs",
prefix_delim="",
suffix_delim="")
]),
aug_lib.ProblemCluster(fragments=[
aug_lib.ProblemClusterFragment(
labeled_char_span=ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType.PROBLEM_TITLE,
start_char=75,
end_char=81),
text="sepsis",
prefix_delim="",
suffix_delim=""),
aug_lib.ProblemClusterFragment(
labeled_char_span=ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType
.PROBLEM_DESCRIPTION,
start_char=82,
end_char=93),
text="dd pna, uti",
prefix_delim="",
suffix_delim=""),
aug_lib.ProblemClusterFragment(
labeled_char_span=ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType
.PROBLEM_DESCRIPTION,
start_char=94,
end_char=117),
text="yesterday without fever",
prefix_delim="",
suffix_delim=""),
aug_lib.ProblemClusterFragment(
labeled_char_span=ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType.ACTION_ITEM,
action_item_type=ap_parsing_lib.ActionItemType.MEDICATIONS,
start_char=118,
end_char=127),
text="cont. abx",
prefix_delim="",
suffix_delim=""),
aug_lib.ProblemClusterFragment(
labeled_char_span=ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType.ACTION_ITEM,
action_item_type=ap_parsing_lib.ActionItemType
.OBSERVATIONS_LABS,
start_char=128,
end_char=131),
text="cis",
prefix_delim="",
suffix_delim=""),
aug_lib.ProblemClusterFragment(
labeled_char_span=ap_parsing_lib.LabeledCharSpan(
span_type=ap_parsing_lib.LabeledSpanType.ACTION_ITEM,
action_item_type=ap_parsing_lib.ActionItemType.CONSULTS,
start_char=132,
end_char=142),
text="id consult",
prefix_delim="",
suffix_delim="")
])
]
self.ap = aug_lib.StructuredAP(
problem_clusters=self.problem_clusters, prefix_text="")
def test_shuffle_clusters(self):
aug = aug_lib.ShuffleClusters()
augmented_ap = aug(self.ap, seed=0)
set_problem_clusters = set(
[fragments_tuple(cluster) for cluster in self.problem_clusters])
set_aug_clusters = set(
[fragments_tuple(cluster) for cluster in augmented_ap.problem_clusters])
self.assertEqual(set_problem_clusters, set_aug_clusters)
def test_shuffle_fragments(self):
aug = aug_lib.ShuffleFragments()
augmented_ap = aug(self.ap, seed=0)
self.assertEqual(
fragments_tuple(self.problem_clusters[0]),
fragments_tuple(augmented_ap.problem_clusters[0]))
def test_number_title_augmentation(self):
aug = aug_lib.NumberTitlesAugmentation(["\n{:d})"])
augmented_ap = aug(self.ap, seed=0)
expected = self.ap
for i, cluster in enumerate(expected.problem_clusters):
cluster.fragments[0].prefix_delim = f"\n{i+1})"
self.assertEqual(expected, augmented_ap)
def test_change_delim_augmentation(self):
aug = aug_lib.ChangeDelimAugmentation(
fragment_types=[
ap_parsing_lib.LabeledSpanType.PROBLEM_TITLE,
ap_parsing_lib.LabeledSpanType.PROBLEM_DESCRIPTION,
ap_parsing_lib.LabeledSpanType.ACTION_ITEM
],
delims=["*"])
augmented_ap = aug(self.ap, seed=0)
expected = self.ap
for cluster in expected.problem_clusters:
for fragment in cluster.fragments:
fragment.prefix_delim = "*"
self.assertEqual(expected, augmented_ap)
def test_apply_augmentations(self):
augs = aug_lib.AugmentationSequence(
name="test",
augmentation_sequence=[
aug_lib.NumberTitlesAugmentation(["\n{}."]),
aug_lib.ChangeDelimAugmentation(
[ap_parsing_lib.LabeledSpanType.PROBLEM_DESCRIPTION],
["\n-- "]),
aug_lib.ChangeDelimAugmentation(
[ap_parsing_lib.LabeledSpanType.ACTION_ITEM], ["\n--- "])
])
results = aug_lib.apply_augmentations(self.ap, augs, seed=0)
expected = self.ap
for i, cluster in enumerate(expected.problem_clusters):
for fragment in cluster.fragments:
if fragment.labeled_char_span.span_type == ap_parsing_lib.LabeledSpanType.PROBLEM_TITLE:
prefix_delim = f"\n{i+1}."
elif fragment.labeled_char_span.span_type == ap_parsing_lib.LabeledSpanType.PROBLEM_DESCRIPTION:
prefix_delim = "\n-- "
elif fragment.labeled_char_span.span_type == ap_parsing_lib.LabeledSpanType.ACTION_ITEM:
prefix_delim = "\n--- "
fragment.prefix_delim = prefix_delim
self.assertEqual(expected, results)
if __name__ == "__main__":
absltest.main()
|
[
"absl.testing.absltest.main",
"assessment_plan_modeling.ap_parsing.augmentation_lib.ChangeDelimAugmentation",
"assessment_plan_modeling.ap_parsing.augmentation_lib.NumberTitlesAugmentation",
"assessment_plan_modeling.ap_parsing.ap_parsing_lib.LabeledCharSpan",
"assessment_plan_modeling.ap_parsing.augmentation_lib.apply_augmentations",
"assessment_plan_modeling.ap_parsing.augmentation_lib.ShuffleFragments",
"assessment_plan_modeling.ap_parsing.augmentation_lib.StructuredAP.build",
"assessment_plan_modeling.ap_parsing.augmentation_lib.StructuredAP",
"assessment_plan_modeling.ap_parsing.augmentation_lib.ShuffleClusters"
] |
[((15701, 15716), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (15714, 15716), False, 'from absl.testing import absltest\n'), ((3372, 3422), 'assessment_plan_modeling.ap_parsing.augmentation_lib.StructuredAP.build', 'aug_lib.StructuredAP.build', (['ap', 'labeled_char_spans'], {}), '(ap, labeled_char_spans)\n', (3398, 3422), True, 'from assessment_plan_modeling.ap_parsing import augmentation_lib as aug_lib\n'), ((12927, 13003), 'assessment_plan_modeling.ap_parsing.augmentation_lib.StructuredAP', 'aug_lib.StructuredAP', ([], {'problem_clusters': 'self.problem_clusters', 'prefix_text': '""""""'}), "(problem_clusters=self.problem_clusters, prefix_text='')\n", (12947, 13003), True, 'from assessment_plan_modeling.ap_parsing import augmentation_lib as aug_lib\n'), ((13059, 13084), 'assessment_plan_modeling.ap_parsing.augmentation_lib.ShuffleClusters', 'aug_lib.ShuffleClusters', ([], {}), '()\n', (13082, 13084), True, 'from assessment_plan_modeling.ap_parsing import augmentation_lib as aug_lib\n'), ((13448, 13474), 'assessment_plan_modeling.ap_parsing.augmentation_lib.ShuffleFragments', 'aug_lib.ShuffleFragments', ([], {}), '()\n', (13472, 13474), True, 'from assessment_plan_modeling.ap_parsing import augmentation_lib as aug_lib\n'), ((13703, 13748), 'assessment_plan_modeling.ap_parsing.augmentation_lib.NumberTitlesAugmentation', 'aug_lib.NumberTitlesAugmentation', (["['\\n{:d})']"], {}), "(['\\n{:d})'])\n", (13735, 13748), True, 'from assessment_plan_modeling.ap_parsing import augmentation_lib as aug_lib\n'), ((14027, 14245), 'assessment_plan_modeling.ap_parsing.augmentation_lib.ChangeDelimAugmentation', 'aug_lib.ChangeDelimAugmentation', ([], {'fragment_types': '[ap_parsing_lib.LabeledSpanType.PROBLEM_TITLE, ap_parsing_lib.\n LabeledSpanType.PROBLEM_DESCRIPTION, ap_parsing_lib.LabeledSpanType.\n ACTION_ITEM]', 'delims': "['*']"}), "(fragment_types=[ap_parsing_lib.\n LabeledSpanType.PROBLEM_TITLE, ap_parsing_lib.LabeledSpanType.\n PROBLEM_DESCRIPTION, ap_parsing_lib.LabeledSpanType.ACTION_ITEM],\n delims=['*'])\n", (14058, 14245), True, 'from assessment_plan_modeling.ap_parsing import augmentation_lib as aug_lib\n'), ((15006, 15056), 'assessment_plan_modeling.ap_parsing.augmentation_lib.apply_augmentations', 'aug_lib.apply_augmentations', (['self.ap', 'augs'], {'seed': '(0)'}), '(self.ap, augs, seed=0)\n', (15033, 15056), True, 'from assessment_plan_modeling.ap_parsing import augmentation_lib as aug_lib\n'), ((1304, 1423), 'assessment_plan_modeling.ap_parsing.ap_parsing_lib.LabeledCharSpan', 'ap_parsing_lib.LabeledCharSpan', ([], {'span_type': 'ap_parsing_lib.LabeledSpanType.PROBLEM_TITLE', 'start_char': '(29)', 'end_char': '(32)'}), '(span_type=ap_parsing_lib.LabeledSpanType.\n PROBLEM_TITLE, start_char=29, end_char=32)\n', (1334, 1423), False, 'from assessment_plan_modeling.ap_parsing import ap_parsing_lib\n'), ((1465, 1590), 'assessment_plan_modeling.ap_parsing.ap_parsing_lib.LabeledCharSpan', 'ap_parsing_lib.LabeledCharSpan', ([], {'span_type': 'ap_parsing_lib.LabeledSpanType.PROBLEM_DESCRIPTION', 'start_char': '(34)', 'end_char': '(44)'}), '(span_type=ap_parsing_lib.LabeledSpanType.\n PROBLEM_DESCRIPTION, start_char=34, end_char=44)\n', (1495, 1590), False, 'from assessment_plan_modeling.ap_parsing import ap_parsing_lib\n'), ((1632, 1749), 'assessment_plan_modeling.ap_parsing.ap_parsing_lib.LabeledCharSpan', 'ap_parsing_lib.LabeledCharSpan', ([], {'span_type': 'ap_parsing_lib.LabeledSpanType.ACTION_ITEM', 'start_char': '(47)', 'end_char': '(51)'}), '(span_type=ap_parsing_lib.LabeledSpanType.\n ACTION_ITEM, start_char=47, end_char=51)\n', (1662, 1749), False, 'from assessment_plan_modeling.ap_parsing import ap_parsing_lib\n'), ((6613, 6732), 'assessment_plan_modeling.ap_parsing.ap_parsing_lib.LabeledCharSpan', 'ap_parsing_lib.LabeledCharSpan', ([], {'span_type': 'ap_parsing_lib.LabeledSpanType.PROBLEM_TITLE', 'start_char': '(32)', 'end_char': '(35)'}), '(span_type=ap_parsing_lib.LabeledSpanType.\n PROBLEM_TITLE, start_char=32, end_char=35)\n', (6643, 6732), False, 'from assessment_plan_modeling.ap_parsing import ap_parsing_lib\n'), ((6809, 6934), 'assessment_plan_modeling.ap_parsing.ap_parsing_lib.LabeledCharSpan', 'ap_parsing_lib.LabeledCharSpan', ([], {'span_type': 'ap_parsing_lib.LabeledSpanType.PROBLEM_DESCRIPTION', 'start_char': '(37)', 'end_char': '(47)'}), '(span_type=ap_parsing_lib.LabeledSpanType.\n PROBLEM_DESCRIPTION, start_char=37, end_char=47)\n', (6839, 6934), False, 'from assessment_plan_modeling.ap_parsing import ap_parsing_lib\n'), ((7018, 7135), 'assessment_plan_modeling.ap_parsing.ap_parsing_lib.LabeledCharSpan', 'ap_parsing_lib.LabeledCharSpan', ([], {'span_type': 'ap_parsing_lib.LabeledSpanType.ACTION_ITEM', 'start_char': '(50)', 'end_char': '(54)'}), '(span_type=ap_parsing_lib.LabeledSpanType.\n ACTION_ITEM, start_char=50, end_char=54)\n', (7048, 7135), False, 'from assessment_plan_modeling.ap_parsing import ap_parsing_lib\n'), ((14673, 14716), 'assessment_plan_modeling.ap_parsing.augmentation_lib.NumberTitlesAugmentation', 'aug_lib.NumberTitlesAugmentation', (["['\\n{}.']"], {}), "(['\\n{}.'])\n", (14705, 14716), True, 'from assessment_plan_modeling.ap_parsing import augmentation_lib as aug_lib\n'), ((14730, 14831), 'assessment_plan_modeling.ap_parsing.augmentation_lib.ChangeDelimAugmentation', 'aug_lib.ChangeDelimAugmentation', (['[ap_parsing_lib.LabeledSpanType.PROBLEM_DESCRIPTION]', "['\\n-- ']"], {}), "([ap_parsing_lib.LabeledSpanType.\n PROBLEM_DESCRIPTION], ['\\n-- '])\n", (14761, 14831), True, 'from assessment_plan_modeling.ap_parsing import augmentation_lib as aug_lib\n'), ((14873, 14967), 'assessment_plan_modeling.ap_parsing.augmentation_lib.ChangeDelimAugmentation', 'aug_lib.ChangeDelimAugmentation', (['[ap_parsing_lib.LabeledSpanType.ACTION_ITEM]', "['\\n--- ']"], {}), "([ap_parsing_lib.LabeledSpanType.ACTION_ITEM\n ], ['\\n--- '])\n", (14904, 14967), True, 'from assessment_plan_modeling.ap_parsing import augmentation_lib as aug_lib\n'), ((7597, 7716), 'assessment_plan_modeling.ap_parsing.ap_parsing_lib.LabeledCharSpan', 'ap_parsing_lib.LabeledCharSpan', ([], {'span_type': 'ap_parsing_lib.LabeledSpanType.PROBLEM_TITLE', 'start_char': '(29)', 'end_char': '(32)'}), '(span_type=ap_parsing_lib.LabeledSpanType.\n PROBLEM_TITLE, start_char=29, end_char=32)\n', (7627, 7716), False, 'from assessment_plan_modeling.ap_parsing import ap_parsing_lib\n'), ((7947, 8072), 'assessment_plan_modeling.ap_parsing.ap_parsing_lib.LabeledCharSpan', 'ap_parsing_lib.LabeledCharSpan', ([], {'span_type': 'ap_parsing_lib.LabeledSpanType.PROBLEM_DESCRIPTION', 'start_char': '(34)', 'end_char': '(44)'}), '(span_type=ap_parsing_lib.LabeledSpanType.\n PROBLEM_DESCRIPTION, start_char=34, end_char=44)\n', (7977, 8072), False, 'from assessment_plan_modeling.ap_parsing import ap_parsing_lib\n'), ((8331, 8512), 'assessment_plan_modeling.ap_parsing.ap_parsing_lib.LabeledCharSpan', 'ap_parsing_lib.LabeledCharSpan', ([], {'span_type': 'ap_parsing_lib.LabeledSpanType.ACTION_ITEM', 'action_item_type': 'ap_parsing_lib.ActionItemType.MEDICATIONS', 'start_char': '(47)', 'end_char': '(51)'}), '(span_type=ap_parsing_lib.LabeledSpanType.\n ACTION_ITEM, action_item_type=ap_parsing_lib.ActionItemType.MEDICATIONS,\n start_char=47, end_char=51)\n', (8361, 8512), False, 'from assessment_plan_modeling.ap_parsing import ap_parsing_lib\n'), ((8814, 8933), 'assessment_plan_modeling.ap_parsing.ap_parsing_lib.LabeledCharSpan', 'ap_parsing_lib.LabeledCharSpan', ([], {'span_type': 'ap_parsing_lib.LabeledSpanType.PROBLEM_TITLE', 'start_char': '(52)', 'end_char': '(58)'}), '(span_type=ap_parsing_lib.LabeledSpanType.\n PROBLEM_TITLE, start_char=52, end_char=58)\n', (8844, 8933), False, 'from assessment_plan_modeling.ap_parsing import ap_parsing_lib\n'), ((9167, 9355), 'assessment_plan_modeling.ap_parsing.ap_parsing_lib.LabeledCharSpan', 'ap_parsing_lib.LabeledCharSpan', ([], {'span_type': 'ap_parsing_lib.LabeledSpanType.ACTION_ITEM', 'action_item_type': 'ap_parsing_lib.ActionItemType.OBSERVATIONS_LABS', 'start_char': '(59)', 'end_char': '(64)'}), '(span_type=ap_parsing_lib.LabeledSpanType.\n ACTION_ITEM, action_item_type=ap_parsing_lib.ActionItemType.\n OBSERVATIONS_LABS, start_char=59, end_char=64)\n', (9197, 9355), False, 'from assessment_plan_modeling.ap_parsing import ap_parsing_lib\n'), ((9678, 9797), 'assessment_plan_modeling.ap_parsing.ap_parsing_lib.LabeledCharSpan', 'ap_parsing_lib.LabeledCharSpan', ([], {'span_type': 'ap_parsing_lib.LabeledSpanType.PROBLEM_TITLE', 'start_char': '(65)', 'end_char': '(69)'}), '(span_type=ap_parsing_lib.LabeledSpanType.\n PROBLEM_TITLE, start_char=65, end_char=69)\n', (9708, 9797), False, 'from assessment_plan_modeling.ap_parsing import ap_parsing_lib\n'), ((10029, 10210), 'assessment_plan_modeling.ap_parsing.ap_parsing_lib.LabeledCharSpan', 'ap_parsing_lib.LabeledCharSpan', ([], {'span_type': 'ap_parsing_lib.LabeledSpanType.ACTION_ITEM', 'action_item_type': 'ap_parsing_lib.ActionItemType.MEDICATIONS', 'start_char': '(70)', 'end_char': '(74)'}), '(span_type=ap_parsing_lib.LabeledSpanType.\n ACTION_ITEM, action_item_type=ap_parsing_lib.ActionItemType.MEDICATIONS,\n start_char=70, end_char=74)\n', (10059, 10210), False, 'from assessment_plan_modeling.ap_parsing import ap_parsing_lib\n'), ((10512, 10631), 'assessment_plan_modeling.ap_parsing.ap_parsing_lib.LabeledCharSpan', 'ap_parsing_lib.LabeledCharSpan', ([], {'span_type': 'ap_parsing_lib.LabeledSpanType.PROBLEM_TITLE', 'start_char': '(75)', 'end_char': '(81)'}), '(span_type=ap_parsing_lib.LabeledSpanType.\n PROBLEM_TITLE, start_char=75, end_char=81)\n', (10542, 10631), False, 'from assessment_plan_modeling.ap_parsing import ap_parsing_lib\n'), ((10865, 10990), 'assessment_plan_modeling.ap_parsing.ap_parsing_lib.LabeledCharSpan', 'ap_parsing_lib.LabeledCharSpan', ([], {'span_type': 'ap_parsing_lib.LabeledSpanType.PROBLEM_DESCRIPTION', 'start_char': '(82)', 'end_char': '(93)'}), '(span_type=ap_parsing_lib.LabeledSpanType.\n PROBLEM_DESCRIPTION, start_char=82, end_char=93)\n', (10895, 10990), False, 'from assessment_plan_modeling.ap_parsing import ap_parsing_lib\n'), ((11250, 11376), 'assessment_plan_modeling.ap_parsing.ap_parsing_lib.LabeledCharSpan', 'ap_parsing_lib.LabeledCharSpan', ([], {'span_type': 'ap_parsing_lib.LabeledSpanType.PROBLEM_DESCRIPTION', 'start_char': '(94)', 'end_char': '(117)'}), '(span_type=ap_parsing_lib.LabeledSpanType.\n PROBLEM_DESCRIPTION, start_char=94, end_char=117)\n', (11280, 11376), False, 'from assessment_plan_modeling.ap_parsing import ap_parsing_lib\n'), ((11648, 11831), 'assessment_plan_modeling.ap_parsing.ap_parsing_lib.LabeledCharSpan', 'ap_parsing_lib.LabeledCharSpan', ([], {'span_type': 'ap_parsing_lib.LabeledSpanType.ACTION_ITEM', 'action_item_type': 'ap_parsing_lib.ActionItemType.MEDICATIONS', 'start_char': '(118)', 'end_char': '(127)'}), '(span_type=ap_parsing_lib.LabeledSpanType.\n ACTION_ITEM, action_item_type=ap_parsing_lib.ActionItemType.MEDICATIONS,\n start_char=118, end_char=127)\n', (11678, 11831), False, 'from assessment_plan_modeling.ap_parsing import ap_parsing_lib\n'), ((12084, 12274), 'assessment_plan_modeling.ap_parsing.ap_parsing_lib.LabeledCharSpan', 'ap_parsing_lib.LabeledCharSpan', ([], {'span_type': 'ap_parsing_lib.LabeledSpanType.ACTION_ITEM', 'action_item_type': 'ap_parsing_lib.ActionItemType.OBSERVATIONS_LABS', 'start_char': '(128)', 'end_char': '(131)'}), '(span_type=ap_parsing_lib.LabeledSpanType.\n ACTION_ITEM, action_item_type=ap_parsing_lib.ActionItemType.\n OBSERVATIONS_LABS, start_char=128, end_char=131)\n', (12114, 12274), False, 'from assessment_plan_modeling.ap_parsing import ap_parsing_lib\n'), ((12541, 12721), 'assessment_plan_modeling.ap_parsing.ap_parsing_lib.LabeledCharSpan', 'ap_parsing_lib.LabeledCharSpan', ([], {'span_type': 'ap_parsing_lib.LabeledSpanType.ACTION_ITEM', 'action_item_type': 'ap_parsing_lib.ActionItemType.CONSULTS', 'start_char': '(132)', 'end_char': '(142)'}), '(span_type=ap_parsing_lib.LabeledSpanType.\n ACTION_ITEM, action_item_type=ap_parsing_lib.ActionItemType.CONSULTS,\n start_char=132, end_char=142)\n', (12571, 12721), False, 'from assessment_plan_modeling.ap_parsing import ap_parsing_lib\n'), ((2039, 2158), 'assessment_plan_modeling.ap_parsing.ap_parsing_lib.LabeledCharSpan', 'ap_parsing_lib.LabeledCharSpan', ([], {'span_type': 'ap_parsing_lib.LabeledSpanType.PROBLEM_TITLE', 'start_char': '(29)', 'end_char': '(32)'}), '(span_type=ap_parsing_lib.LabeledSpanType.\n PROBLEM_TITLE, start_char=29, end_char=32)\n', (2069, 2158), False, 'from assessment_plan_modeling.ap_parsing import ap_parsing_lib\n'), ((2503, 2628), 'assessment_plan_modeling.ap_parsing.ap_parsing_lib.LabeledCharSpan', 'ap_parsing_lib.LabeledCharSpan', ([], {'span_type': 'ap_parsing_lib.LabeledSpanType.PROBLEM_DESCRIPTION', 'start_char': '(34)', 'end_char': '(44)'}), '(span_type=ap_parsing_lib.LabeledSpanType.\n PROBLEM_DESCRIPTION, start_char=34, end_char=44)\n', (2533, 2628), False, 'from assessment_plan_modeling.ap_parsing import ap_parsing_lib\n'), ((2991, 3108), 'assessment_plan_modeling.ap_parsing.ap_parsing_lib.LabeledCharSpan', 'ap_parsing_lib.LabeledCharSpan', ([], {'span_type': 'ap_parsing_lib.LabeledSpanType.ACTION_ITEM', 'start_char': '(47)', 'end_char': '(51)'}), '(span_type=ap_parsing_lib.LabeledSpanType.\n ACTION_ITEM, start_char=47, end_char=51)\n', (3021, 3108), False, 'from assessment_plan_modeling.ap_parsing import ap_parsing_lib\n'), ((3750, 3869), 'assessment_plan_modeling.ap_parsing.ap_parsing_lib.LabeledCharSpan', 'ap_parsing_lib.LabeledCharSpan', ([], {'span_type': 'ap_parsing_lib.LabeledSpanType.PROBLEM_TITLE', 'start_char': '(29)', 'end_char': '(32)'}), '(span_type=ap_parsing_lib.LabeledSpanType.\n PROBLEM_TITLE, start_char=29, end_char=32)\n', (3780, 3869), False, 'from assessment_plan_modeling.ap_parsing import ap_parsing_lib\n'), ((4139, 4264), 'assessment_plan_modeling.ap_parsing.ap_parsing_lib.LabeledCharSpan', 'ap_parsing_lib.LabeledCharSpan', ([], {'span_type': 'ap_parsing_lib.LabeledSpanType.PROBLEM_DESCRIPTION', 'start_char': '(34)', 'end_char': '(44)'}), '(span_type=ap_parsing_lib.LabeledSpanType.\n PROBLEM_DESCRIPTION, start_char=34, end_char=44)\n', (4169, 4264), False, 'from assessment_plan_modeling.ap_parsing import ap_parsing_lib\n'), ((4559, 4676), 'assessment_plan_modeling.ap_parsing.ap_parsing_lib.LabeledCharSpan', 'ap_parsing_lib.LabeledCharSpan', ([], {'span_type': 'ap_parsing_lib.LabeledSpanType.ACTION_ITEM', 'start_char': '(47)', 'end_char': '(51)'}), '(span_type=ap_parsing_lib.LabeledSpanType.\n ACTION_ITEM, start_char=47, end_char=51)\n', (4589, 4676), False, 'from assessment_plan_modeling.ap_parsing import ap_parsing_lib\n'), ((5371, 5490), 'assessment_plan_modeling.ap_parsing.ap_parsing_lib.LabeledCharSpan', 'ap_parsing_lib.LabeledCharSpan', ([], {'span_type': 'ap_parsing_lib.LabeledSpanType.PROBLEM_TITLE', 'start_char': '(29)', 'end_char': '(32)'}), '(span_type=ap_parsing_lib.LabeledSpanType.\n PROBLEM_TITLE, start_char=29, end_char=32)\n', (5401, 5490), False, 'from assessment_plan_modeling.ap_parsing import ap_parsing_lib\n'), ((5760, 5885), 'assessment_plan_modeling.ap_parsing.ap_parsing_lib.LabeledCharSpan', 'ap_parsing_lib.LabeledCharSpan', ([], {'span_type': 'ap_parsing_lib.LabeledSpanType.PROBLEM_DESCRIPTION', 'start_char': '(34)', 'end_char': '(44)'}), '(span_type=ap_parsing_lib.LabeledSpanType.\n PROBLEM_DESCRIPTION, start_char=34, end_char=44)\n', (5790, 5885), False, 'from assessment_plan_modeling.ap_parsing import ap_parsing_lib\n'), ((6180, 6297), 'assessment_plan_modeling.ap_parsing.ap_parsing_lib.LabeledCharSpan', 'ap_parsing_lib.LabeledCharSpan', ([], {'span_type': 'ap_parsing_lib.LabeledSpanType.ACTION_ITEM', 'start_char': '(47)', 'end_char': '(51)'}), '(span_type=ap_parsing_lib.LabeledSpanType.\n ACTION_ITEM, start_char=47, end_char=51)\n', (6210, 6297), False, 'from assessment_plan_modeling.ap_parsing import ap_parsing_lib\n')]
|
# Copyright (c) 2017 lululemon athletica Canada inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tight.core.test_helpers as test_helpers
from tight.providers.aws.clients import boto3_client
def test_no_boom():
assert True, 'Module can be imported.'
def test_prepare_pills_record():
test_helpers.prepare_pills('record', 'some/path', boto3_client.session())
boto3_pill = getattr(test_helpers, 'boto3_pill')
dynamo_pill = getattr(test_helpers, 'pill')
# Do it again and make sure objects are the same
test_helpers.prepare_pills('record', 'some/path', boto3_client.session())
boto3_pill_cached = getattr(test_helpers, 'boto3_pill')
dynamo_pill_cached = getattr(test_helpers, 'pill')
assert boto3_pill == boto3_pill_cached, 'boto3 pill is cached'
assert dynamo_pill == dynamo_pill_cached, 'dynamo pill is cached'
def test_prepare_pills_playback():
test_helpers.prepare_pills('playback', 'some/path', boto3_client.session())
boto3_pill = getattr(test_helpers, 'boto3_pill')
dynamo_pill = getattr(test_helpers, 'pill')
# Do it again and make sure objects are the same
test_helpers.prepare_pills('playback', 'some/path', boto3_client.session())
boto3_pill_cached = getattr(test_helpers, 'boto3_pill')
dynamo_pill_cached = getattr(test_helpers, 'pill')
assert boto3_pill == boto3_pill_cached, 'boto3 pill is cached'
assert dynamo_pill == dynamo_pill_cached, 'dynamo pill is cached'
def test_placebos_path_playback():
result = test_helpers.placebos_path('/some/absolute/path.py', 'my_namespace')
assert result == '/some/absolute/placebos/my_namespace'
def test_placebos_path_record(tmpdir):
test_file = '{}/some_test.py'.format(tmpdir)
with open(test_file, 'w') as tmp_test_file:
tmp_test_file.write('')
tmpdir.mkdir('placebos')
result = test_helpers.placebos_path(test_file, 'some_test', mode='record')
assert result == '{}/placebos/some_test'.format(tmpdir)
assert os.path.isdir(result), 'Namespaced placebos directory exists'
def test_placebos_path_record_placebos_exist(tmpdir):
test_file = '{}/some_test.py'.format(tmpdir)
with open(test_file, 'w') as tmp_test_file:
tmp_test_file.write('')
tmpdir.mkdir('placebos')
result = test_helpers.placebos_path(test_file, 'some_test', mode='record')
assert result == '{}/placebos/some_test'.format(tmpdir)
assert os.path.isdir(result), 'Namespaced placebos directory exists'
disappearing_file = '{}/i_should_not_exist.txt'.format(result)
with open(disappearing_file, 'w') as file_to_make_disappear:
file_to_make_disappear.write('make me disappear')
assert os.listdir(result)[0] == 'i_should_not_exist.txt'
result2 = test_helpers.placebos_path(test_file, 'some_test', mode='record')
assert len(os.listdir(result2)) == 0
|
[
"os.path.isdir",
"tight.providers.aws.clients.boto3_client.session",
"os.listdir",
"tight.core.test_helpers.placebos_path"
] |
[((2025, 2093), 'tight.core.test_helpers.placebos_path', 'test_helpers.placebos_path', (['"""/some/absolute/path.py"""', '"""my_namespace"""'], {}), "('/some/absolute/path.py', 'my_namespace')\n", (2051, 2093), True, 'import tight.core.test_helpers as test_helpers\n'), ((2367, 2432), 'tight.core.test_helpers.placebos_path', 'test_helpers.placebos_path', (['test_file', '"""some_test"""'], {'mode': '"""record"""'}), "(test_file, 'some_test', mode='record')\n", (2393, 2432), True, 'import tight.core.test_helpers as test_helpers\n'), ((2505, 2526), 'os.path.isdir', 'os.path.isdir', (['result'], {}), '(result)\n', (2518, 2526), False, 'import os\n'), ((2795, 2860), 'tight.core.test_helpers.placebos_path', 'test_helpers.placebos_path', (['test_file', '"""some_test"""'], {'mode': '"""record"""'}), "(test_file, 'some_test', mode='record')\n", (2821, 2860), True, 'import tight.core.test_helpers as test_helpers\n'), ((2933, 2954), 'os.path.isdir', 'os.path.isdir', (['result'], {}), '(result)\n', (2946, 2954), False, 'import os\n'), ((3260, 3325), 'tight.core.test_helpers.placebos_path', 'test_helpers.placebos_path', (['test_file', '"""some_test"""'], {'mode': '"""record"""'}), "(test_file, 'some_test', mode='record')\n", (3286, 3325), True, 'import tight.core.test_helpers as test_helpers\n'), ((864, 886), 'tight.providers.aws.clients.boto3_client.session', 'boto3_client.session', ([], {}), '()\n', (884, 886), False, 'from tight.providers.aws.clients import boto3_client\n'), ((1096, 1118), 'tight.providers.aws.clients.boto3_client.session', 'boto3_client.session', ([], {}), '()\n', (1116, 1118), False, 'from tight.providers.aws.clients import boto3_client\n'), ((1465, 1487), 'tight.providers.aws.clients.boto3_client.session', 'boto3_client.session', ([], {}), '()\n', (1485, 1487), False, 'from tight.providers.aws.clients import boto3_client\n'), ((1699, 1721), 'tight.providers.aws.clients.boto3_client.session', 'boto3_client.session', ([], {}), '()\n', (1719, 1721), False, 'from tight.providers.aws.clients import boto3_client\n'), ((3196, 3214), 'os.listdir', 'os.listdir', (['result'], {}), '(result)\n', (3206, 3214), False, 'import os\n'), ((3341, 3360), 'os.listdir', 'os.listdir', (['result2'], {}), '(result2)\n', (3351, 3360), False, 'import os\n')]
|
# project/server/__init__.py
import os
from flask import Flask, make_response, jsonify
app = Flask(__name__)
app_settings = os.getenv(
'APP_SETTINGS',
'project.server.config.DevelopmentConfig'
)
app.config.from_object(app_settings)
from project.server.api.routes import api_blueprint
app.register_blueprint(api_blueprint)
@app.after_request
def after_request(response):
response.headers.add('Access-Control-Allow-Origin', '*')
response.headers.add('Access-Control-Allow-Headers', 'Content-Type')
response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST')
return response
@app.errorhandler(400)
def not_found(error):
return make_response(jsonify({
'status': '400', 'error': 'Not found'}), 400)
@app.errorhandler(404)
def page_not_found(error):
return make_response(jsonify({
'status': '404', 'error': 'Not Found'}), 404)
@app.errorhandler(500)
def internal_server(error):
return make_response(jsonify({
'status': '500', 'error': 'Something went wrong'}), 500)
|
[
"flask.jsonify",
"flask.Flask",
"os.getenv"
] |
[((98, 113), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (103, 113), False, 'from flask import Flask, make_response, jsonify\n'), ((131, 199), 'os.getenv', 'os.getenv', (['"""APP_SETTINGS"""', '"""project.server.config.DevelopmentConfig"""'], {}), "('APP_SETTINGS', 'project.server.config.DevelopmentConfig')\n", (140, 199), False, 'import os\n'), ((688, 736), 'flask.jsonify', 'jsonify', (["{'status': '400', 'error': 'Not found'}"], {}), "({'status': '400', 'error': 'Not found'})\n", (695, 736), False, 'from flask import Flask, make_response, jsonify\n'), ((829, 877), 'flask.jsonify', 'jsonify', (["{'status': '404', 'error': 'Not Found'}"], {}), "({'status': '404', 'error': 'Not Found'})\n", (836, 877), False, 'from flask import Flask, make_response, jsonify\n'), ((971, 1030), 'flask.jsonify', 'jsonify', (["{'status': '500', 'error': 'Something went wrong'}"], {}), "({'status': '500', 'error': 'Something went wrong'})\n", (978, 1030), False, 'from flask import Flask, make_response, jsonify\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 3 13:10:21 2018
@author: DrLC
"""
from symtab import build_symtab
import constraint
import cfg
import os, sys
from interval import interval
class FCG(object):
def __init__(self, _cfg={}, _main="", _symtab={}):
assert _main in _cfg.keys()
self.__entry_set_enable = True
self.__name = _main
main = constraint.CG(_cfg[_main], _symtab[_main], _main)
self.__main = main.get_constraint_nodes()
self.__entry = self.__entry_reorder(main.get_entry_nodes(), _symtab[_main]['decl'])
self.__return = main.get_return_node()
self.__call = []
tb_del = []
for i in self.__main:
if type(i) is constraint.CallNode:
tb_del.append(i)
tmp_func_name = i.get_name().strip().split("(")[0].strip()
assert tmp_func_name in _cfg.keys()
tmp_func_cg = constraint.CG(_cfg[tmp_func_name],
_symtab[tmp_func_name],
tmp_func_name)
self.__call += tmp_func_cg.get_constraint_nodes()
tmp_func_entry = self.__entry_reorder(tmp_func_cg.get_entry_nodes(),
_symtab[tmp_func_name]['decl'])
for en, prv in zip(tmp_func_entry, i.get_prev()):
if en is None:
continue
prv.del_next(i)
prv.add_next(en)
en.add_prev(prv)
tmp_func_return = tmp_func_cg.get_return_node()
assert len(i.get_next()) == 1
nxt = i.get_next()[0]
nxt.del_prev(i)
nxt.add_prev(tmp_func_return)
tmp_func_return.add_next(nxt)
for i in tb_del:
self.__main.remove(i)
self.__constraint = self.__main + self.__call
self.__simplify()
tb_del = []
for i in self.__main:
if i not in self.__constraint:
tb_del.append(i)
for i in tb_del:
self.__main.remove(i)
tb_del = []
for i in self.__call:
if i not in self.__constraint:
tb_del.append(i)
for i in tb_del:
self.__call.remove(i)
def __entry_reorder(self, _entry=[], _funcdecl=None):
ret = []
for a in _funcdecl.get_args():
match_flag = False
for e in _entry:
if e.get_name().startswith(a.get_name()):
ret.append(e)
match_flag = True
break
if not match_flag:
ret.append(None)
return ret
def __simplify(self):
while self.__backward_simplify_iter():
pass
while self.__forward_simplify_iter():
pass
tb_del = []
for c in self.__constraint:
if len(c.get_next()) == 0 and len(c.get_prev()) == 0:
tb_del.append(c)
for c in tb_del:
self.__constraint.remove(c)
def __backward_simplify_iter(self):
ret = []
for c in self.__constraint:
if c.get_prev() == []:
continue
tb_del_ = []
for prv in c.get_prev():
if prv not in self.__constraint:
tb_del_.append(prv)
for i in tb_del_:
c.del_prev(i)
if c.get_prev() == []:
ret.append(c)
for i in ret:
self.__constraint.remove(i)
if i in self.__entry:
self.__entry.remove(i)
if len(ret) == 0:
return False
else:
return True
def __forward_simplify_iter(self):
ret = []
if self.__return is None:
ret = self.__constraint
else:
for c in self.__constraint:
if (type(c) is constraint.VarNode and len(c.get_next()) == 0
and (not self.__return.get_number() == c.get_number())):
ret.append(c)
for prv in c.get_prev():
prv.del_next(c)
for i in ret:
self.__constraint.remove(i)
if i in self.__entry:
self.__entry.remove(i)
if len(ret) == 0:
return False
else:
return True
def debug(self):
print ()
print ('Full constraint graph ' + self.__name)
if len(self.__entry) == 0:
print ('No entry')
else:
print ('Entry: ', end="")
for i in self.__entry:
print (str(i.get_number()), end=" ")
print ()
if self.__return is None:
print ("No return")
else:
print ("Return: "+str(self.__return.get_number()))
if len(self.__constraint) == 0:
print ("No constraint")
else:
print ("Constraint:")
for c in self.__constraint:
c.debug()
def set_entry_range(self, _range={}):
assert len(_range) == len(self.__entry)
assert self.__entry_set_enable
for en, r in zip(self.__entry, _range):
if en is None:
continue
assert en.get_name().startswith(r[2])
for prv in en.get_prev():
self.__constraint.remove(prv)
en.clr_prev()
tmp_node = constraint.RangeNode(r[0], r[1])
self.__constraint.append(tmp_node)
en.add_prev(tmp_node)
tmp_node.add_next(en)
self.__entry = []
self.__entry_set_enable = False
for c in self.__constraint:
if type(c) is constraint.RangeNode and len(c.get_prev()) == 0:
assert (len(c.get_next()) == 1)
if type(c.get_next()[0]) is constraint.VarNode:
c.get_next()[0].set_minmax_widen(c.get_interval())
self.__entry.append(c.get_next()[0])
def get_name(self): return self.__name
def get_constraint_nodes(self): return self.__main + self.__call
def get_entry_nodes(self): return self.__entry
def get_return_node(self): return self.__return
def print_help():
print ()
print ("+---------------------------------+")
print ("| |")
print ("| Full Constraint Graph |")
print ("| by DrLC |")
print ("| |")
print ("+---------------------------------+")
print ()
print ("Transfer .ssa file to constraint graph, and embed the function calling.")
print ()
print ("Use this command to run.")
print (" python3 %s [-P|--path SSA_FILE_PATH]" % sys.argv[0])
print ()
exit(0)
def get_op():
args = sys.argv
if '-h' in args or '--help' in args:
print_help()
if len(args) == 1:
path = '../benchmark/t9.ssa'
elif len(args) == 3 and args[1] in ['-P', '--path']:
path = args[2]
else:
print_help()
return path
if __name__ == "__main__":
path = get_op()
sym_tab = build_symtab(path)
with open(path, 'r') as f:
lines = f.readlines()
_cfg_ = {}
for key in sym_tab.keys():
_cfg_[key] = cfg.CFG(lines[sym_tab[key]["lines"][1]:sym_tab[key]["lines"][2]],
key)
cg = FCG(_cfg_, "foo", sym_tab)
cg.debug()
|
[
"constraint.CG",
"symtab.build_symtab",
"constraint.RangeNode",
"cfg.CFG"
] |
[((7446, 7464), 'symtab.build_symtab', 'build_symtab', (['path'], {}), '(path)\n', (7458, 7464), False, 'from symtab import build_symtab\n'), ((416, 465), 'constraint.CG', 'constraint.CG', (['_cfg[_main]', '_symtab[_main]', '_main'], {}), '(_cfg[_main], _symtab[_main], _main)\n', (429, 465), False, 'import constraint\n'), ((7593, 7663), 'cfg.CFG', 'cfg.CFG', (["lines[sym_tab[key]['lines'][1]:sym_tab[key]['lines'][2]]", 'key'], {}), "(lines[sym_tab[key]['lines'][1]:sym_tab[key]['lines'][2]], key)\n", (7600, 7663), False, 'import cfg\n'), ((5683, 5715), 'constraint.RangeNode', 'constraint.RangeNode', (['r[0]', 'r[1]'], {}), '(r[0], r[1])\n', (5703, 5715), False, 'import constraint\n'), ((967, 1040), 'constraint.CG', 'constraint.CG', (['_cfg[tmp_func_name]', '_symtab[tmp_func_name]', 'tmp_func_name'], {}), '(_cfg[tmp_func_name], _symtab[tmp_func_name], tmp_func_name)\n', (980, 1040), False, 'import constraint\n')]
|
from crc.scripts.script import Script
from crc.services.failing_service import FailingService
class FailingScript(Script):
def get_description(self):
return """It fails"""
def do_task_validate_only(self, task, *args, **kwargs):
pass
def do_task(self, task, *args, **kwargs):
FailingService.fail_as_service()
|
[
"crc.services.failing_service.FailingService.fail_as_service"
] |
[((317, 349), 'crc.services.failing_service.FailingService.fail_as_service', 'FailingService.fail_as_service', ([], {}), '()\n', (347, 349), False, 'from crc.services.failing_service import FailingService\n')]
|
import gi
gi.require_version('Handy', '0.0')
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, Handy
class MyWindow(Gtk.Window):
def __init__(self):
# https://lazka.github.io/pgi-docs/Gtk-3.0/classes/Window.html
Gtk.Window.__init__(self)
self.set_title("Switches Example")
self.connect("destroy", Gtk.main_quit)
self.set_size_request(350, 350)
# Create List Box
# https://lazka.github.io/pgi-docs/Gtk-3.0/classes/ListBox.html
box = Gtk.ListBox()
box.set_selection_mode(Gtk.SelectionMode.NONE)
# use the libhandy function to add separators to listbox rows
# https://lazka.github.io/pgi-docs/#Handy-0.0/functions.html#Handy.list_box_separator_header
box.set_header_func(Handy.list_box_separator_header)
# Add some rows
box.add(self.addrow("London"))
box.add(self.addrow("Berlin"))
box.add(self.addrow("Prague"))
# Add List box to main window
self.add(box)
def addrow(self, title):
# https://lazka.github.io/pgi-docs/#Handy-0.0/classes/ActionRow.html
row = Handy.ActionRow()
row.set_title(title)
# Add action to row
switch = Gtk.Switch.new()
switch.set_valign(Gtk.Align.CENTER)
row.add_action(switch)
return row
# https://lazka.github.io/pgi-docs/#Handy-0.0/functions.html#Handy.init
Handy.init()
window = MyWindow()
window.show_all()
Gtk.main()
|
[
"gi.require_version",
"gi.repository.Gtk.main",
"gi.repository.Gtk.ListBox",
"gi.repository.Handy.ActionRow",
"gi.repository.Handy.init",
"gi.repository.Gtk.Switch.new",
"gi.repository.Gtk.Window.__init__"
] |
[((11, 45), 'gi.require_version', 'gi.require_version', (['"""Handy"""', '"""0.0"""'], {}), "('Handy', '0.0')\n", (29, 45), False, 'import gi\n'), ((46, 78), 'gi.require_version', 'gi.require_version', (['"""Gtk"""', '"""3.0"""'], {}), "('Gtk', '3.0')\n", (64, 78), False, 'import gi\n'), ((1472, 1484), 'gi.repository.Handy.init', 'Handy.init', ([], {}), '()\n', (1482, 1484), False, 'from gi.repository import Gtk, Handy\n'), ((1524, 1534), 'gi.repository.Gtk.main', 'Gtk.main', ([], {}), '()\n', (1532, 1534), False, 'from gi.repository import Gtk, Handy\n'), ((256, 281), 'gi.repository.Gtk.Window.__init__', 'Gtk.Window.__init__', (['self'], {}), '(self)\n', (275, 281), False, 'from gi.repository import Gtk, Handy\n'), ((535, 548), 'gi.repository.Gtk.ListBox', 'Gtk.ListBox', ([], {}), '()\n', (546, 548), False, 'from gi.repository import Gtk, Handy\n'), ((1183, 1200), 'gi.repository.Handy.ActionRow', 'Handy.ActionRow', ([], {}), '()\n', (1198, 1200), False, 'from gi.repository import Gtk, Handy\n'), ((1280, 1296), 'gi.repository.Gtk.Switch.new', 'Gtk.Switch.new', ([], {}), '()\n', (1294, 1296), False, 'from gi.repository import Gtk, Handy\n')]
|
# Generated by Django 2.0.4 on 2018-04-21 15:39
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, unique=True)),
('description', models.TextField(max_length=500)),
],
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('post', models.TextField(max_length=500)),
('created_date', models.DateField(auto_now=True)),
('created_time', models.TimeField(auto_now=True)),
('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('created_date', 'created_time'),
},
),
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('details', models.TextField(max_length=1000)),
('venue', models.CharField(max_length=50)),
('date', models.DateField(help_text='Please use the following format: <em>YYYY-MM-DD</em>.')),
('time', models.TimeField()),
('attendees', models.ManyToManyField(blank=True, related_name='attending', to=settings.AUTH_USER_MODEL)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='events', to='events.Category')),
('creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'events',
'verbose_name': 'event',
},
),
migrations.AddField(
model_name='comment',
name='event',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='events.Event'),
),
]
|
[
"django.db.models.TextField",
"django.db.migrations.swappable_dependency",
"django.db.models.ManyToManyField",
"django.db.models.TimeField",
"django.db.models.ForeignKey",
"django.db.models.CharField",
"django.db.models.AutoField",
"django.db.models.DateField"
] |
[((247, 304), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (278, 304), False, 'from django.db import migrations, models\n'), ((2511, 2622), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""comments"""', 'to': '"""events.Event"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='comments', to='events.Event')\n", (2528, 2622), False, 'from django.db import migrations, models\n'), ((437, 530), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (453, 530), False, 'from django.db import migrations, models\n'), ((554, 598), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'unique': '(True)'}), '(max_length=50, unique=True)\n', (570, 598), False, 'from django.db import migrations, models\n'), ((633, 665), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(500)'}), '(max_length=500)\n', (649, 665), False, 'from django.db import migrations, models\n'), ((798, 891), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (814, 891), False, 'from django.db import migrations, models\n'), ((915, 947), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(500)'}), '(max_length=500)\n', (931, 947), False, 'from django.db import migrations, models\n'), ((983, 1014), 'django.db.models.DateField', 'models.DateField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (999, 1014), False, 'from django.db import migrations, models\n'), ((1050, 1081), 'django.db.models.TimeField', 'models.TimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (1066, 1081), False, 'from django.db import migrations, models\n'), ((1115, 1211), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL)\n', (1132, 1211), False, 'from django.db import migrations, models\n'), ((1436, 1529), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1452, 1529), False, 'from django.db import migrations, models\n'), ((1553, 1584), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (1569, 1584), False, 'from django.db import migrations, models\n'), ((1615, 1648), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(1000)'}), '(max_length=1000)\n', (1631, 1648), False, 'from django.db import migrations, models\n'), ((1677, 1708), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (1693, 1708), False, 'from django.db import migrations, models\n'), ((1736, 1824), 'django.db.models.DateField', 'models.DateField', ([], {'help_text': '"""Please use the following format: <em>YYYY-MM-DD</em>."""'}), "(help_text=\n 'Please use the following format: <em>YYYY-MM-DD</em>.')\n", (1752, 1824), False, 'from django.db import migrations, models\n'), ((1847, 1865), 'django.db.models.TimeField', 'models.TimeField', ([], {}), '()\n', (1863, 1865), False, 'from django.db import migrations, models\n'), ((1898, 1992), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'blank': '(True)', 'related_name': '"""attending"""', 'to': 'settings.AUTH_USER_MODEL'}), "(blank=True, related_name='attending', to=settings.\n AUTH_USER_MODEL)\n", (1920, 1992), False, 'from django.db import migrations, models\n'), ((2019, 2131), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""events"""', 'to': '"""events.Category"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='events', to='events.Category')\n", (2036, 2131), False, 'from django.db import migrations, models\n'), ((2157, 2253), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL)\n', (2174, 2253), False, 'from django.db import migrations, models\n')]
|
"""
python 操作 Excel
"""
import xlrd
import xlwt
# #读取Excel文件
# workbook = xlrd.open_workbook(filename)
# #获取所有表名
# sheet_names = workbook.sheets()
#
# # #通过索引顺序获取一个工作表
# sheet0 = workbook.sheets()[0]
# # # or
# sheet1 = workbook.sheet_by_index(1)
# # #通过表名获取一个工作表
# sheet3 = workbook.sheet1
# # for i in sheet_names:
# print(sheet3)
def read_xlrd(excelTestFile):
data = xlrd.open_workbook(excelTestFile) # 打开路径下的文件,并读取
table = data.sheet_by_index(0) # 根据sheet的索引,确定表格;也可以根据sheet名称确定,如:table = data.sheet_by_name('用户表')
for rowNum in range(table.nrows): # excel表格中的有效行数,从0开始
rowVale = table.row_values(rowNum) # row_values返回该行中所有数据组成的一个列表
for colNum in range(table.ncols): # excel表格中的有效列数,从0开始
if rowNum > 0 and colNum == 0:
print(int(rowVale[0]))
else:
print(rowVale[colNum])
print("#############################")
if __name__ == '__main__':
excelFile = r"C:\Users\CXS\Desktop\公司文件\机架信息.xlsm"
read_xlrd(excelTestFile=excelFile)
|
[
"xlrd.open_workbook"
] |
[((381, 414), 'xlrd.open_workbook', 'xlrd.open_workbook', (['excelTestFile'], {}), '(excelTestFile)\n', (399, 414), False, 'import xlrd\n')]
|
from __future__ import division
#--------------------------#
import sys
#--------------------------#
import sympy as s
from sympy import cos, sin, pi
from sympy.matrices import Matrix as mat
#=====================================================#
from helperfunctions import matmul_series
sys.path.append("../int/misc-tools/")
import parsingtools as parse
#--------------------------------------------------------------------#
def diff_mat(M,param):
diff = lambda y: s.diff(y,param)
sh = M.shape
return mat(map(diff, M)).reshape(*sh)
#--------------------------------------------------------------------#
def matmul_series( *args ):
return reduce(s.Matrix.multiply, args)
#--------------------------------------------------------------------#
def _spin_tensor_diff_mat_ang(R,ang):
return matmul_series(diff_mat(mat_rot_z(ang),ang), mat_rot_z(ang).T)
#--------------------------------------------------------------------#
def spin_tensor(R,ang):
dang_dt = s.sympify('d'+str(ang)+'/dt')
return _spin_tensor_diff_mat_ang(R,ang) * dang_dt
#--------------------------------------------------------------------#
def mat_trans_x( tx ):
return mat([[1, 0, 0, tx],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
#--------------------------------------------------------------------#
def mat_trans_z( tz ):
return mat([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, tz],
[0, 0, 0, 1]])
#--------------------------------------------------------------------#
def mat_rot_x( ang ):
#convert to radians
c = cos(ang)
s = sin(ang)
return mat([[1, 0, 0, 0],
[0, c, -s, 0],
[0, s, c, 0],
[0, 0, 0, 1]])
#--------------------------------------------------------------------#
def mat_rot_z( ang ):
#convert to radians
c = cos(ang)
s = sin(ang)
return mat([[c, -s, 0, 0],
[s, c, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
#--------------------------------------------------------------------#
def transform_to_next(A, alpha, D, theta):
Rz_J = mat_rot_z(theta)
Tz_J = mat_trans_z(D)
Tx_I = mat_trans_x(A)
Rx_I = mat_rot_x(alpha)
return matmul_series(Rz_J, Tz_J, Tx_I, Rx_I)
#--------------------------------------------------------------------#
def DH_params( *params ):
nbr_of_sections = int(len(params) / 4)
if len(params) == 1 and type(params[0]) in [list, tuple]:
raise ArithmeticError("Function does not use lists or tuples, please unpack using *.")
elif not (len(params) % 4 == 0):
raise ArithmeticError("Invalid number of Denavit-Hartenberg parameters.")
matrices = []
for k in xrange(0, nbr_of_sections):
A, alpha, D, theta = params[4*k:4*k+4]
matrices.append( transform_to_next(A, alpha, D, theta) )
return matmul_series(*matrices)
#--------------------------------------------------------------------#
def calc_tool_IRB120(a=None,b=None,c=None,d=None,e=None,f=None):
if a is None:
a = s.sympify('a')
if b is None:
b = s.sympify('b')
if c is None:
c = s.sympify('c')
if d is None:
d = s.sympify('d')
if e is None:
e = s.sympify('e')
if f is None:
f = s.sympify('f')
flange = DH_params(
0, 90,0.290,180+a,
0.270,0,0,90+b,
-0.070, 90, 0, 180+c,
0, 90, 0.302, 180+d,
0, 90, 0, 180+e,
0, 0, 0.072, 0+f
)
return flange
#--------------------------------------------------------------------#
def custom_round(v, prec = 1e-4):
coef = 1 / prec
return n.round(v * coef) / coef
#--------------------------------------------------------------------#
if __name__ == '__main__':
a = s.sympify('a')
|
[
"sys.path.append",
"sympy.cos",
"sympy.sympify",
"sympy.diff",
"sympy.sin",
"sympy.matrices.Matrix",
"helperfunctions.matmul_series"
] |
[((298, 335), 'sys.path.append', 'sys.path.append', (['"""../int/misc-tools/"""'], {}), "('../int/misc-tools/')\n", (313, 335), False, 'import sys\n'), ((1197, 1259), 'sympy.matrices.Matrix', 'mat', (['[[1, 0, 0, tx], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]'], {}), '([[1, 0, 0, tx], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])\n', (1200, 1259), True, 'from sympy.matrices import Matrix as mat\n'), ((1486, 1548), 'sympy.matrices.Matrix', 'mat', (['[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, tz], [0, 0, 0, 1]]'], {}), '([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, tz], [0, 0, 0, 1]])\n', (1489, 1548), True, 'from sympy.matrices import Matrix as mat\n'), ((1781, 1789), 'sympy.cos', 'cos', (['ang'], {}), '(ang)\n', (1784, 1789), False, 'from sympy import cos, sin, pi\n'), ((1799, 1807), 'sympy.sin', 'sin', (['ang'], {}), '(ang)\n', (1802, 1807), False, 'from sympy import cos, sin, pi\n'), ((1820, 1882), 'sympy.matrices.Matrix', 'mat', (['[[1, 0, 0, 0], [0, c, -s, 0], [0, s, c, 0], [0, 0, 0, 1]]'], {}), '([[1, 0, 0, 0], [0, c, -s, 0], [0, s, c, 0], [0, 0, 0, 1]])\n', (1823, 1882), True, 'from sympy.matrices import Matrix as mat\n'), ((2118, 2126), 'sympy.cos', 'cos', (['ang'], {}), '(ang)\n', (2121, 2126), False, 'from sympy import cos, sin, pi\n'), ((2136, 2144), 'sympy.sin', 'sin', (['ang'], {}), '(ang)\n', (2139, 2144), False, 'from sympy import cos, sin, pi\n'), ((2157, 2219), 'sympy.matrices.Matrix', 'mat', (['[[c, -s, 0, 0], [s, c, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]'], {}), '([[c, -s, 0, 0], [s, c, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])\n', (2160, 2219), True, 'from sympy.matrices import Matrix as mat\n'), ((2562, 2599), 'helperfunctions.matmul_series', 'matmul_series', (['Rz_J', 'Tz_J', 'Tx_I', 'Rx_I'], {}), '(Rz_J, Tz_J, Tx_I, Rx_I)\n', (2575, 2599), False, 'from helperfunctions import matmul_series\n'), ((3212, 3236), 'helperfunctions.matmul_series', 'matmul_series', (['*matrices'], {}), '(*matrices)\n', (3225, 3236), False, 'from helperfunctions import matmul_series\n'), ((4255, 4269), 'sympy.sympify', 's.sympify', (['"""a"""'], {}), "('a')\n", (4264, 4269), True, 'import sympy as s\n'), ((485, 501), 'sympy.diff', 's.diff', (['y', 'param'], {}), '(y, param)\n', (491, 501), True, 'import sympy as s\n'), ((3407, 3421), 'sympy.sympify', 's.sympify', (['"""a"""'], {}), "('a')\n", (3416, 3421), True, 'import sympy as s\n'), ((3454, 3468), 'sympy.sympify', 's.sympify', (['"""b"""'], {}), "('b')\n", (3463, 3468), True, 'import sympy as s\n'), ((3501, 3515), 'sympy.sympify', 's.sympify', (['"""c"""'], {}), "('c')\n", (3510, 3515), True, 'import sympy as s\n'), ((3548, 3562), 'sympy.sympify', 's.sympify', (['"""d"""'], {}), "('d')\n", (3557, 3562), True, 'import sympy as s\n'), ((3595, 3609), 'sympy.sympify', 's.sympify', (['"""e"""'], {}), "('e')\n", (3604, 3609), True, 'import sympy as s\n'), ((3642, 3656), 'sympy.sympify', 's.sympify', (['"""f"""'], {}), "('f')\n", (3651, 3656), True, 'import sympy as s\n')]
|
# list(map(int, input().split()))
# int(input())
import sys
sys.setrecursionlimit(10 ** 9)
from itertools import combinations
def main(*args):
N, XY = args
def difference(p1, p2):
return p2[0] - p1[0], p2[1] - p1[1]
for c in combinations(XY, r = 3):
p1, p2, p3 = c # p = (x, y)
diff12 = difference(p1, p2)
diff13 = difference(p1, p3)
if diff12[1] * diff13[0] == diff13[1] * diff12[0]:
print('Yes')
break
else:
print('No')
if __name__ == '__main__':
N = int(input())
args = [N]
args.append({tuple(map(int, input().split())) for n in range(N)})
main(*args)
|
[
"itertools.combinations",
"sys.setrecursionlimit"
] |
[((61, 91), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['(10 ** 9)'], {}), '(10 ** 9)\n', (82, 91), False, 'import sys\n'), ((250, 271), 'itertools.combinations', 'combinations', (['XY'], {'r': '(3)'}), '(XY, r=3)\n', (262, 271), False, 'from itertools import combinations\n')]
|
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
from os import path
from subprocess import check_output
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
version = check_output('git describe --abbrev=0'.split(' ')).decode(
'utf-8').strip()
setup(
name='invoke-tools',
version=version,
description='A set of tools to use the Invoke task runner easier in a work-flow.',
url='https://github.com/VJftw/invoke-tools',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
zip_safe=False,
packages=find_packages(),
install_requires=['docker', 'invoke', 'psutil', 'py-cpuinfo', 'gitpython', 'requests'],
extras_require={
'test': ['nose', 'coverage', 'rednose']
},
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
keywords='invoke tools'
)
|
[
"os.path.dirname",
"os.path.join",
"setuptools.find_packages"
] |
[((359, 381), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (371, 381), False, 'from os import path\n'), ((442, 471), 'os.path.join', 'path.join', (['here', '"""README.rst"""'], {}), "(here, 'README.rst')\n", (451, 471), False, 'from os import path\n'), ((914, 929), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (927, 929), False, 'from setuptools import setup, find_packages\n')]
|
import sys
import time
import pandas as pd
from os import listdir
from os.path import isfile, join
import json
from .tasks import *
PATH_BERT = '/home/users/whwodud98/pytorch-pretrained-BERT'
sys.path.insert(0, PATH_BERT)
PATH_SENTEVAL = '/home/users/whwodud98/bert/SentEval'
PATH_TO_DATA = '/home/users/whwodud98/bert/SentEval/data/'
PATH_TO_CACHE = '/home/users/whwodud98/bert/cache/'
sys.path.insert(0, PATH_SENTEVAL)
import senteval
def get_results(dir_path='./mlp_results'):
columns = ['data_path', 'cache_path', 'result_path', 'batch_size', 'cbatch_size', 'nhid', 'optim', 'kfold',
'tenacity', 'usepytorch', 'epoch_size', 'device']
filenames = [f for f in listdir(dir_path) if isfile(join(dir_path, f)) if '.json' in f]
list_result = []
for filename in filenames:
with open(join(dir_path, filename), 'r') as infile:
# print(filename)
results = json.load(infile)
for key, result in results.items():
list_result.append(result)
df = pd.DataFrame(list_result)[['acc', 'devacc', 'devpearson', 'pearson', 'head', 'layer', 'task', 'model_name', 'location']]
for column in columns:
try:
df = df.drop(columns=column)
except:
pass
return df
def get_top_heads(model_name, task, metric='devacc', dir_path='./ds_linear_head_wise_results'):
df = get_results(dir_path=dir_path)
df = df.loc[df['model_name'] == model_name]
print(df)
df = df.loc[df['head'] >= 0]
df = df.loc[df['task'] == task] # Choose task
df = df.sort_values(by=[metric], ascending=False)
list_head = []
for index, row in df.iterrows():
list_head.append((row['layer'], row['head']))
return list_head
def save_exp_result(exp_result, task):
del exp_result['model']
exp_key = '{}_{}'.format(exp_result['num_head'], exp_result['location'])
result_name = "{}_{}.json".format(exp_result['model_name'], task)
result_dir = exp_result['result_path']
onlyfiles = [f for f in listdir(result_dir) if isfile(join(result_dir, f))]
if result_name in onlyfiles:
with open(join(result_dir, result_name), 'r') as f:
results = json.load(f)
with open(join(result_dir, result_name), 'w') as f:
results[exp_key] = exp_result
json.dump(results, f)
print("Append exp result at {} with key {}".format(result_name, exp_key))
else:
results = {}
with open(join(result_dir, result_name), 'w') as f:
results[exp_key] = exp_result
json.dump(results, f)
print("Create new exp result at {} with key {}".format(result_name, exp_key))
def prepare(params, _):
task = params['current_task']
model = params['model']
location = params['location']
model.prepare(task, location)
def batcher(params, batch):
model = params['model']
location = params['location']
head_size = params['head_size']
sentences = [' '.join(s) for s in batch]
embedding = model.encode(sentences, params['heads'], head_size, location)
return embedding
def experiment(model, task, args):
ts = time.time()
params = vars(args)
params['model'] = model
params['classifier'] = {'nhid': args.nhid,
'optim': args.optim,
'tenacity': args.tenacity,
'epoch_size': args.epoch_size,
'dropout': args.dropout,
'batch_size': args.cbatch_size}
params['heads'] = get_top_heads(args.model_name, task)[:args.num_head] # select first top n-heads
se = senteval.engine.SE(params, batcher, prepare)
result = se.eval([task])
if task in ['SICKRelatedness']:
params['devpearson'] = result[task]['devpearson']
params['pearson'] = result[task]['pearson']
elif task in ['STS12', 'STS13', 'STS14', 'STS15', 'STS16', 'STSBenchmark']:
params['pearson'] = result[task]['all']['pearson']['mean']
else:
params['devacc'] = result[task]['devacc']
params['acc'] = result[task]['acc']
model.save_cache(task, args.location)
te = time.time()
print("result: {}, took: {:3.1f} sec".format(result, te - ts))
return params
|
[
"pandas.DataFrame",
"json.dump",
"json.load",
"sys.path.insert",
"time.time",
"senteval.engine.SE",
"os.path.join",
"os.listdir"
] |
[((194, 223), 'sys.path.insert', 'sys.path.insert', (['(0)', 'PATH_BERT'], {}), '(0, PATH_BERT)\n', (209, 223), False, 'import sys\n'), ((390, 423), 'sys.path.insert', 'sys.path.insert', (['(0)', 'PATH_SENTEVAL'], {}), '(0, PATH_SENTEVAL)\n', (405, 423), False, 'import sys\n'), ((3186, 3197), 'time.time', 'time.time', ([], {}), '()\n', (3195, 3197), False, 'import time\n'), ((3686, 3730), 'senteval.engine.SE', 'senteval.engine.SE', (['params', 'batcher', 'prepare'], {}), '(params, batcher, prepare)\n', (3704, 3730), False, 'import senteval\n'), ((4214, 4225), 'time.time', 'time.time', ([], {}), '()\n', (4223, 4225), False, 'import time\n'), ((1050, 1075), 'pandas.DataFrame', 'pd.DataFrame', (['list_result'], {}), '(list_result)\n', (1062, 1075), True, 'import pandas as pd\n'), ((691, 708), 'os.listdir', 'listdir', (['dir_path'], {}), '(dir_path)\n', (698, 708), False, 'from os import listdir\n'), ((931, 948), 'json.load', 'json.load', (['infile'], {}), '(infile)\n', (940, 948), False, 'import json\n'), ((2056, 2075), 'os.listdir', 'listdir', (['result_dir'], {}), '(result_dir)\n', (2063, 2075), False, 'from os import listdir\n'), ((2224, 2236), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2233, 2236), False, 'import json\n'), ((2352, 2373), 'json.dump', 'json.dump', (['results', 'f'], {}), '(results, f)\n', (2361, 2373), False, 'import json\n'), ((2602, 2623), 'json.dump', 'json.dump', (['results', 'f'], {}), '(results, f)\n', (2611, 2623), False, 'import json\n'), ((719, 736), 'os.path.join', 'join', (['dir_path', 'f'], {}), '(dir_path, f)\n', (723, 736), False, 'from os.path import isfile, join\n'), ((825, 849), 'os.path.join', 'join', (['dir_path', 'filename'], {}), '(dir_path, filename)\n', (829, 849), False, 'from os.path import isfile, join\n'), ((2086, 2105), 'os.path.join', 'join', (['result_dir', 'f'], {}), '(result_dir, f)\n', (2090, 2105), False, 'from os.path import isfile, join\n'), ((2160, 2189), 'os.path.join', 'join', (['result_dir', 'result_name'], {}), '(result_dir, result_name)\n', (2164, 2189), False, 'from os.path import isfile, join\n'), ((2256, 2285), 'os.path.join', 'join', (['result_dir', 'result_name'], {}), '(result_dir, result_name)\n', (2260, 2285), False, 'from os.path import isfile, join\n'), ((2506, 2535), 'os.path.join', 'join', (['result_dir', 'result_name'], {}), '(result_dir, result_name)\n', (2510, 2535), False, 'from os.path import isfile, join\n')]
|
def test_csvfinder():
"""Tests that the finder finds all the correct files
:returns test_files: List of .csv files
"""
from Reader import csvfinder
test_files = csvfinder()
assert test_files.count('poorform') == 0
assert test_files.count('wrongend') == 0
assert test_files.count('false.csv') == 1
assert test_files.count('mess.csv') == 1
assert test_files.count('poorform.csv') == 1
assert test_files.count('tab.csv') == 1
assert test_files.count('test_data1.csv') == 1
assert test_files.count('words.csv') == 1
assert test_files.count('test1.csv') == 1
assert test_files.count('test2.csv') == 1
return test_files
def test_csvchecker():
"""Tests that the check function discards and keeps correct files
:returns check_files: Dictionary of valid csv files
"""
from Reader import csvchecker
test_files = test_csvfinder()
check_files = csvchecker(test_files)
assert 'false.csv' not in check_files
assert 'mess.csv' not in check_files
assert 'poorform.csv' not in check_files
assert 'tab.csv' not in check_files
assert 'false.csv' not in check_files
assert 'test2.csv' not in check_files
assert 'test1.csv' in check_files
assert 'test_data1.csv' in check_files
assert 'words.csv' in check_files
return check_files
def test_floatcheck():
"""Tests to ensure that certain files are removed from the csv file list
"""
from Reader import floatcheck
check_files = test_csvchecker()
float_files = floatcheck(check_files)
assert 'words.csv' not in float_files
assert 'test1.csv' not in float_files
assert 'test_data1.csv' in float_files
|
[
"Reader.csvchecker",
"Reader.floatcheck",
"Reader.csvfinder"
] |
[((182, 193), 'Reader.csvfinder', 'csvfinder', ([], {}), '()\n', (191, 193), False, 'from Reader import csvfinder\n'), ((926, 948), 'Reader.csvchecker', 'csvchecker', (['test_files'], {}), '(test_files)\n', (936, 948), False, 'from Reader import csvchecker\n'), ((1542, 1565), 'Reader.floatcheck', 'floatcheck', (['check_files'], {}), '(check_files)\n', (1552, 1565), False, 'from Reader import floatcheck\n')]
|
import sys
import yaml
from jsonschema import validate
if len(sys.argv) != 3:
raise RuntimeError(f'Please provide validation file and json schema file as arguments to validate_schemas.py')
merged_file = sys.argv[1]
json_schema_file = sys.argv[2]
with open(json_schema_file) as f:
_META_VAL_JSONSCHEMA = yaml.safe_load(f)
# _META_VAL_JSONSCHEMA = {
# 'type': 'object',
# 'definitions': {
# 'validator_set': {
# 'type': 'object',
# # validate values only
# 'additionalProperties': {
# 'type': 'object',
# 'properties': {
# 'key_metadata': {
# 'type': 'object',
# 'additionalProperties': {
# 'type': ['number', 'boolean', 'string', 'null']
# }
# },
# 'validators': {
# 'type': 'array',
# 'items': {
# 'type': 'object',
# 'properties': {
# 'module': {'type': 'string'},
# 'callable_builder': {'type': 'string'},
# 'parameters': {'type': 'object'}
# },
# 'additionalProperties': False,
# 'required': ['module', 'callable_builder']
# }
# }
# },
# 'required': ['validators']
# }
# },
# 'additionalProperties': False,
# },
# 'properties': {
# 'validators': {'$ref': '#/definitions/validator_set'},
# 'prefix_validators': {'$ref': '#/definitions/validator_set'},
# },
# 'additionalProperties': False
# }
files = [
"validation_files/ENIGMA-noops.yml",
"validation_files/SESAR-noops.yml",
"validation_files/SESAR.yml",
"validation_files/ENIGMA.yml",
"validation_files/miscellaneous.yml",
merged_file,
"metadata_validation.yml"
]
for file in files:
with open(file) as f:
cfg = yaml.safe_load(f)
validate(instance=cfg, schema=_META_VAL_JSONSCHEMA)
|
[
"jsonschema.validate",
"yaml.safe_load"
] |
[((314, 331), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (328, 331), False, 'import yaml\n'), ((2206, 2257), 'jsonschema.validate', 'validate', ([], {'instance': 'cfg', 'schema': '_META_VAL_JSONSCHEMA'}), '(instance=cfg, schema=_META_VAL_JSONSCHEMA)\n', (2214, 2257), False, 'from jsonschema import validate\n'), ((2184, 2201), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (2198, 2201), False, 'import yaml\n')]
|
from __future__ import print_function
import torch
import numpy as np
import os
# from torch_scatter import scatter_add
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
MESH_EXTENSIONS = [
'.obj',
]
def is_mesh_file(filename):
return any(filename.endswith(extension) for extension in MESH_EXTENSIONS)
def pad(input_arr, target_length, val=0, dim=1):
shp = input_arr.shape
npad = [(0, 0) for _ in range(len(shp))]
npad[dim] = (0, target_length - shp[dim])
return np.pad(input_arr, pad_width=npad, mode='constant', constant_values=val)
def seg_accuracy(predicted, ssegs, meshes):
correct = 0
ssegs = ssegs.squeeze(-1)
correct_mat = ssegs.gather(2, predicted.cpu().unsqueeze(dim=2))
for mesh_id, mesh in enumerate(meshes):
correct_vec = correct_mat[mesh_id, :mesh.edges_count, 0]
edge_areas = torch.from_numpy(mesh.get_edge_areas())
correct += (correct_vec.float() * edge_areas).sum()
return correct
def intersection_over_union(preds, target, num_classes):
preds, target = torch.nn.functional.one_hot(preds, num_classes), torch.nn.functional.one_hot(target, num_classes)
iou = torch.zeros(num_classes, dtype=torch.float32)
for idx, pred in enumerate(preds):
i = (pred & target[idx]).sum(dim=0)
u = (pred | target[idx]).sum(dim=0)
iou = iou.add(i.cpu().to(torch.float) / u.cpu().to(torch.float))
return iou
def mean_iou_calc(pred, target, num_classes):
#Removal of padded labels marked with -1
slimpred = []
slimtarget = []
for batch in range(pred.shape[0]):
if (target[batch] == -1).any():
slimLabels = target[batch][target[batch]!=-1]
slimtarget.append(slimLabels)
slimpred.append(pred[batch][:slimLabels.size()[0]])
pred = torch.stack(slimpred,0)
target = torch.stack(slimtarget, 0)
iou = intersection_over_union(pred, target, num_classes)
mean_iou = iou.mean(dim=-1)
return mean_iou, iou
def print_network(net):
"""Print the total number of parameters in the network
Parameters:
network
"""
print('---------- Network initialized -------------')
num_params = 0
for param in net.parameters():
num_params += param.numel()
print('[Network] Total number of parameters : %.3f M' % (num_params / 1e6))
print('-----------------------------------------------')
def get_heatmap_color(value, minimum=0, maximum=1):
minimum, maximum = float(minimum), float(maximum)
ratio = 2 * (value - minimum) / (maximum - minimum)
b = int(max(0, 255 * (1 - ratio)))
r = int(max(0, 255 * (ratio - 1)))
g = 255 - b - r
return r, g, b
def normalize_np_array(np_array):
min_value = np.min(np_array)
max_value = np.max(np_array)
return (np_array - min_value) / (max_value - min_value)
def calculate_entropy(np_array):
entropy = 0
np_array /= np.sum(np_array)
for a in np_array:
if a != 0:
entropy -= a * np.log(a)
entropy /= np.log(np_array.shape[0])
return entropy
def pad_with(vector, pad_width, iaxis, kwargs):
pad_value = kwargs.get('padder', 10)
vector[:pad_width[0]] = pad_value
vector[-pad_width[1]:] = pad_value
def myindexrowselect(groups, mask_index, device):
sparseIndices = groups._indices()
newIndices = []
for i, value in enumerate(mask_index):
#Get index from relevant indices
index = (sparseIndices[0] == value).nonzero()
#Get rows by index
sparseRow = [sparseIndices[:, value] for value in index]
sparseRow = torch.cat(sparseRow,1)[1]
singleRowIndices = torch.squeeze(torch.full((1,len(sparseRow)),i, dtype=torch.long),0).to(sparseRow.device)
indices = torch.stack((singleRowIndices,sparseRow))
newIndices.append(indices)
allNewIndices = torch.cat(newIndices,1)
#Create new tensor
groups = torch.sparse_coo_tensor(indices=allNewIndices,
values=torch.ones(allNewIndices.shape[1], dtype=torch.float),
size=(len(mask_index), groups.shape[1]))
return groups
|
[
"numpy.pad",
"torch.ones",
"numpy.sum",
"torch.stack",
"numpy.log",
"os.makedirs",
"os.path.exists",
"torch.nn.functional.one_hot",
"torch.cat",
"numpy.min",
"numpy.max",
"torch.zeros"
] |
[((522, 593), 'numpy.pad', 'np.pad', (['input_arr'], {'pad_width': 'npad', 'mode': '"""constant"""', 'constant_values': 'val'}), "(input_arr, pad_width=npad, mode='constant', constant_values=val)\n", (528, 593), True, 'import numpy as np\n'), ((1190, 1235), 'torch.zeros', 'torch.zeros', (['num_classes'], {'dtype': 'torch.float32'}), '(num_classes, dtype=torch.float32)\n', (1201, 1235), False, 'import torch\n'), ((1850, 1874), 'torch.stack', 'torch.stack', (['slimpred', '(0)'], {}), '(slimpred, 0)\n', (1861, 1874), False, 'import torch\n'), ((1887, 1913), 'torch.stack', 'torch.stack', (['slimtarget', '(0)'], {}), '(slimtarget, 0)\n', (1898, 1913), False, 'import torch\n'), ((2780, 2796), 'numpy.min', 'np.min', (['np_array'], {}), '(np_array)\n', (2786, 2796), True, 'import numpy as np\n'), ((2813, 2829), 'numpy.max', 'np.max', (['np_array'], {}), '(np_array)\n', (2819, 2829), True, 'import numpy as np\n'), ((2957, 2973), 'numpy.sum', 'np.sum', (['np_array'], {}), '(np_array)\n', (2963, 2973), True, 'import numpy as np\n'), ((3068, 3093), 'numpy.log', 'np.log', (['np_array.shape[0]'], {}), '(np_array.shape[0])\n', (3074, 3093), True, 'import numpy as np\n'), ((151, 171), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (165, 171), False, 'import os\n'), ((181, 198), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (192, 198), False, 'import os\n'), ((1082, 1129), 'torch.nn.functional.one_hot', 'torch.nn.functional.one_hot', (['preds', 'num_classes'], {}), '(preds, num_classes)\n', (1109, 1129), False, 'import torch\n'), ((1131, 1179), 'torch.nn.functional.one_hot', 'torch.nn.functional.one_hot', (['target', 'num_classes'], {}), '(target, num_classes)\n', (1158, 1179), False, 'import torch\n'), ((3807, 3849), 'torch.stack', 'torch.stack', (['(singleRowIndices, sparseRow)'], {}), '((singleRowIndices, sparseRow))\n', (3818, 3849), False, 'import torch\n'), ((3909, 3933), 'torch.cat', 'torch.cat', (['newIndices', '(1)'], {}), '(newIndices, 1)\n', (3918, 3933), False, 'import torch\n'), ((3647, 3670), 'torch.cat', 'torch.cat', (['sparseRow', '(1)'], {}), '(sparseRow, 1)\n', (3656, 3670), False, 'import torch\n'), ((4061, 4114), 'torch.ones', 'torch.ones', (['allNewIndices.shape[1]'], {'dtype': 'torch.float'}), '(allNewIndices.shape[1], dtype=torch.float)\n', (4071, 4114), False, 'import torch\n'), ((3043, 3052), 'numpy.log', 'np.log', (['a'], {}), '(a)\n', (3049, 3052), True, 'import numpy as np\n')]
|
from rest_framework import routers
from kitsune.questions.api import QuestionViewSet, AnswerViewSet
router = routers.SimpleRouter()
router.register(r"question", QuestionViewSet)
router.register(r"answer", AnswerViewSet)
urlpatterns = router.urls
|
[
"rest_framework.routers.SimpleRouter"
] |
[((111, 133), 'rest_framework.routers.SimpleRouter', 'routers.SimpleRouter', ([], {}), '()\n', (131, 133), False, 'from rest_framework import routers\n')]
|
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
import datetime
import logging
import time
import concurrent.futures
from dataclasses import dataclass, field
from typing import List, Tuple, Dict
import re
import netaddr
import ConfigParser
import os
logger = logging.getLogger(__name__)
MAX_THREADS = 14 # Get max number of threads for multi-threading
# Read credentials for tenable
Config = ConfigParser.ConfigParser()
Config.read(os.path.join(os.path.abspath(os.path.dirname(__file__)),'settings.ini'))
tenable_client_id = Config.get('Settings', 'Tenable_Client_Id')
tenable_secret_id = Config.get('Settings', 'Tenable_Secret_Id')
tenable_gcp_tag = Config.get('Settings', 'Tenable_GCP_tag')
tenable_workstations_tag = Config.get('Settings', 'Tenable_Workstation_tag')
tenable_api = "https://cloud.tenable.com"
# Generate session with max of 3 retries and interval of 1 second
def session_generator() -> requests.sessions.Session:
session = requests.Session()
retry = Retry(connect=3, backoff_factor=0.5)
adapter = HTTPAdapter(max_retries=retry)
session.mount('http://', adapter)
session.mount('https://', adapter)
return session
@dataclass # A class to contain all the necessary fields to create report
class TenableVulnerability:
plugin_name: str
resolution: str
additional_links: List[str]
ip: str
dns: str
os: str
cves: List[str]
plugin_family: str
exploit_available: bool
cvss_score: float
temporal_score: float
vpr_score: float
zone: str = field(init=False, default='') # Critical/Non-critical Asset
nessus_criticiality: str = field(init=False, default='')
vulnerability_type: str = field(init=False, default='')
actual_criticality: str = field(init=False, default='')
host_risk: str = field(init=False, default='')
platform: str = field(init=False, default='')
def __post_init__(self):
self.get_type()
self.nessus_criticiality_insight()
self.platform_based_on_os()
self.modify_solution()
def get_type(self):
self.vulnerability_type = 'config'
if self.plugin_family.lower() == 'Windows : Microsoft Bulletins'.lower():
self.vulnerability_type = 'package'
elif 'update ' in self.resolution.lower() or 'Update Set' in self.plugin_name or 'upgrade ' in self.resolution.lower() or 'MS KB' in self.plugin_name:
self.vulnerability_type = 'package'
elif 'Apply the client registry key workaround and the server registry key workaround suggested by Microsoft in the advisory.' == self.resolution:
self.vulnerability_type = 'config'
elif re.search('.* KB\d{3,} .*', self.resolution, flags=re.I) or 'patch' in self.resolution:
self.vulnerability_type = 'package'
def platform_based_on_os(self):
if 'windows' in self.os.lower():
self.platform = 'Windows'
elif 'mac' in self.os.lower():
self.platform = 'Mac'
else:
self.platform = 'Linux'
def modify_solution(self):
if self.vulnerability_type == 'package' and self.platform == 'Windows':
if 'Microsoft has released the following security updates to address this issue:' in self.resolution or 'Apply the following security updates ' in self.resolution or 'Apply Service Stack ' in self.resolution or 'Microsoft has released KB' in self.resolution or 'Install Microsoft KB' in self.resolution:
get_security_kb = re.findall(r"KB\d{4,}", self.resolution, flags=re.IGNORECASE)
if not get_security_kb:
get_security_kb = re.findall(r"\d{4,}", self.resolution, flags=re.IGNORECASE)
get_security_kb = ["KB%s" % security_kb for security_kb in get_security_kb]
if get_security_kb:
self.resolution = ','.join(get_security_kb).replace("'", '').replace('"', '').replace(' ', '')
elif 'Apply '.lower() in self.resolution.lower():
#
get_security_kb = re.findall(r".*Security .* (KB\d{4,}) or Cumulative.*", self.resolution, flags=re.IGNORECASE)
if not get_security_kb:
# Apply security update KB4022715 as well as refer to the KB article for additional information
get_security_kb = re.findall(r".*Security .* (KB\d{4,})", self.resolution, flags=re.IGNORECASE)
if not get_security_kb:
# Apply Cumulative Update KB4056890 or KB4057142 as well as || Apply Cumulative Update KB4493509 *
get_security_kb = re.findall(r".*Cumulative .* (KB\d{4,})", self.resolution, flags=re.IGNORECASE)
if get_security_kb:
self.resolution = ','.join(get_security_kb).replace("'", '').replace('"', '').replace(' ', '')
elif 'MS' in self.plugin_name and 'KB' not in self.resolution:
get_security_bulletin_number = re.findall(r"^MS\d{2,}-\d{3,}", self.plugin_name, flags=re.IGNORECASE)
if len(get_security_bulletin_number) == 1:
year = get_security_bulletin_number[0].split('-')[0].replace('MS', '')
link = "https://docs.microsoft.com/en-us/security-updates/SecurityBulletins/20%s/%s" % (year, get_security_bulletin_number[0].lower())
self.resolution = link
elif 'ADV' in self.plugin_name and ' KB' not in self.resolution:
get_ADV = re.findall(r"^ADV\d{4,}", self.plugin_name, flags=re.IGNORECASE)
if len(get_ADV) == 1:
ADV = get_ADV[0].split(':')[0]
link = "https://portal.msrc.microsoft.com/en-US/security-guidance/advisory/%s" % ADV.upper()
self.resolution = link
elif ('Microsoft has released a set of ' in self.resolution or 'Apply the appropriate patches according to the' in self.resolution or 'Microsoft has released security updates for ' in self.resolution or 'Microsoft has released a security update to ' in self.resolution):
self.resolution = self.additional_links[0]
elif self.vulnerability_type == 'package' and self.platform == 'Linux':
"""
Modify Linux Solution that you want in the report as per your Linux box
"""
if 'Update ' in self.resolution:
self.resolution = 'yum update -y '
for cve in self.cves:
self.resolution = '%s --cve %s ' % (self.resolution, cve)
def nessus_criticiality_insight(self):
if self.exploit_available:
if self.vpr_score > 0.0:
if self.vpr_score >= 7:
self.nessus_criticiality = 'High'
elif self.vpr_score >= 4:
self.nessus_criticiality = 'Medium'
else:
self.nessus_criticiality = 'Low'
elif self.temporal_score > 0.0:
if self.temporal_score >= 7:
self.nessus_criticiality = 'High'
elif self.temporal_score >= 4:
self.nessus_criticiality = 'Medium'
else:
self.nessus_criticiality = 'Low'
elif self.cvss_score > 0.0:
if self.cvss_score >= 7:
self.nessus_criticiality = 'High'
elif self.cvss_score >= 4:
self.nessus_criticiality = 'Medium'
else:
self.nessus_criticiality = 'Low'
else:
if self.vpr_score > 0.0:
if self.vpr_score >= 7:
self.nessus_criticiality = 'Medium'
elif self.vpr_score >= 4:
self.nessus_criticiality = 'Medium'
else:
self.nessus_criticiality = 'Low'
elif self.temporal_score > 0.0:
if self.temporal_score >= 7:
self.nessus_criticiality = 'Medium'
elif self.temporal_score >= 4:
self.nessus_criticiality = 'Medium'
else:
self.nessus_criticiality = 'Low'
elif self.cvss_score > 0.0:
if self.cvss_score >= 7:
self.nessus_criticiality = 'Medium'
elif self.cvss_score >= 4:
self.nessus_criticiality = 'Medium'
else:
self.nessus_criticiality = 'Low'
def get_host_risk(self):
"""
Use a combination of host dns (self.dns) and the zone that host is in to define host risk
For example:
haproxy_box = re.search('.*haproxy.*', self.dns, flags=re.IGNORECASE)
web_box = re.search('.*web.*', self.dns, flags=re.IGNORECASE)
app_box = re.search('.*app.*', self.dns, flags=re.IGNORECASE)
proxy_box = re.search('^proxy.*', self.dns, flags=re.IGNORECASE)
if self.zone == 'DMZ':
if self.platform == 'Linux':
if web_box or haproxy_box:
self.host_risk = 'High'
elif app_box:
self.host_risk = 'Medium'
elif self.zone == 'Secure':
if self.platform == 'Linux':
if app_box:
self.host_risk = 'High'
elif proxy_box:
self.host_risk = 'Medium'
"""
def actual_criticality_insight(self):
if self.host_risk == 'High':
if self.nessus_criticiality == 'Low':
self.actual_criticality = 'Medium'
elif self.nessus_criticiality == 'Medium':
self.actual_criticality = 'High'
elif self.nessus_criticiality == 'High':
self.actual_criticality = 'High'
elif self.host_risk == 'Medium':
if self.nessus_criticiality == 'Low':
self.actual_criticality = 'Medium'
elif self.nessus_criticiality == 'Medium':
self.actual_criticality = 'Medium'
elif self.nessus_criticiality == 'High':
self.actual_criticality = 'High'
elif self.host_risk == 'Low':
if self.nessus_criticiality == 'Low':
self.actual_criticality = 'Low'
elif self.nessus_criticiality == 'Medium':
self.actual_criticality = 'Low'
elif self.nessus_criticiality == 'High':
self.actual_criticality = 'Medium'
else:
self.actual_criticality = 'Unknown'
# Initiate download of all vulnerable assets
def initiate_download_vulnerabilities(tag: str) -> str:
logger.info("Initiating download of %s vulnerabilities seen in the last 15 days" % tag)
uuid = None
headers = {'X-ApiKeys': 'accessKey=%s; secretKey=%s' % (tenable_client_id, tenable_secret_id),
'Content-Type': 'application/json'}
session = session_generator()
data = {
"num_assets": 1000,
"filters": {
"severity": ["low", "medium", "high", "critical"],
"since": int((datetime.datetime.now() - datetime.timedelta(days=15)).strftime("%s")),
"tag.Source": [tag]
}
}
resp = session.post("%s/vulns/export" % tenable_api, headers=headers, json=data)
response = resp.json()
if resp.ok:
uuid = response['export_uuid']
elif resp.status_code == 429:
logger.warning("Exceed rate limit.")
time.sleep(60)
# TO DO:
# Check header to see if spits out retry.
# print(resp.header)
uuid = initiate_download_vulnerabilities(tag)
else:
logger.error('ERROR %s: %s' % (resp.status_code, resp.text))
logger.error('Unable to make rest call to initiate download all %s vulnerabilities' % tag)
return uuid
# Check if report is ready for download
def check_vulnerabilities_download_status(uuid: str) -> Tuple[str,List[int]]:
logger.info("Checking download status of vulnerabilities for file %s" % uuid)
headers = {'X-ApiKeys': 'accessKey=%s; secretKey=%s' % (tenable_client_id, tenable_secret_id),
'Content-Type': 'application/json'}
session = session_generator()
status = None
chunks = []
resp = session.get("%s/vulns/export/%s/status" % (tenable_api, uuid), headers=headers)
if resp.ok:
response = resp.json()
status = response['status']
if status == 'FINISHED':
chunks.extend(response['chunks_available'])
elif resp.status_code == 429:
logger.warning("Exceed rate limit.")
time.sleep(60)
# TO DO:
# Check header to see if spits out retry.
# print(resp.header)
status, chunks = check_vulnerabilities_download_status(uuid)
else:
logger.error('ERROR %s: %s' % (resp.status_code, resp.text))
logger.error('Unable to make rest call to get status of file download %s' % uuid)
return status, chunks
def parse_vulnerabilities(vulnerability: Dict) -> TenableVulnerability:
if 'exploit_available' in vulnerability['plugin'] and vulnerability['plugin']['exploit_available']:
exploit_available = True
else:
exploit_available = False
if 'cvss3_temporal_score' in vulnerability['plugin']:
temporal_score = vulnerability['plugin']['cvss3_temporal_score']
elif 'cvss_temporal_score' in vulnerability['plugin']:
temporal_score = vulnerability['plugin']['cvss_temporal_score']
else:
temporal_score = 0.0
if 'cvss3_base_score' in vulnerability['plugin']:
base_score = vulnerability['plugin']['cvss3_base_score']
elif 'cvss_base_score' in vulnerability['plugin']:
base_score = vulnerability['plugin']['cvss_base_score']
else:
base_score = 0.0
if 'vpr' in vulnerability['plugin']:
if 'score' in vulnerability['plugin']['vpr']:
vpr = vulnerability['plugin']['vpr']['score']
else:
vpr = 0.0
else:
vpr = 0.0
if 'see_also' in vulnerability['plugin']:
additional_links = [vulnerability['plugin']['see_also'][0]]
else:
additional_links = []
if 'cve' in vulnerability['plugin']:
cves = vulnerability['plugin']['cve']
else:
cves = []
vulnobj = TenableVulnerability(
vulnerability['plugin']['name'],
vulnerability['plugin']['solution'].replace('\r', '').replace('\n', ' '),
additional_links,
vulnerability['asset']['ipv4'],
vulnerability['asset']['fqdn'] or vulnerability['asset']['hostname'],
vulnerability['asset']['operating_system'][0] or '',
cves,
vulnerability['plugin']['family'],
exploit_available,
base_score,
temporal_score,
vpr
)
return vulnobj
# Get vulnerability of only those whose agents were last seen in 30 days
def download_vulnerabilities(uuid: str, chunk_id: int, agents: List[str]) -> List[TenableVulnerability]:
vulnerabilities = []
logger.info("Fetching list of vulnerabilities for chunk %d" % chunk_id)
headers = {'X-ApiKeys': 'accessKey=%s; secretKey=%s' % (tenable_client_id, tenable_secret_id),
'Content-Type': 'application/json'}
session = session_generator()
resp = session.get("%s/vulns/export/%s/chunks/%d" % (tenable_api, uuid, chunk_id), headers=headers)
if resp.ok:
response = resp.json()
with concurrent.futures.ThreadPoolExecutor(max_workers=MAX_THREADS) as executor:
fs = [executor.submit(parse_vulnerabilities, vulnerability) for vulnerability in response if (agents and 'agent_uuid' in vulnerability['asset'] and vulnerability['asset']['agent_uuid'] in agents) or not agents]
for future in concurrent.futures.as_completed(fs):
vulnerabilities.append(future.result())
elif resp.status_code == 429:
logger.warning("Exceed rate limit.")
time.sleep(60)
# TO DO:
# Check header to see if spits out retry.
# print(resp.header)
vulnerabilities = download_vulnerabilities(uuid, chunk_id, agents)
else:
logger.error('ERROR %s: %s' % (resp.status_code, resp.text))
logger.error('Unable to make rest call to download vulnerabilities for chunk %d' % chunk_id)
return vulnerabilities
# Get any scanner id as all devices agents are associated with all scanners
def get_any_scanner_id() -> int:
headers = {'X-ApiKeys': 'accessKey=%s; secretKey=%s' % (tenable_client_id, tenable_secret_id),
'Content-Type': 'application/json'}
session = session_generator()
r = session.get("%s/scanners" % tenable_api, headers=headers)
if r.ok:
response = r.json()
scanner_id = response['scanners'][0]['id']
logger.info("Received Tenable Scanner ID")
return scanner_id
else:
logger.error('Unable to make rest call to get scanner id')
logger.error('ERROR %s: %s' % (r.status_code, r.text))
return 0
# Fetch the groups (id and text) associated with the scanner
def get_agent_groups(scanner_id: int) -> Dict[int, str]:
logger.info("Fetching all agent groups...")
agent_group_ids = {}
headers = {'X-ApiKeys': 'accessKey=%s; secretKey=%s' % (tenable_client_id, tenable_secret_id),
'Content-Type': 'application/json'}
session = session_generator()
agent_group_request = session.get("%s/scanners/%d/agent-groups" % (tenable_api, scanner_id), headers=headers)
if agent_group_request.ok:
agent_group_response = agent_group_request.json()
for agent_group in agent_group_response['groups']:
agent_group_ids[agent_group['id']] = agent_group['name']
logger.info("Completed collecting all agent groups")
return agent_group_ids
# Fetches all agents in a particular agent group
def get_agents_in_agent_group(scanner_id: int, group_id: int) -> List[str]:
agents = []
offset = 0
session = session_generator()
logger.info("Getting all agents belonging to group id %d", group_id)
while True:
headers = {'X-ApiKeys': 'accessKey=%s; secretKey=%s' % (tenable_client_id, tenable_secret_id),
'Content-Type': 'application/json'}
agent_request = session.get(
"%s/scanners/%d/agent-groups/%s?limit=5000&offset=%d" % (tenable_api, scanner_id, group_id, offset),
headers=headers)
if agent_request.ok:
agent_response = agent_request.json()
for agent in agent_response['agents']:
if 'last_scanned' in agent and agent['last_scanned'] and agent['last_scanned'] >= int((datetime.datetime.now() - datetime.timedelta(days=30)).strftime("%s")):
agents.append(agent['uuid'].replace('-', ''))
# Tackle pagination
if agent_response['pagination']['total'] - offset <= 5000:
break
else:
offset = offset + 5000
else:
logger.error('Error %d:%s', agent_request.status_code, agent_request.text)
return agents
# Fetch all gcp agents
def get_gcp_agents(scanner_id: int) -> List[str]:
'''
Fetch Agents from Groups for GCP based on Agent Group Name
'''
agents = []
logger.info("Getting all gcp servers")
agent_group_ids = get_agent_groups(scanner_id)
if agent_group_ids:
# Map based on the value to the group id and fetch agents accordingly
for group_id in agent_group_ids:
if 'GCP' in agent_group_ids[group_id]:
agents.extend(get_agents_in_agent_group(scanner_id, group_id))
else:
pass
agents = list(set(agents))
logger.info('Found %d gcp agents' % len(agents))
return agents
# Fetch all workstation agents
def get_workstation_agents(scanner_id:int) -> List[str]:
'''
Fetch Agents from Groups for Workstations based on Agent Group Name
'''
agents = []
logger.info("Getting all workstation agents")
agent_group_ids = get_agent_groups(scanner_id)
if agent_group_ids:
# Map based on the value to the group id and fetch agents accordingly
for group_id in agent_group_ids:
if 'Workstations' in agent_group_ids[group_id]:
agents.extend(get_agents_in_agent_group(scanner_id, group_id))
else:
pass
agents = list(set(agents))
logger.info('Found %d workstation agents' % len(agents))
return agents
def get_rerouted_url(link):
link_dict = {}
resp = requests.get(link)
link_dict[link] = resp.url
return link_dict
def mapping_security_zone(iplist: List[str]) -> Dict[str, str]:
"""
Fake list of ranges for GCP
"""
dmz_range = list(netaddr.IPNetwork('192.168.0.0/24'))
secure_range = list(netaddr.IPNetwork('192.168.2.0/22'))
ip_zone = {}
for ip in iplist:
if netaddr.IPAddress(ip) in dmz_range:
ip_zone[ip] = 'DMZ'
elif netaddr.IPAddress(ip) in secure_range:
ip_zone[ip] = 'Secure'
return ip_zone
def fetch_gcp_vulnerabilities() -> List[TenableVulnerability]:
agents = []
vulnerabilities = []
uuid = initiate_download_vulnerabilities(tenable_gcp_tag)
if uuid is not None:
status, chunks = check_vulnerabilities_download_status(uuid)
while status != 'FINISHED':
time.sleep(10)
status, chunks = check_vulnerabilities_download_status(uuid)
with concurrent.futures.ThreadPoolExecutor(max_workers=MAX_THREADS) as executor:
fs = [executor.submit(download_vulnerabilities, uuid, chunk_id, agents) for chunk_id in chunks]
for future in concurrent.futures.as_completed(fs):
if future.result() is not None:
vulnerabilities.extend(future.result())
logger.info('Mapping info links to rerouted link')
links = []
for vulnerability in vulnerabilities:
links.extend(vulnerability.additional_links)
links = list(set(links))
map_link_to_its_rerouted_url = {}
with concurrent.futures.ThreadPoolExecutor(max_workers=1000) as executor:
for link_dict in executor.map(get_rerouted_url, links):
map_link_to_its_rerouted_url.update(link_dict)
for vulnerability in vulnerabilities:
temp_links_holder = []
for link in vulnerability.additional_links:
if link in map_link_to_its_rerouted_url:
temp_links_holder.append(map_link_to_its_rerouted_url[link])
vulnerability.additional_links = temp_links_holder
# Map zones to IPs
vulnerable_ips = list(set(vulnerability.ip for vulnerability in vulnerabilities))
logging.info('Mapping security zone to %d IPs' % len(vulnerable_ips))
ip_zone_mapping = mapping_security_zone(vulnerable_ips)
for vulnerability in vulnerabilities:
if vulnerability.ip in ip_zone_mapping:
vulnerability.zone = ip_zone_mapping[vulnerability.ip]
logger.info("Found %d IPs that are not tagged to zones" % len(set([vulnerability.ip for vulnerability in vulnerabilities if not vulnerability.zone])))
for vulnerability in vulnerabilities:
vulnerability.get_host_risk()
logging.info('Getting actual criticality of the vulnerabilities')
for vulnerability in vulnerabilities:
vulnerability.actual_criticality_insight()
logger.info('Found %d vulnerabilities for GCP' % len(vulnerabilities))
return vulnerabilities
def fetch_workstation_vulnerabilities() -> List[TenableVulnerability]:
vulnerabilities = []
scanner_id = get_any_scanner_id()
if scanner_id > 0:
agents = get_workstation_agents(scanner_id)
uuid = initiate_download_vulnerabilities(tenable_workstations_tag)
if uuid is not None and agents:
status, chunks = check_vulnerabilities_download_status(uuid)
while status != 'FINISHED':
time.sleep(10)
status, chunks = check_vulnerabilities_download_status(uuid)
with concurrent.futures.ThreadPoolExecutor(max_workers=MAX_THREADS) as executor:
fs = [executor.submit(download_vulnerabilities, uuid, chunk_id, agents) for chunk_id in chunks]
for future in concurrent.futures.as_completed(fs):
if future.result():
vulnerabilities.extend(future.result())
logger.info('Mapping info links to rerouted link')
links = []
for vulnerability in vulnerabilities:
links.extend(vulnerability.additional_links)
links = list(set(links))
map_link_to_its_rerouted_url = {}
with concurrent.futures.ThreadPoolExecutor(max_workers=1000) as executor:
for link_dict in executor.map(get_rerouted_url, links):
map_link_to_its_rerouted_url.update(link_dict)
for vulnerability in vulnerabilities:
temp_links_holder = []
for link in vulnerability.additional_links:
if link in map_link_to_its_rerouted_url:
temp_links_holder.append(map_link_to_its_rerouted_url[link])
vulnerability.additional_links = temp_links_holder
for vulnerability in vulnerabilities:
vulnerability.actual_criticality_insight()
logger.info('Found %d vulnerabilities for workstations' % len(vulnerabilities))
return vulnerabilities
|
[
"requests.packages.urllib3.util.retry.Retry",
"requests.adapters.HTTPAdapter",
"requests.Session",
"os.path.dirname",
"netaddr.IPAddress",
"datetime.datetime.now",
"time.sleep",
"dataclasses.field",
"logging.info",
"re.findall",
"datetime.timedelta",
"requests.get",
"netaddr.IPNetwork",
"ConfigParser.ConfigParser",
"re.search",
"logging.getLogger"
] |
[((325, 352), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (342, 352), False, 'import logging\n'), ((460, 487), 'ConfigParser.ConfigParser', 'ConfigParser.ConfigParser', ([], {}), '()\n', (485, 487), False, 'import ConfigParser\n'), ((1016, 1034), 'requests.Session', 'requests.Session', ([], {}), '()\n', (1032, 1034), False, 'import requests\n'), ((1047, 1083), 'requests.packages.urllib3.util.retry.Retry', 'Retry', ([], {'connect': '(3)', 'backoff_factor': '(0.5)'}), '(connect=3, backoff_factor=0.5)\n', (1052, 1083), False, 'from requests.packages.urllib3.util.retry import Retry\n'), ((1098, 1128), 'requests.adapters.HTTPAdapter', 'HTTPAdapter', ([], {'max_retries': 'retry'}), '(max_retries=retry)\n', (1109, 1128), False, 'from requests.adapters import HTTPAdapter\n'), ((1596, 1625), 'dataclasses.field', 'field', ([], {'init': '(False)', 'default': '""""""'}), "(init=False, default='')\n", (1601, 1625), False, 'from dataclasses import dataclass, field\n'), ((1688, 1717), 'dataclasses.field', 'field', ([], {'init': '(False)', 'default': '""""""'}), "(init=False, default='')\n", (1693, 1717), False, 'from dataclasses import dataclass, field\n'), ((1748, 1777), 'dataclasses.field', 'field', ([], {'init': '(False)', 'default': '""""""'}), "(init=False, default='')\n", (1753, 1777), False, 'from dataclasses import dataclass, field\n'), ((1808, 1837), 'dataclasses.field', 'field', ([], {'init': '(False)', 'default': '""""""'}), "(init=False, default='')\n", (1813, 1837), False, 'from dataclasses import dataclass, field\n'), ((1859, 1888), 'dataclasses.field', 'field', ([], {'init': '(False)', 'default': '""""""'}), "(init=False, default='')\n", (1864, 1888), False, 'from dataclasses import dataclass, field\n'), ((1909, 1938), 'dataclasses.field', 'field', ([], {'init': '(False)', 'default': '""""""'}), "(init=False, default='')\n", (1914, 1938), False, 'from dataclasses import dataclass, field\n'), ((20707, 20725), 'requests.get', 'requests.get', (['link'], {}), '(link)\n', (20719, 20725), False, 'import requests\n'), ((23396, 23461), 'logging.info', 'logging.info', (['"""Getting actual criticality of the vulnerabilities"""'], {}), "('Getting actual criticality of the vulnerabilities')\n", (23408, 23461), False, 'import logging\n'), ((20913, 20948), 'netaddr.IPNetwork', 'netaddr.IPNetwork', (['"""192.168.0.0/24"""'], {}), "('192.168.0.0/24')\n", (20930, 20948), False, 'import netaddr\n'), ((20974, 21009), 'netaddr.IPNetwork', 'netaddr.IPNetwork', (['"""192.168.2.0/22"""'], {}), "('192.168.2.0/22')\n", (20991, 21009), False, 'import netaddr\n'), ((529, 554), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (544, 554), False, 'import os\n'), ((11582, 11596), 'time.sleep', 'time.sleep', (['(60)'], {}), '(60)\n', (11592, 11596), False, 'import time\n'), ((12712, 12726), 'time.sleep', 'time.sleep', (['(60)'], {}), '(60)\n', (12722, 12726), False, 'import time\n'), ((16072, 16086), 'time.sleep', 'time.sleep', (['(60)'], {}), '(60)\n', (16082, 16086), False, 'import time\n'), ((21063, 21084), 'netaddr.IPAddress', 'netaddr.IPAddress', (['ip'], {}), '(ip)\n', (21080, 21084), False, 'import netaddr\n'), ((21547, 21561), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (21557, 21561), False, 'import time\n'), ((3560, 3621), 're.findall', 're.findall', (['"""KB\\\\d{4,}"""', 'self.resolution'], {'flags': 're.IGNORECASE'}), "('KB\\\\d{4,}', self.resolution, flags=re.IGNORECASE)\n", (3570, 3621), False, 'import re\n'), ((21144, 21165), 'netaddr.IPAddress', 'netaddr.IPAddress', (['ip'], {}), '(ip)\n', (21161, 21165), False, 'import netaddr\n'), ((24113, 24127), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (24123, 24127), False, 'import time\n'), ((3700, 3759), 're.findall', 're.findall', (['"""\\\\d{4,}"""', 'self.resolution'], {'flags': 're.IGNORECASE'}), "('\\\\d{4,}', self.resolution, flags=re.IGNORECASE)\n", (3710, 3759), False, 'import re\n'), ((4122, 4219), 're.findall', 're.findall', (['""".*Security .* (KB\\\\d{4,}) or Cumulative.*"""', 'self.resolution'], {'flags': 're.IGNORECASE'}), "('.*Security .* (KB\\\\d{4,}) or Cumulative.*', self.resolution,\n flags=re.IGNORECASE)\n", (4132, 4219), False, 'import re\n'), ((2723, 2780), 're.search', 're.search', (['""".* KB\\\\d{3,} .*"""', 'self.resolution'], {'flags': 're.I'}), "('.* KB\\\\d{3,} .*', self.resolution, flags=re.I)\n", (2732, 2780), False, 'import re\n'), ((4410, 4487), 're.findall', 're.findall', (['""".*Security .* (KB\\\\d{4,})"""', 'self.resolution'], {'flags': 're.IGNORECASE'}), "('.*Security .* (KB\\\\d{4,})', self.resolution, flags=re.IGNORECASE)\n", (4420, 4487), False, 'import re\n'), ((4686, 4765), 're.findall', 're.findall', (['""".*Cumulative .* (KB\\\\d{4,})"""', 'self.resolution'], {'flags': 're.IGNORECASE'}), "('.*Cumulative .* (KB\\\\d{4,})', self.resolution, flags=re.IGNORECASE)\n", (4696, 4765), False, 'import re\n'), ((5040, 5111), 're.findall', 're.findall', (['"""^MS\\\\d{2,}-\\\\d{3,}"""', 'self.plugin_name'], {'flags': 're.IGNORECASE'}), "('^MS\\\\d{2,}-\\\\d{3,}', self.plugin_name, flags=re.IGNORECASE)\n", (5050, 5111), False, 'import re\n'), ((5562, 5626), 're.findall', 're.findall', (['"""^ADV\\\\d{4,}"""', 'self.plugin_name'], {'flags': 're.IGNORECASE'}), "('^ADV\\\\d{4,}', self.plugin_name, flags=re.IGNORECASE)\n", (5572, 5626), False, 'import re\n'), ((11207, 11230), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (11228, 11230), False, 'import datetime\n'), ((11233, 11260), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(15)'}), '(days=15)\n', (11251, 11260), False, 'import datetime\n'), ((18799, 18822), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (18820, 18822), False, 'import datetime\n'), ((18825, 18852), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(30)'}), '(days=30)\n', (18843, 18852), False, 'import datetime\n')]
|
from models.model_group import Group
import random
def test_modify_some_group(app, db, check_ui):
if len(db.get_group_list()) == 0:
app.group.create_new_group(Group(group_name='a', group_header='b', group_footer='c'))
old_groups = db.get_group_list()
group = random.choice(old_groups)
new_group = Group(group_name='k', group_header='b', group_footer='y')
new_group.group_id = group.group_id
app.group.modify_group_by_id(new_group, group.group_id)
new_groups = db.get_group_list()
old_groups.remove(group)
old_groups.append(new_group)
assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)
if check_ui:
assert sorted(new_groups, key=Group.id_or_max) == sorted(app.group.get_group_list(), key=Group.id_or_max)
|
[
"models.model_group.Group",
"random.choice"
] |
[((281, 306), 'random.choice', 'random.choice', (['old_groups'], {}), '(old_groups)\n', (294, 306), False, 'import random\n'), ((323, 380), 'models.model_group.Group', 'Group', ([], {'group_name': '"""k"""', 'group_header': '"""b"""', 'group_footer': '"""y"""'}), "(group_name='k', group_header='b', group_footer='y')\n", (328, 380), False, 'from models.model_group import Group\n'), ((173, 230), 'models.model_group.Group', 'Group', ([], {'group_name': '"""a"""', 'group_header': '"""b"""', 'group_footer': '"""c"""'}), "(group_name='a', group_header='b', group_footer='c')\n", (178, 230), False, 'from models.model_group import Group\n')]
|
#!/usr/bin/env python3
import utils, os, random, time, open_color, arcade
utils.check_version((3,7))
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
SCREEN_TITLE = "Sprites Example"
class MyGame(arcade.Window):
def __init__(self):
super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
file_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(file_path)
arcade.set_background_color(open_color.white)
self.animal_list = arcade.SpriteList()
def setup(self):
self.animal_sprite = arcade.Sprite("assets/Side/detail_crystal.png", 2.0)
self.animal_sprite.center_x = 400
self.animal_sprite.center_y = 300
self.animal_list.append(self.animal_sprite)
self.animal_sprite = arcade.Sprite("assets/Side/detail_dirt.png", 2.0)
self.animal_sprite.center_x = 500
self.animal_sprite.center_y = 300
self.animal_list.append(self.animal_sprite)
self.animal_sprite = arcade.Sprite("assets/Side/detail_tree.png", 2.0)
self.animal_sprite.center_x = 450
self.animal_sprite.center_y = 150
self.animal_list.append(self.animal_sprite)
self.animal_sprite = arcade.Sprite("assets/Side/detail_rocks.png", 2.0)
self.animal_sprite.center_x = 200
self.animal_sprite.center_y = 100
self.animal_list.append(self.animal_sprite)
self.animal_sprite = arcade.Sprite("assets/Side/snow_tile_bump.png", 2.0)
self.animal_sprite.center_x = 650
self.animal_sprite.center_y = 50
self.animal_list.append(self.animal_sprite)
self.animal_sprite = arcade.Sprite("assets/Side/snow_tile_hill.png", 2.0)
self.animal_sprite.center_x = 600
self.animal_sprite.center_y = 400
self.animal_list.append(self.animal_sprite)
self.animal_sprite = arcade.Sprite("assets/Side/snow_tile_riverFall.png", 2.0)
self.animal_sprite.center_x = 450
self.animal_sprite.center_y = 100
self.animal_list.append(self.animal_sprite)
self.animal_sprite = arcade.Sprite("assets/Side/snow_tile_spawn.png", 2.0)
self.animal_sprite.center_x = 350
self.animal_sprite.center_y = 350
self.animal_list.append(self.animal_sprite)
self.animal_sprite = arcade.Sprite("assets/Side/towerRound_base.png", 2.0)
self.animal_sprite.center_x = 680
self.animal_sprite.center_y = 133
self.animal_list.append(self.animal_sprite)
self.animal_sprite = arcade.Sprite("assets/Side/towerRound_crystals.png", 2.0)
self.animal_sprite.center_x = 250
self.animal_sprite.center_y = 550
self.animal_list.append(self.animal_sprite)
def on_draw(self):
arcade.start_render()
self.animal_list.draw()
def update(self, delta_time):
pass
def on_mouse_motion(self, x, y, dx, dy):
self.animal_sprite.center_x = x
self.animal_sprite.center_y = y
def main():
""" Main method """
window = MyGame()
window.setup()
arcade.run()
if __name__ == "__main__":
main()
|
[
"os.path.abspath",
"utils.check_version",
"arcade.run",
"arcade.start_render",
"arcade.Sprite",
"arcade.set_background_color",
"arcade.SpriteList",
"os.chdir"
] |
[((76, 103), 'utils.check_version', 'utils.check_version', (['(3, 7)'], {}), '((3, 7))\n', (95, 103), False, 'import utils, os, random, time, open_color, arcade\n'), ((3020, 3032), 'arcade.run', 'arcade.run', ([], {}), '()\n', (3030, 3032), False, 'import utils, os, random, time, open_color, arcade\n'), ((371, 390), 'os.chdir', 'os.chdir', (['file_path'], {}), '(file_path)\n', (379, 390), False, 'import utils, os, random, time, open_color, arcade\n'), ((399, 444), 'arcade.set_background_color', 'arcade.set_background_color', (['open_color.white'], {}), '(open_color.white)\n', (426, 444), False, 'import utils, os, random, time, open_color, arcade\n'), ((481, 500), 'arcade.SpriteList', 'arcade.SpriteList', ([], {}), '()\n', (498, 500), False, 'import utils, os, random, time, open_color, arcade\n'), ((552, 604), 'arcade.Sprite', 'arcade.Sprite', (['"""assets/Side/detail_crystal.png"""', '(2.0)'], {}), "('assets/Side/detail_crystal.png', 2.0)\n", (565, 604), False, 'import utils, os, random, time, open_color, arcade\n'), ((767, 816), 'arcade.Sprite', 'arcade.Sprite', (['"""assets/Side/detail_dirt.png"""', '(2.0)'], {}), "('assets/Side/detail_dirt.png', 2.0)\n", (780, 816), False, 'import utils, os, random, time, open_color, arcade\n'), ((979, 1028), 'arcade.Sprite', 'arcade.Sprite', (['"""assets/Side/detail_tree.png"""', '(2.0)'], {}), "('assets/Side/detail_tree.png', 2.0)\n", (992, 1028), False, 'import utils, os, random, time, open_color, arcade\n'), ((1191, 1241), 'arcade.Sprite', 'arcade.Sprite', (['"""assets/Side/detail_rocks.png"""', '(2.0)'], {}), "('assets/Side/detail_rocks.png', 2.0)\n", (1204, 1241), False, 'import utils, os, random, time, open_color, arcade\n'), ((1404, 1456), 'arcade.Sprite', 'arcade.Sprite', (['"""assets/Side/snow_tile_bump.png"""', '(2.0)'], {}), "('assets/Side/snow_tile_bump.png', 2.0)\n", (1417, 1456), False, 'import utils, os, random, time, open_color, arcade\n'), ((1618, 1670), 'arcade.Sprite', 'arcade.Sprite', (['"""assets/Side/snow_tile_hill.png"""', '(2.0)'], {}), "('assets/Side/snow_tile_hill.png', 2.0)\n", (1631, 1670), False, 'import utils, os, random, time, open_color, arcade\n'), ((1833, 1890), 'arcade.Sprite', 'arcade.Sprite', (['"""assets/Side/snow_tile_riverFall.png"""', '(2.0)'], {}), "('assets/Side/snow_tile_riverFall.png', 2.0)\n", (1846, 1890), False, 'import utils, os, random, time, open_color, arcade\n'), ((2053, 2106), 'arcade.Sprite', 'arcade.Sprite', (['"""assets/Side/snow_tile_spawn.png"""', '(2.0)'], {}), "('assets/Side/snow_tile_spawn.png', 2.0)\n", (2066, 2106), False, 'import utils, os, random, time, open_color, arcade\n'), ((2269, 2322), 'arcade.Sprite', 'arcade.Sprite', (['"""assets/Side/towerRound_base.png"""', '(2.0)'], {}), "('assets/Side/towerRound_base.png', 2.0)\n", (2282, 2322), False, 'import utils, os, random, time, open_color, arcade\n'), ((2485, 2542), 'arcade.Sprite', 'arcade.Sprite', (['"""assets/Side/towerRound_crystals.png"""', '(2.0)'], {}), "('assets/Side/towerRound_crystals.png', 2.0)\n", (2498, 2542), False, 'import utils, os, random, time, open_color, arcade\n'), ((2708, 2729), 'arcade.start_render', 'arcade.start_render', ([], {}), '()\n', (2727, 2729), False, 'import utils, os, random, time, open_color, arcade\n'), ((336, 361), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (351, 361), False, 'import utils, os, random, time, open_color, arcade\n')]
|
import logging
import os
import numpy as np
from sklearn.metrics import classification_report
import torch
from torch.optim import Adam
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.nn import CrossEntropyLoss
from data.dataset import COVIDxFolder
from data import transforms
from torch.utils.data import DataLoader
from model import architecture
import util
import config
log = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def save_model(model, config):
if isinstance(model, torch.nn.DataParallel):
# Save without the DataParallel module
model_dict = model.module.state_dict()
else:
model_dict = model.state_dict()
state = {
"state_dict": model_dict,
"global_step": config['global_step'],
"clf_report": config['clf_report']
}
f1_macro = config['clf_report']['macro avg']['f1-score'] * 100
name = "{}_F1_{:.2f}_step_{}.pth".format(config['name'],
f1_macro,
config['global_step'])
model_path = os.path.join(config['save_dir'], name)
torch.save(state, model_path)
log.info("Saved model to {}".format(model_path))
def validate(data_loader, model, best_score, global_step, cfg):
model.eval()
gts, predictions = [], []
log.info("Validation started...")
for data in data_loader:
imgs, labels = data
imgs = util.to_device(imgs, gpu=cfg.gpu)
with torch.no_grad():
logits = model(imgs)
probs = model.module.probability(logits)
preds = torch.argmax(probs, dim=1).cpu().numpy()
labels = labels.cpu().detach().numpy()
predictions.extend(preds)
gts.extend(labels)
predictions = np.array(predictions, dtype=np.int32)
gts = np.array(gts, dtype=np.int32)
acc, f1, prec, rec = util.clf_metrics(predictions=predictions,
targets=gts,
average="macro")
report = classification_report(gts, predictions, output_dict=True)
log.info("VALIDATION | Accuracy {:.4f} | F1 {:.4f} | Precision {:.4f} | "
"Recall {:.4f}".format(acc, f1, prec, rec))
if acc > best_score:
save_config = {
'name': config.name,
'save_dir': config.ckpts_dir,
'global_step': global_step,
'clf_report': report
}
save_model(model=model, config=save_config)
best_score = acc
log.info("Validation end")
model.train()
return best_score
def main():
if config.gpu and not torch.cuda.is_available():
raise ValueError("GPU not supported or enabled on this system.")
use_gpu = config.gpu
log.info("Loading train dataset")
train_dataset = COVIDxFolder(config.train_imgs, config.train_labels,
transforms.train_transforms(config.width,
config.height))
train_loader = DataLoader(train_dataset,
batch_size=config.batch_size,
shuffle=True,
drop_last=True,
num_workers=config.n_threads,
pin_memory=use_gpu)
log.info("Number of training examples {}".format(len(train_dataset)))
log.info("Loading val dataset")
val_dataset = COVIDxFolder(config.val_imgs, config.val_labels,
transforms.val_transforms(config.width,
config.height))
val_loader = DataLoader(val_dataset,
batch_size=config.batch_size,
shuffle=False,
num_workers=config.n_threads,
pin_memory=use_gpu)
log.info("Number of validation examples {}".format(len(val_dataset)))
if config.weights:
#state = torch.load(config.weights)
state = None
log.info("Loaded model weights from: {}".format(config.weights))
else:
state = None
state_dict = state["state_dict"] if state else None
model = architecture.COVIDNext50(n_classes=config.n_classes)
if state_dict:
model = util.load_model_weights(model=model, state_dict=state_dict)
if use_gpu:
model.cuda()
model = torch.nn.DataParallel(model)
optim_layers = filter(lambda p: p.requires_grad, model.parameters())
# optimizer and lr scheduler
optimizer = Adam(optim_layers,
lr=config.lr,
weight_decay=config.weight_decay)
scheduler = ReduceLROnPlateau(optimizer=optimizer,
factor=config.lr_reduce_factor,
patience=config.lr_reduce_patience,
mode='max',
min_lr=1e-7)
# Load the last global_step from the checkpoint if existing
global_step = 0 if state is None else state['global_step'] + 1
class_weights = util.to_device(torch.FloatTensor(config.loss_weights),
gpu=use_gpu)
loss_fn = CrossEntropyLoss()
# Reset the best metric score
best_score = -1
for epoch in range(config.epochs):
log.info("Started epoch {}/{}".format(epoch + 1,
config.epochs))
for data in train_loader:
imgs, labels = data
imgs = util.to_device(imgs, gpu=use_gpu)
labels = util.to_device(labels, gpu=use_gpu)
logits = model(imgs)
loss = loss_fn(logits, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if global_step % config.log_steps == 0 and global_step > 0:
probs = model.module.probability(logits)
preds = torch.argmax(probs, dim=1).detach().cpu().numpy()
labels = labels.cpu().detach().numpy()
acc, f1, _, _ = util.clf_metrics(preds, labels)
lr = util.get_learning_rate(optimizer)
log.info("Step {} | TRAINING batch: Loss {:.4f} | F1 {:.4f} | "
"Accuracy {:.4f} | LR {:.2e}".format(global_step,
loss.item(),
f1, acc,
lr))
if global_step % config.eval_steps == 0 and global_step > 0:
best_score = validate(val_loader,
model,
best_score=best_score,
global_step=global_step,
cfg=config)
scheduler.step(best_score)
global_step += 1
if __name__ == '__main__':
seed = config.random_seed
if seed:
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
main()
|
[
"numpy.random.seed",
"model.architecture.COVIDNext50",
"torch.argmax",
"sklearn.metrics.classification_report",
"torch.no_grad",
"util.load_model_weights",
"os.path.join",
"util.clf_metrics",
"torch.utils.data.DataLoader",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.FloatTensor",
"torch.manual_seed",
"torch.optim.Adam",
"torch.cuda.is_available",
"util.get_learning_rate",
"logging.basicConfig",
"util.to_device",
"torch.nn.CrossEntropyLoss",
"torch.save",
"torch.cuda.manual_seed_all",
"numpy.array",
"data.transforms.val_transforms",
"torch.nn.DataParallel",
"logging.getLogger",
"data.transforms.train_transforms"
] |
[((402, 429), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (419, 429), False, 'import logging\n'), ((430, 469), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (449, 469), False, 'import logging\n'), ((1108, 1146), 'os.path.join', 'os.path.join', (["config['save_dir']", 'name'], {}), "(config['save_dir'], name)\n", (1120, 1146), False, 'import os\n'), ((1151, 1180), 'torch.save', 'torch.save', (['state', 'model_path'], {}), '(state, model_path)\n', (1161, 1180), False, 'import torch\n'), ((1799, 1836), 'numpy.array', 'np.array', (['predictions'], {'dtype': 'np.int32'}), '(predictions, dtype=np.int32)\n', (1807, 1836), True, 'import numpy as np\n'), ((1847, 1876), 'numpy.array', 'np.array', (['gts'], {'dtype': 'np.int32'}), '(gts, dtype=np.int32)\n', (1855, 1876), True, 'import numpy as np\n'), ((1902, 1973), 'util.clf_metrics', 'util.clf_metrics', ([], {'predictions': 'predictions', 'targets': 'gts', 'average': '"""macro"""'}), "(predictions=predictions, targets=gts, average='macro')\n", (1918, 1973), False, 'import util\n'), ((2071, 2128), 'sklearn.metrics.classification_report', 'classification_report', (['gts', 'predictions'], {'output_dict': '(True)'}), '(gts, predictions, output_dict=True)\n', (2092, 2128), False, 'from sklearn.metrics import classification_report\n'), ((3110, 3249), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': 'config.batch_size', 'shuffle': '(True)', 'drop_last': '(True)', 'num_workers': 'config.n_threads', 'pin_memory': 'use_gpu'}), '(train_dataset, batch_size=config.batch_size, shuffle=True,\n drop_last=True, num_workers=config.n_threads, pin_memory=use_gpu)\n', (3120, 3249), False, 'from torch.utils.data import DataLoader\n'), ((3735, 3857), 'torch.utils.data.DataLoader', 'DataLoader', (['val_dataset'], {'batch_size': 'config.batch_size', 'shuffle': '(False)', 'num_workers': 'config.n_threads', 'pin_memory': 'use_gpu'}), '(val_dataset, batch_size=config.batch_size, shuffle=False,\n num_workers=config.n_threads, pin_memory=use_gpu)\n', (3745, 3857), False, 'from torch.utils.data import DataLoader\n'), ((4302, 4354), 'model.architecture.COVIDNext50', 'architecture.COVIDNext50', ([], {'n_classes': 'config.n_classes'}), '(n_classes=config.n_classes)\n', (4326, 4354), False, 'from model import architecture\n'), ((4656, 4722), 'torch.optim.Adam', 'Adam', (['optim_layers'], {'lr': 'config.lr', 'weight_decay': 'config.weight_decay'}), '(optim_layers, lr=config.lr, weight_decay=config.weight_decay)\n', (4660, 4722), False, 'from torch.optim import Adam\n'), ((4781, 4917), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {'optimizer': 'optimizer', 'factor': 'config.lr_reduce_factor', 'patience': 'config.lr_reduce_patience', 'mode': '"""max"""', 'min_lr': '(1e-07)'}), "(optimizer=optimizer, factor=config.lr_reduce_factor,\n patience=config.lr_reduce_patience, mode='max', min_lr=1e-07)\n", (4798, 4917), False, 'from torch.optim.lr_scheduler import ReduceLROnPlateau\n'), ((5319, 5337), 'torch.nn.CrossEntropyLoss', 'CrossEntropyLoss', ([], {}), '()\n', (5335, 5337), False, 'from torch.nn import CrossEntropyLoss\n'), ((1458, 1491), 'util.to_device', 'util.to_device', (['imgs'], {'gpu': 'cfg.gpu'}), '(imgs, gpu=cfg.gpu)\n', (1472, 1491), False, 'import util\n'), ((2972, 3028), 'data.transforms.train_transforms', 'transforms.train_transforms', (['config.width', 'config.height'], {}), '(config.width, config.height)\n', (2999, 3028), False, 'from data import transforms\n'), ((3605, 3659), 'data.transforms.val_transforms', 'transforms.val_transforms', (['config.width', 'config.height'], {}), '(config.width, config.height)\n', (3630, 3659), False, 'from data import transforms\n'), ((4390, 4449), 'util.load_model_weights', 'util.load_model_weights', ([], {'model': 'model', 'state_dict': 'state_dict'}), '(model=model, state_dict=state_dict)\n', (4413, 4449), False, 'import util\n'), ((4504, 4532), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (4525, 4532), False, 'import torch\n'), ((5217, 5255), 'torch.FloatTensor', 'torch.FloatTensor', (['config.loss_weights'], {}), '(config.loss_weights)\n', (5234, 5255), False, 'import torch\n'), ((7137, 7157), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (7151, 7157), True, 'import numpy as np\n'), ((7166, 7189), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (7183, 7189), False, 'import torch\n'), ((7201, 7226), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7224, 7226), False, 'import torch\n'), ((1506, 1521), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1519, 1521), False, 'import torch\n'), ((2702, 2727), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2725, 2727), False, 'import torch\n'), ((5636, 5669), 'util.to_device', 'util.to_device', (['imgs'], {'gpu': 'use_gpu'}), '(imgs, gpu=use_gpu)\n', (5650, 5669), False, 'import util\n'), ((5691, 5726), 'util.to_device', 'util.to_device', (['labels'], {'gpu': 'use_gpu'}), '(labels, gpu=use_gpu)\n', (5705, 5726), False, 'import util\n'), ((7240, 7272), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (7266, 7272), False, 'import torch\n'), ((6186, 6217), 'util.clf_metrics', 'util.clf_metrics', (['preds', 'labels'], {}), '(preds, labels)\n', (6202, 6217), False, 'import util\n'), ((6239, 6272), 'util.get_learning_rate', 'util.get_learning_rate', (['optimizer'], {}), '(optimizer)\n', (6261, 6272), False, 'import util\n'), ((1629, 1655), 'torch.argmax', 'torch.argmax', (['probs'], {'dim': '(1)'}), '(probs, dim=1)\n', (1641, 1655), False, 'import torch\n'), ((6049, 6075), 'torch.argmax', 'torch.argmax', (['probs'], {'dim': '(1)'}), '(probs, dim=1)\n', (6061, 6075), False, 'import torch\n')]
|
import cv2
import numpy as np
from calibration import get_calib_from_file
# kitti
# name = '000000'
# pc_pathname = '/home/alex/github/waymo_to_kitti_converter/tools/kitti/velodyne/'+name+'.bin'
# img_pathname = '/home/alex/github/waymo_to_kitti_converter/tools/kitti/image_2/'+name+'.png'
# calib_pathname = '/home/alex/github/waymo_to_kitti_converter/tools/kitti/calib/'+name+'.txt'
# waymo-kitti
name = '00000-00001'
pc_pathname = '/home/alex/github/waymo_to_kitti_converter/tools/waymo_kitti/velodyne/'+name+'.bin'
img_pathname = '/home/alex/github/waymo_to_kitti_converter/tools/waymo_kitti/image_0/'+name+'.png'
calib_pathname = '/home/alex/github/waymo_to_kitti_converter/tools/waymo_kitti/calib/'+name+'.txt'
def cart_to_homo(mat):
mat = np.vstack([mat, np.ones((1, mat.shape[1]))])
return mat
def pc_to_pt(pc, V2C, R0, P):
def cart2hom(pts_3d):
""" Input: nx3 points in Cartesian
Oupput: nx4 points in Homogeneous by pending 1
"""
n = pts_3d.shape[0]
pts_3d_hom = np.hstack((pts_3d, np.ones((n, 1))))
return pts_3d_hom
def project_velo_to_ref(pts_3d_velo):
pts_3d_velo = cart2hom(pts_3d_velo) # nx4
return np.dot(pts_3d_velo, np.transpose(V2C))
def project_ref_to_rect(pts_3d_ref):
""" Input and Output are nx3 points """
return np.transpose(np.dot(R0, np.transpose(pts_3d_ref)))
def project_rect_to_image(pts_3d_rect):
""" Input: nx3 points in rect camera coord.
Output: nx2 points in image2 coord.
"""
pts_3d_rect = cart2hom(pts_3d_rect)
pts_2d = np.dot(pts_3d_rect, np.transpose(P)) # nx3
pts_2d[:, 0] /= pts_2d[:, 2]
pts_2d[:, 1] /= pts_2d[:, 2]
return pts_2d[:, 0:2]
# filter behind
ind = pc[:, 0] > 0 # lidar: x is front
pc = pc[ind, :]
print('pc', pc)
ref = project_velo_to_ref(pc)
print('ref',ref)
rect = project_ref_to_rect(ref)
print('rect', rect)
depth = rect[:, 2]
print(rect.shape, depth.shape)
image = project_rect_to_image(rect)
return image, depth
def main():
calib = get_calib_from_file(calib_pathname)
v2c = calib['Tr_velo2cam']
r0 = calib['R0']
px = calib['P2']
# v2c = np.array([
# [7.533745000000e-03, -9.999714000000e-01, -6.166020000000e-04, -4.069766000000e-03],
# [1.480249000000e-02, 7.280733000000e-04, -9.998902000000e-01, -7.631618000000e-02],
# [9.998621000000e-01, 7.523790000000e-03, 1.480755000000e-02, -2.717806000000e-01]])
# r0 = np.array([
# [9.999239000000e-01, 9.837760000000e-03, -7.445048000000e-03],
# [-9.869795000000e-03, 9.999421000000e-01, -4.278459000000e-03],
# [7.402527000000e-03, 4.351614000000e-03, 9.999631000000e-01]])
# px = np.array([
# [7.215377000000e+02, 0.000000000000e+00, 6.095593000000e+02, 4.485728000000e+01],
# [0.000000000000e+00, 7.215377000000e+02, 1.728540000000e+02, 2.163791000000e-01],
# [0.000000000000e+00, 0.000000000000e+00, 1.000000000000e+00, 2.745884000000e-03]])
pc = np.fromfile(pc_pathname, dtype=np.float32).reshape((-1, 4))[:, :3]
# filter all behind image plane
keep = []
for i in range(pc.shape[0]):
p = pc[i, :]
if p[0] > 0:
keep.append(p)
# pc = np.vstack(keep)
#
# tmp = np.eye(4)
# tmp[:3, :3] = r0
# r0 = tmp
# pc = np.transpose(pc) # (n,3) -> (3,n)
# pc = cart_to_homo(pc) # (3,n) -> (4,n)
#
# v2c = cart_to_homo(v2c) # (3,4) -> (4,4)
#
# print(px.shape, r0.shape, v2c.shape, pc.shape)
pt, depth = pc_to_pt(pc, v2c, r0, px)
print(pt.shape, depth.shape)
# pt = px @ r0 @ v2c @ pc
# print(pt.shape)
# pt = pt[:2] / pt[2]
print(pt)
import matplotlib.pyplot as plt
cmap = plt.cm.get_cmap("hsv", 256)
cmap = np.array([cmap(i) for i in range(256)])[:, :3] * 255
# draw
img = cv2.imread(img_pathname)
for i in range(pt.shape[0]):
x = pt[i, 0]
y = pt[i, 1]
color = cmap[np.clip(640/depth[i], 0, 255).astype(np.int), :]
# if 0 < x < 1920 and 0 < y < 1080:
# print('yah')
# print(int(x), int(y))
cv2.circle(img, (int(x), int(y)), 1, tuple(color), -1)
cv2.namedWindow('image', cv2.WINDOW_NORMAL)
while True:
cv2.imshow('image', img)
key = cv2.waitKey(1)
if key == 27: # exit
break
elif key != -1:
print('Undefined key:', key)
if __name__ == '__main__':
main()
|
[
"cv2.waitKey",
"calibration.get_calib_from_file",
"numpy.fromfile",
"numpy.transpose",
"numpy.ones",
"numpy.clip",
"cv2.imread",
"cv2.imshow",
"matplotlib.pyplot.cm.get_cmap",
"cv2.namedWindow"
] |
[((2141, 2176), 'calibration.get_calib_from_file', 'get_calib_from_file', (['calib_pathname'], {}), '(calib_pathname)\n', (2160, 2176), False, 'from calibration import get_calib_from_file\n'), ((3845, 3872), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['"""hsv"""', '(256)'], {}), "('hsv', 256)\n", (3860, 3872), True, 'import matplotlib.pyplot as plt\n'), ((3959, 3983), 'cv2.imread', 'cv2.imread', (['img_pathname'], {}), '(img_pathname)\n', (3969, 3983), False, 'import cv2\n'), ((4301, 4344), 'cv2.namedWindow', 'cv2.namedWindow', (['"""image"""', 'cv2.WINDOW_NORMAL'], {}), "('image', cv2.WINDOW_NORMAL)\n", (4316, 4344), False, 'import cv2\n'), ((4369, 4393), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'img'], {}), "('image', img)\n", (4379, 4393), False, 'import cv2\n'), ((4408, 4422), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (4419, 4422), False, 'import cv2\n'), ((770, 796), 'numpy.ones', 'np.ones', (['(1, mat.shape[1])'], {}), '((1, mat.shape[1]))\n', (777, 796), True, 'import numpy as np\n'), ((1228, 1245), 'numpy.transpose', 'np.transpose', (['V2C'], {}), '(V2C)\n', (1240, 1245), True, 'import numpy as np\n'), ((1641, 1656), 'numpy.transpose', 'np.transpose', (['P'], {}), '(P)\n', (1653, 1656), True, 'import numpy as np\n'), ((1055, 1070), 'numpy.ones', 'np.ones', (['(n, 1)'], {}), '((n, 1))\n', (1062, 1070), True, 'import numpy as np\n'), ((1376, 1400), 'numpy.transpose', 'np.transpose', (['pts_3d_ref'], {}), '(pts_3d_ref)\n', (1388, 1400), True, 'import numpy as np\n'), ((3109, 3151), 'numpy.fromfile', 'np.fromfile', (['pc_pathname'], {'dtype': 'np.float32'}), '(pc_pathname, dtype=np.float32)\n', (3120, 3151), True, 'import numpy as np\n'), ((4081, 4112), 'numpy.clip', 'np.clip', (['(640 / depth[i])', '(0)', '(255)'], {}), '(640 / depth[i], 0, 255)\n', (4088, 4112), True, 'import numpy as np\n')]
|
from __future__ import division, print_function
# coding=utf-8
import sys
import os
import glob
import re
import numpy as np
import tensorflow as tf
import pathlib
import wget
# from tensorflow.compat.v1.compat import ConfigProto
# from tensorflow.compat.v1 import InteractiveSession
#from tensorflow.python.client.session import InteractiveSession
# config = tf.ConfigProto()
# config.gpu_options.per_process_gpu_memory_fraction = 0.2
# config.gpu_options.allow_growth = True
# session = InteractiveSession(config=config)
# Keras
from tensorflow.keras.applications.resnet50 import preprocess_input
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image
# Flask utils
from flask import Flask, redirect, url_for, request, render_template
from werkzeug.utils import secure_filename
# from gevent.pywsgi import WSGIServer
# Model saved with Keras model.save()
MODEL_PATH = 'model_resnet.hdf5'
MODEL_URL = 'https://github.com/DARK-art108/Cotton-Leaf-Disease-Prediction/releases/download/v1.0/model_resnet.hdf5'
UPLOAD_FOLDER = os.path.join(os.path.dirname(__file__), 'static', 'uploads')
# Download model if not present
while not pathlib.Path(MODEL_PATH).is_file():
print(f'Model {MODEL_PATH} not found. Downloading...')
wget.download(MODEL_URL)
# Define a flask app
app = Flask(__name__)
# Define upload path
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
# Developing in the absence of TensorFlow :P (Python 3.9.0 x64)
# def load_model(aa):
# class a:
# @staticmethod
# def predict(*args):
# return 1
# return a()
# class image:
# @staticmethod
# def load_img(path, target_size):
# return 'a'
# @staticmethod
# def img_to_array(img):
# return 'v'
# Load your trained model
model = load_model(MODEL_PATH)
def model_predict(img_path, model):
print(img_path)
img = image.load_img(img_path, target_size=(224, 224))
# Preprocessing the image
x = image.img_to_array(img)
# x = np.true_divide(x, 255)
## Scaling
x = x / 255
x = np.expand_dims(x, axis=0)
# Be careful how your trained model deals with the input
# otherwise, it won't make correct prediction!
# x = preprocess_input(x)
preds = model.predict(x)
preds = np.argmax(preds, axis=1)
if preds == 0:
preds = "The leaf is a diseased cotton leaf."
elif preds == 1:
preds = "The leaf is a diseased cotton plant."
elif preds == 2:
preds = "The leaf is a fresh cotton leaf."
else:
preds = "The leaf is a fresh cotton plant."
return preds
@app.route('/', methods=['GET', 'POST'])
def index():
# Main page
if request.method == 'POST':
# Get the file from post request
print(request.files, request.form, request.args)
f = None
if 'image' in request.files: f = request.files['image']
if f:
# Save the file to ./uploads
file_path = os.path.join(
app.config['UPLOAD_FOLDER'], secure_filename(f.filename))
f.save(file_path)
# Make prediction
preds = model_predict(file_path, model)
result = preds
return render_template('index.html', result=result, img=secure_filename(f.filename))
return render_template('index.html', result=None, err='Failed to receive file')
# First time
return render_template('index.html', result=None)
if __name__ == '__main__':
app.run(port=5001, debug=True)
|
[
"tensorflow.keras.models.load_model",
"numpy.argmax",
"os.path.dirname",
"flask.Flask",
"tensorflow.keras.preprocessing.image.img_to_array",
"numpy.expand_dims",
"werkzeug.utils.secure_filename",
"wget.download",
"tensorflow.keras.preprocessing.image.load_img",
"pathlib.Path",
"flask.render_template"
] |
[((1368, 1383), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (1373, 1383), False, 'from flask import Flask, redirect, url_for, request, render_template\n'), ((1871, 1893), 'tensorflow.keras.models.load_model', 'load_model', (['MODEL_PATH'], {}), '(MODEL_PATH)\n', (1881, 1893), False, 'from tensorflow.keras.models import load_model\n'), ((1117, 1142), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1132, 1142), False, 'import os\n'), ((1312, 1336), 'wget.download', 'wget.download', (['MODEL_URL'], {}), '(MODEL_URL)\n', (1325, 1336), False, 'import wget\n'), ((1967, 2015), 'tensorflow.keras.preprocessing.image.load_img', 'image.load_img', (['img_path'], {'target_size': '(224, 224)'}), '(img_path, target_size=(224, 224))\n', (1981, 2015), False, 'from tensorflow.keras.preprocessing import image\n'), ((2058, 2081), 'tensorflow.keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (2076, 2081), False, 'from tensorflow.keras.preprocessing import image\n'), ((2158, 2183), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (2172, 2183), True, 'import numpy as np\n'), ((2376, 2400), 'numpy.argmax', 'np.argmax', (['preds'], {'axis': '(1)'}), '(preds, axis=1)\n', (2385, 2400), True, 'import numpy as np\n'), ((3539, 3581), 'flask.render_template', 'render_template', (['"""index.html"""'], {'result': 'None'}), "('index.html', result=None)\n", (3554, 3581), False, 'from flask import Flask, redirect, url_for, request, render_template\n'), ((3436, 3508), 'flask.render_template', 'render_template', (['"""index.html"""'], {'result': 'None', 'err': '"""Failed to receive file"""'}), "('index.html', result=None, err='Failed to receive file')\n", (3451, 3508), False, 'from flask import Flask, redirect, url_for, request, render_template\n'), ((1211, 1235), 'pathlib.Path', 'pathlib.Path', (['MODEL_PATH'], {}), '(MODEL_PATH)\n', (1223, 1235), False, 'import pathlib\n'), ((3148, 3175), 'werkzeug.utils.secure_filename', 'secure_filename', (['f.filename'], {}), '(f.filename)\n', (3163, 3175), False, 'from werkzeug.utils import secure_filename\n'), ((3391, 3418), 'werkzeug.utils.secure_filename', 'secure_filename', (['f.filename'], {}), '(f.filename)\n', (3406, 3418), False, 'from werkzeug.utils import secure_filename\n')]
|
#!/usr/bin/env python
# coding=utf-8
import json
import pytest
from bsAbstimmungen.importer.votingimporter import VotingParser
from bsAbstimmungen.exceptions import AlreadyImportedException
from ..utils import mockdb
def test_raise_exception_when_alread_parsed(mockdb):
parser = VotingParser(mockdb)
parser.parse(
'tests/data/Abst_0205_20130109_111125_0003_0000_sa.pdf'
)
with pytest.raises(AlreadyImportedException) as excinfo:
parser.parse('tests/data/Abst_0205_20130109_111125_0003_0000_sa.pdf')
def test_reuse_existing_councillors(mockdb):
parser = VotingParser(mockdb)
parser.parse(
'tests/data/Abst_0205_20130109_111125_0003_0000_sa.pdf'
)
parser.parse(
'tests/data/Abst_0147_20130605_090518_0001_0000_ab.pdf'
)
# Check the rough numbers
assert 2 == mockdb['votes'].count()
assert 124 == mockdb['councillors'].count()
def test_multiline_affairs(mockdb):
parser = VotingParser(mockdb)
parser.parse('tests/data/Abst_0205_20130109_111125_0003_0000_sa.pdf')
vote = mockdb['votes'].find()[0]
assert ('Bericht der Umwelt-, Verkehrs- und '
'Energiekommission zum Ratschlag Nr. 12.0788.01 '
'Rahmenausgabenbewilligung zur weiteren Umsetzung '
'von Tempo 30. Projektierung und Umsetzung von '
'Massnahmen aus dem aktualisierten Tempo 30-Konzept '
'sowie Bericht zu zehn Anzügen und zu zwei '
'Petitionen sowie Bericht der Kommissionsminderheit' ==
vote['affair'])
def test_parser_extracts_data(mockdb):
parser = VotingParser(mockdb)
parser.parse(
'tests/data/Abst_0147_20130605_090518_0001_0000_ab.pdf'
)
assert 1 == mockdb['votes'].count()
assert 100 == mockdb['councillors'].count()
# Load verification details
verification = json.load(open(
'tests/data/Abst_0147_20130605_090518_0001_0000_ab.json'
))
# Verify the imported vote
vote = mockdb['votes'].find_one({'nr': verification['vote']['nr']})
assert verification['vote']['timestamp'] == vote['timestamp'].isoformat()
assert verification['vote']['affair'] == vote['affair']
assert verification['vote']['proposal'] == vote['proposal']
assert verification['vote']['question'] == vote['question']
assert verification['vote']['type'] == vote['type']
# Verify all counillors
for councillor in verification['votings']:
loaded = mockdb['councillors'].find_one({'fullname':
councillor['name']})
assert councillor['name'] == loaded['fullname']
assert councillor['fraction'] == loaded['fraction']
assert councillor['voting'] == loaded['votings'][0]['voting']
|
[
"pytest.raises",
"bsAbstimmungen.importer.votingimporter.VotingParser"
] |
[((285, 305), 'bsAbstimmungen.importer.votingimporter.VotingParser', 'VotingParser', (['mockdb'], {}), '(mockdb)\n', (297, 305), False, 'from bsAbstimmungen.importer.votingimporter import VotingParser\n'), ((593, 613), 'bsAbstimmungen.importer.votingimporter.VotingParser', 'VotingParser', (['mockdb'], {}), '(mockdb)\n', (605, 613), False, 'from bsAbstimmungen.importer.votingimporter import VotingParser\n'), ((960, 980), 'bsAbstimmungen.importer.votingimporter.VotingParser', 'VotingParser', (['mockdb'], {}), '(mockdb)\n', (972, 980), False, 'from bsAbstimmungen.importer.votingimporter import VotingParser\n'), ((1602, 1622), 'bsAbstimmungen.importer.votingimporter.VotingParser', 'VotingParser', (['mockdb'], {}), '(mockdb)\n', (1614, 1622), False, 'from bsAbstimmungen.importer.votingimporter import VotingParser\n'), ((403, 442), 'pytest.raises', 'pytest.raises', (['AlreadyImportedException'], {}), '(AlreadyImportedException)\n', (416, 442), False, 'import pytest\n')]
|
from __future__ import absolute_import, print_function, unicode_literals
import sys
from tabulate import tabulate
from metapub import MedGenFetcher
try:
term = sys.argv[1]
except IndexError:
print('Supply a disease/syndrome/condition name in quotation marks as the argument to this script.')
sys.exit()
####
import logging
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("eutils").setLevel(logging.INFO)
####
fetch = MedGenFetcher()
uids = fetch.uids_by_term(term)
print(uids)
headers = ['CUI', 'Title', 'Semantic Type', 'MedGenUID',
'OMIM ID', 'Modes of Inheritance', 'Assoc Genes', ]
table = []
def _join_or_NA(some_list, select=None, joiner=','):
'returns a joined string or NA if empty'
if some_list and select:
return joiner.join(item[select] for item in some_list)
elif some_list:
return joiner.join([item for item in some_list])
else:
return 'NA'
for this_id in uids:
concept = fetch.concept_by_uid(this_id)
#print concept.to_dict()
assert concept.medgen_uid == this_id
if concept.associated_genes:
line = [concept.CUI, concept.title, concept.semantic_type, concept.medgen_uid]
line.append(_join_or_NA(concept.OMIM))
line.append(_join_or_NA(concept.modes_of_inheritance, 'name'))
line.append(_join_or_NA(concept.associated_genes, 'hgnc'))
table.append(line)
else:
continue
print(tabulate(table, headers, tablefmt="simple"))
|
[
"metapub.MedGenFetcher",
"logging.getLogger",
"tabulate.tabulate",
"sys.exit"
] |
[((460, 475), 'metapub.MedGenFetcher', 'MedGenFetcher', ([], {}), '()\n', (473, 475), False, 'from metapub import MedGenFetcher\n'), ((1457, 1500), 'tabulate.tabulate', 'tabulate', (['table', 'headers'], {'tablefmt': '"""simple"""'}), "(table, headers, tablefmt='simple')\n", (1465, 1500), False, 'from tabulate import tabulate\n'), ((307, 317), 'sys.exit', 'sys.exit', ([], {}), '()\n', (315, 317), False, 'import sys\n'), ((339, 368), 'logging.getLogger', 'logging.getLogger', (['"""requests"""'], {}), "('requests')\n", (356, 368), False, 'import logging\n'), ((395, 422), 'logging.getLogger', 'logging.getLogger', (['"""eutils"""'], {}), "('eutils')\n", (412, 422), False, 'import logging\n')]
|
from unittest import mock
from cterasdk import exception
from cterasdk.common import Object
from cterasdk.edge.enum import VolumeStatus
from cterasdk.edge import volumes
from tests.ut import base_edge
class TestEdgeVolumes(base_edge.BaseEdgeTest):
_drive_1 = 'SATA-1'
_drive_2 = 'SATA-2'
_drive_size = 81920
_mount_task_1 = (1, 'Task 1')
_mount_task_2 = (2, 'Task 2')
def setUp(self):
super().setUp()
self._volume_1_name = 'localcache'
self._volume_2_name = 'logs'
self._volumes = [self._volume_1_name, self._volume_2_name]
self._volume_passphrase = 'password'
self._mount_id = 'task id'
def test_get_all_volumes(self):
get_response = 'Success'
self._init_filer(get_response=get_response)
ret = volumes.Volumes(self._filer).get()
self._filer.get.assert_called_once_with('/config/storage/volumes')
self.assertEqual(ret, get_response)
def test_get_volume(self):
get_response = 'Success'
self._init_filer(get_response=get_response)
ret = volumes.Volumes(self._filer).get(self._volume_1_name)
self._filer.get.assert_called_once_with('/config/storage/volumes/' + self._volume_1_name)
self.assertEqual(ret, get_response)
def test_add_volume_default_args_single_device_success(self):
add_response = 'Success'
self._init_filer(add_response=add_response)
self._filer.get = mock.MagicMock(side_effect=TestEdgeVolumes._mock_no_arrays_single_drive)
track_volume_creation_status_mock = self.patch_call("cterasdk.edge.volumes.track")
ret = volumes.Volumes(self._filer).add(self._volume_1_name)
self._filer.get.assert_has_calls(
[
mock.call('/status/storage/arrays'),
mock.call('/status/storage/disks')
]
)
track_volume_creation_status_mock.assert_called_once_with(self._filer,
'/status/storage/volumes/' + self._volume_1_name + '/status',
[VolumeStatus.Ok],
[VolumeStatus.Formatting],
[VolumeStatus.Mounting, VolumeStatus.Checking, VolumeStatus.Repairing],
[VolumeStatus.Corrupted, VolumeStatus.Unknown])
self._filer.add.assert_called_once_with('/config/storage/volumes', mock.ANY)
expected_param = self._get_add_volume_param(device=TestEdgeVolumes._drive_1, size=TestEdgeVolumes._drive_size)
actual_param = self._filer.add.call_args[0][1]
self._assert_equal_objects(actual_param, expected_param)
self.assertEqual(ret, add_response)
def test_add_encrypted_volume_default_args_single_device_success(self):
add_response = 'Success'
self._init_filer(add_response=add_response)
self._filer.get = mock.MagicMock(side_effect=TestEdgeVolumes._mock_no_arrays_single_drive)
track_volume_creation_status_mock = self.patch_call("cterasdk.edge.volumes.track")
ret = volumes.Volumes(self._filer).add(self._volume_1_name, passphrase=self._volume_passphrase)
self._filer.get.assert_has_calls(
[
mock.call('/status/storage/arrays'),
mock.call('/status/storage/disks')
]
)
track_volume_creation_status_mock.assert_called_once_with(self._filer,
'/status/storage/volumes/' + self._volume_1_name + '/status',
[VolumeStatus.Ok],
[VolumeStatus.Formatting],
[VolumeStatus.Mounting, VolumeStatus.Checking, VolumeStatus.Repairing],
[VolumeStatus.Corrupted, VolumeStatus.Unknown])
self._filer.add.assert_called_once_with('/config/storage/volumes', mock.ANY)
expected_param = self._get_add_volume_param(device=TestEdgeVolumes._drive_1,
size=TestEdgeVolumes._drive_size,
passphrase=self._volume_passphrase)
actual_param = self._filer.add.call_args[0][1]
self._assert_equal_objects(actual_param, expected_param)
self.assertEqual(ret, add_response)
def test_add_volume_no_devices(self):
self._init_filer()
self._filer.get = mock.MagicMock(side_effect=TestEdgeVolumes._mock_no_devices)
with self.assertRaises(exception.CTERAException) as error:
volumes.Volumes(self._filer).add(self._volume_1_name)
self._filer.get.assert_has_calls(
[
mock.call('/status/storage/arrays'),
mock.call('/status/storage/disks')
]
)
self.assertEqual('Could not find any drives or arrays', error.exception.message)
def test_add_volume_invalid_device_name(self):
self._init_filer()
self._filer.get = mock.MagicMock(side_effect=TestEdgeVolumes._mock_no_arrays_multiple_drive)
with self.assertRaises(exception.InputError) as error:
volumes.Volumes(self._filer).add(self._volume_1_name, device='Invalid device name')
self._filer.get.assert_has_calls(
[
mock.call('/status/storage/arrays'),
mock.call('/status/storage/disks')
]
)
self.assertEqual('Invalid device name', error.exception.message)
def test_add_volume_must_specify_device_name(self):
self._init_filer()
self._filer.get = mock.MagicMock(side_effect=TestEdgeVolumes._mock_no_arrays_multiple_drive)
with self.assertRaises(exception.CTERAException) as error:
volumes.Volumes(self._filer).add(self._volume_1_name)
self._filer.get.assert_has_calls(
[
mock.call('/status/storage/arrays'),
mock.call('/status/storage/disks')
]
)
self.assertEqual('You must specify a drive or an array name', error.exception.message)
def test_add_volume_with_device_success(self):
add_response = 'Success'
self._init_filer(add_response=add_response)
self._filer.get = mock.MagicMock(side_effect=TestEdgeVolumes._mock_no_arrays_multiple_drive)
track_volume_creation_status_mock = self.patch_call("cterasdk.edge.volumes.track")
ret = volumes.Volumes(self._filer).add(self._volume_1_name, device=TestEdgeVolumes._drive_1)
self._filer.get.assert_has_calls(
[
mock.call('/status/storage/arrays'),
mock.call('/status/storage/disks')
]
)
track_volume_creation_status_mock.assert_called_once_with(self._filer,
'/status/storage/volumes/' + self._volume_1_name + '/status',
[VolumeStatus.Ok],
[VolumeStatus.Formatting],
[VolumeStatus.Mounting, VolumeStatus.Checking, VolumeStatus.Repairing],
[VolumeStatus.Corrupted, VolumeStatus.Unknown])
self._filer.add.assert_called_once_with('/config/storage/volumes', mock.ANY)
expected_param = self._get_add_volume_param(device=TestEdgeVolumes._drive_1, size=TestEdgeVolumes._drive_size)
actual_param = self._filer.add.call_args[0][1]
self._assert_equal_objects(actual_param, expected_param)
self.assertEqual(ret, add_response)
def test_add_volume_exceeding_drive_size(self):
self._init_filer()
self._filer.get = mock.MagicMock(side_effect=TestEdgeVolumes._mock_no_arrays_single_drive)
with self.assertRaises(exception.InputError) as error:
volumes.Volumes(self._filer).add(self._volume_1_name, size=999999999)
self._filer.get.assert_has_calls(
[
mock.call('/status/storage/arrays'),
mock.call('/status/storage/disks')
]
)
self.assertEqual('You cannot exceed the available storage capacity', error.exception.message)
def test_delete_volume_success(self):
delete_response = 'Success'
self._init_filer(delete_response=delete_response)
self._filer.tasks.by_name = mock.MagicMock(return_value=[TestEdgeVolumes._get_pending_mount_task(self._mount_id)])
self._filer.tasks.wait = mock.MagicMock()
ret = volumes.Volumes(self._filer).delete(self._volume_1_name)
self._filer.tasks.by_name.assert_called_once_with(' '.join(['Mounting', self._volume_1_name, 'file system']))
self._filer.tasks.wait.assert_called_once_with(self._mount_id)
self._filer.delete.assert_called_once_with('/config/storage/volumes/' + self._volume_1_name)
self.assertEqual(ret, delete_response)
def test_delete_volume_raise(self):
self._init_filer()
self._filer.delete = mock.MagicMock(side_effect=exception.CTERAException())
self._filer.tasks.by_name = mock.MagicMock(return_value=[])
with self.assertRaises(exception.CTERAException) as error:
volumes.Volumes(self._filer).delete(self._volume_1_name)
self._filer.tasks.by_name.assert_called_once_with(' '.join(['Mounting', self._volume_1_name, 'file system']))
self._filer.delete.assert_called_once_with('/config/storage/volumes/' + self._volume_1_name)
self.assertEqual('Volume deletion falied', error.exception.message)
def test_delete_all_volume_success(self):
delete_response = 'Success'
self._init_filer(get_response=self._get_volumes_response_param(), delete_response=delete_response)
self._filer.tasks.running = mock.MagicMock(return_value=TestEdgeVolumes._get_pending_mount_tasks())
self._filer.tasks.by_name = mock.MagicMock()
self._filer.tasks.wait = mock.MagicMock()
volumes.Volumes(self._filer).delete_all()
self._filer.get.assert_called_once_with('/config/storage/volumes')
self._filer.tasks.running.assert_called_once()
self._filer.delete.assert_has_calls(
[
mock.call('/config/storage/volumes/' + self._volume_1_name),
mock.call('/config/storage/volumes/' + self._volume_2_name)
]
)
def test_modify_volume_success(self):
before_volume_size = 1000
after_volume_size = 9999
put_response = 'Success'
self._init_filer(get_response=TestEdgeVolumes._get_volume_response(self._volume_1_name, before_volume_size),
put_response=put_response)
ret = volumes.Volumes(self._filer).modify(self._volume_1_name, 9999)
self._filer.get.assert_called_once_with('/config/storage/volumes/' + self._volume_1_name)
self._filer.put.assert_called_once_with('/config/storage/volumes/' + self._volume_1_name, mock.ANY)
expected_param = TestEdgeVolumes._get_volume_response(self._volume_1_name, after_volume_size)
actual_param = self._filer.put.call_args[0][1]
self._assert_equal_objects(actual_param, expected_param)
self.assertEqual(ret, put_response)
def test_modify_volume_not_found(self):
self._init_filer()
self._filer.get = mock.MagicMock(side_effect=exception.CTERAException())
with self.assertRaises(exception.CTERAException) as error:
volumes.Volumes(self._filer).modify(self._volume_1_name, 9999)
self._filer.get.assert_called_once_with('/config/storage/volumes/' + self._volume_1_name)
self.assertEqual('Failed to get the volume', error.exception.message)
@staticmethod
def _get_volume_response(name, size):
param = Object()
param.name = name
param.size = size
return param
def _get_volumes_response_param(self):
storage_volumes = []
for volume_name in self._volumes:
param = Object()
param.name = volume_name
storage_volumes.append(param)
return storage_volumes
def test_delete_no_volumes_found(self):
self._init_filer(get_response=[])
self._filer.tasks.running = mock.MagicMock(return_value=[])
volumes.Volumes(self._filer).delete_all()
self._filer.get.assert_called_once_with('/config/storage/volumes')
@staticmethod
def _get_pending_mount_tasks():
mount_id, task_name = TestEdgeVolumes._mount_task_1
task_1 = TestEdgeVolumes._get_pending_mount_task(mount_id, task_name)
mount_id, task_name = TestEdgeVolumes._mount_task_2
task_2 = TestEdgeVolumes._get_pending_mount_task(mount_id, task_name)
return [task_1, task_2]
@staticmethod
def _get_pending_mount_task(mount_id=None, name=None):
param = Object()
if mount_id:
param.id = mount_id
if name:
param.name = name
return param
@staticmethod
def _get_drive(name, capacity):
param = Object()
param.name = name
param.availableCapacity = capacity
return param
def _get_add_volume_param(self, device=None, size=None, passphrase=None):
param = Object()
param.name = self._volume_1_name
if device:
param.device = device
if size:
param.size = size
param.fileSystemType = 'xfs'
if passphrase:
param.encrypted = True
param.encPassphrase = passphrase
return param
@staticmethod
def _mock_no_devices(path):
if path == '/status/storage/arrays':
return []
if path == '/status/storage/disks':
return []
return None
@staticmethod
def _mock_no_arrays_single_drive(path):
if path == '/status/storage/arrays':
return []
if path == '/status/storage/disks':
return [TestEdgeVolumes._get_drive(TestEdgeVolumes._drive_1, TestEdgeVolumes._drive_size)]
return None
@staticmethod
def _mock_no_arrays_multiple_drive(path):
if path == '/status/storage/arrays':
return []
if path == '/status/storage/disks':
return [TestEdgeVolumes._get_drive(TestEdgeVolumes._drive_1, TestEdgeVolumes._drive_size),
TestEdgeVolumes._get_drive(TestEdgeVolumes._drive_2, TestEdgeVolumes._drive_size)]
return None
|
[
"unittest.mock.MagicMock",
"cterasdk.common.Object",
"cterasdk.edge.volumes.Volumes",
"cterasdk.exception.CTERAException",
"unittest.mock.call"
] |
[((1460, 1532), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'side_effect': 'TestEdgeVolumes._mock_no_arrays_single_drive'}), '(side_effect=TestEdgeVolumes._mock_no_arrays_single_drive)\n', (1474, 1532), False, 'from unittest import mock\n'), ((3069, 3141), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'side_effect': 'TestEdgeVolumes._mock_no_arrays_single_drive'}), '(side_effect=TestEdgeVolumes._mock_no_arrays_single_drive)\n', (3083, 3141), False, 'from unittest import mock\n'), ((4762, 4822), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'side_effect': 'TestEdgeVolumes._mock_no_devices'}), '(side_effect=TestEdgeVolumes._mock_no_devices)\n', (4776, 4822), False, 'from unittest import mock\n'), ((5334, 5408), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'side_effect': 'TestEdgeVolumes._mock_no_arrays_multiple_drive'}), '(side_effect=TestEdgeVolumes._mock_no_arrays_multiple_drive)\n', (5348, 5408), False, 'from unittest import mock\n'), ((5935, 6009), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'side_effect': 'TestEdgeVolumes._mock_no_arrays_multiple_drive'}), '(side_effect=TestEdgeVolumes._mock_no_arrays_multiple_drive)\n', (5949, 6009), False, 'from unittest import mock\n'), ((6585, 6659), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'side_effect': 'TestEdgeVolumes._mock_no_arrays_multiple_drive'}), '(side_effect=TestEdgeVolumes._mock_no_arrays_multiple_drive)\n', (6599, 6659), False, 'from unittest import mock\n'), ((8147, 8219), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'side_effect': 'TestEdgeVolumes._mock_no_arrays_single_drive'}), '(side_effect=TestEdgeVolumes._mock_no_arrays_single_drive)\n', (8161, 8219), False, 'from unittest import mock\n'), ((8944, 8960), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (8958, 8960), False, 'from unittest import mock\n'), ((9557, 9588), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'return_value': '[]'}), '(return_value=[])\n', (9571, 9588), False, 'from unittest import mock\n'), ((10354, 10370), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (10368, 10370), False, 'from unittest import mock\n'), ((10404, 10420), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (10418, 10420), False, 'from unittest import mock\n'), ((12246, 12254), 'cterasdk.common.Object', 'Object', ([], {}), '()\n', (12252, 12254), False, 'from cterasdk.common import Object\n'), ((12705, 12736), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'return_value': '[]'}), '(return_value=[])\n', (12719, 12736), False, 'from unittest import mock\n'), ((13319, 13327), 'cterasdk.common.Object', 'Object', ([], {}), '()\n', (13325, 13327), False, 'from cterasdk.common import Object\n'), ((13520, 13528), 'cterasdk.common.Object', 'Object', ([], {}), '()\n', (13526, 13528), False, 'from cterasdk.common import Object\n'), ((13714, 13722), 'cterasdk.common.Object', 'Object', ([], {}), '()\n', (13720, 13722), False, 'from cterasdk.common import Object\n'), ((12463, 12471), 'cterasdk.common.Object', 'Object', ([], {}), '()\n', (12469, 12471), False, 'from cterasdk.common import Object\n'), ((801, 829), 'cterasdk.edge.volumes.Volumes', 'volumes.Volumes', (['self._filer'], {}), '(self._filer)\n', (816, 829), False, 'from cterasdk.edge import volumes\n'), ((1086, 1114), 'cterasdk.edge.volumes.Volumes', 'volumes.Volumes', (['self._filer'], {}), '(self._filer)\n', (1101, 1114), False, 'from cterasdk.edge import volumes\n'), ((1638, 1666), 'cterasdk.edge.volumes.Volumes', 'volumes.Volumes', (['self._filer'], {}), '(self._filer)\n', (1653, 1666), False, 'from cterasdk.edge import volumes\n'), ((1764, 1799), 'unittest.mock.call', 'mock.call', (['"""/status/storage/arrays"""'], {}), "('/status/storage/arrays')\n", (1773, 1799), False, 'from unittest import mock\n'), ((1817, 1851), 'unittest.mock.call', 'mock.call', (['"""/status/storage/disks"""'], {}), "('/status/storage/disks')\n", (1826, 1851), False, 'from unittest import mock\n'), ((3247, 3275), 'cterasdk.edge.volumes.Volumes', 'volumes.Volumes', (['self._filer'], {}), '(self._filer)\n', (3262, 3275), False, 'from cterasdk.edge import volumes\n'), ((3409, 3444), 'unittest.mock.call', 'mock.call', (['"""/status/storage/arrays"""'], {}), "('/status/storage/arrays')\n", (3418, 3444), False, 'from unittest import mock\n'), ((3462, 3496), 'unittest.mock.call', 'mock.call', (['"""/status/storage/disks"""'], {}), "('/status/storage/disks')\n", (3471, 3496), False, 'from unittest import mock\n'), ((5028, 5063), 'unittest.mock.call', 'mock.call', (['"""/status/storage/arrays"""'], {}), "('/status/storage/arrays')\n", (5037, 5063), False, 'from unittest import mock\n'), ((5081, 5115), 'unittest.mock.call', 'mock.call', (['"""/status/storage/disks"""'], {}), "('/status/storage/disks')\n", (5090, 5115), False, 'from unittest import mock\n'), ((5640, 5675), 'unittest.mock.call', 'mock.call', (['"""/status/storage/arrays"""'], {}), "('/status/storage/arrays')\n", (5649, 5675), False, 'from unittest import mock\n'), ((5693, 5727), 'unittest.mock.call', 'mock.call', (['"""/status/storage/disks"""'], {}), "('/status/storage/disks')\n", (5702, 5727), False, 'from unittest import mock\n'), ((6215, 6250), 'unittest.mock.call', 'mock.call', (['"""/status/storage/arrays"""'], {}), "('/status/storage/arrays')\n", (6224, 6250), False, 'from unittest import mock\n'), ((6268, 6302), 'unittest.mock.call', 'mock.call', (['"""/status/storage/disks"""'], {}), "('/status/storage/disks')\n", (6277, 6302), False, 'from unittest import mock\n'), ((6765, 6793), 'cterasdk.edge.volumes.Volumes', 'volumes.Volumes', (['self._filer'], {}), '(self._filer)\n', (6780, 6793), False, 'from cterasdk.edge import volumes\n'), ((6924, 6959), 'unittest.mock.call', 'mock.call', (['"""/status/storage/arrays"""'], {}), "('/status/storage/arrays')\n", (6933, 6959), False, 'from unittest import mock\n'), ((6977, 7011), 'unittest.mock.call', 'mock.call', (['"""/status/storage/disks"""'], {}), "('/status/storage/disks')\n", (6986, 7011), False, 'from unittest import mock\n'), ((8437, 8472), 'unittest.mock.call', 'mock.call', (['"""/status/storage/arrays"""'], {}), "('/status/storage/arrays')\n", (8446, 8472), False, 'from unittest import mock\n'), ((8490, 8524), 'unittest.mock.call', 'mock.call', (['"""/status/storage/disks"""'], {}), "('/status/storage/disks')\n", (8499, 8524), False, 'from unittest import mock\n'), ((8975, 9003), 'cterasdk.edge.volumes.Volumes', 'volumes.Volumes', (['self._filer'], {}), '(self._filer)\n', (8990, 9003), False, 'from cterasdk.edge import volumes\n'), ((9493, 9519), 'cterasdk.exception.CTERAException', 'exception.CTERAException', ([], {}), '()\n', (9517, 9519), False, 'from cterasdk import exception\n'), ((10429, 10457), 'cterasdk.edge.volumes.Volumes', 'volumes.Volumes', (['self._filer'], {}), '(self._filer)\n', (10444, 10457), False, 'from cterasdk.edge import volumes\n'), ((10676, 10735), 'unittest.mock.call', 'mock.call', (["('/config/storage/volumes/' + self._volume_1_name)"], {}), "('/config/storage/volumes/' + self._volume_1_name)\n", (10685, 10735), False, 'from unittest import mock\n'), ((10753, 10812), 'unittest.mock.call', 'mock.call', (["('/config/storage/volumes/' + self._volume_2_name)"], {}), "('/config/storage/volumes/' + self._volume_2_name)\n", (10762, 10812), False, 'from unittest import mock\n'), ((11163, 11191), 'cterasdk.edge.volumes.Volumes', 'volumes.Volumes', (['self._filer'], {}), '(self._filer)\n', (11178, 11191), False, 'from cterasdk.edge import volumes\n'), ((11823, 11849), 'cterasdk.exception.CTERAException', 'exception.CTERAException', ([], {}), '()\n', (11847, 11849), False, 'from cterasdk import exception\n'), ((12745, 12773), 'cterasdk.edge.volumes.Volumes', 'volumes.Volumes', (['self._filer'], {}), '(self._filer)\n', (12760, 12773), False, 'from cterasdk.edge import volumes\n'), ((4902, 4930), 'cterasdk.edge.volumes.Volumes', 'volumes.Volumes', (['self._filer'], {}), '(self._filer)\n', (4917, 4930), False, 'from cterasdk.edge import volumes\n'), ((5484, 5512), 'cterasdk.edge.volumes.Volumes', 'volumes.Volumes', (['self._filer'], {}), '(self._filer)\n', (5499, 5512), False, 'from cterasdk.edge import volumes\n'), ((6089, 6117), 'cterasdk.edge.volumes.Volumes', 'volumes.Volumes', (['self._filer'], {}), '(self._filer)\n', (6104, 6117), False, 'from cterasdk.edge import volumes\n'), ((8295, 8323), 'cterasdk.edge.volumes.Volumes', 'volumes.Volumes', (['self._filer'], {}), '(self._filer)\n', (8310, 8323), False, 'from cterasdk.edge import volumes\n'), ((9668, 9696), 'cterasdk.edge.volumes.Volumes', 'volumes.Volumes', (['self._filer'], {}), '(self._filer)\n', (9683, 9696), False, 'from cterasdk.edge import volumes\n'), ((11930, 11958), 'cterasdk.edge.volumes.Volumes', 'volumes.Volumes', (['self._filer'], {}), '(self._filer)\n', (11945, 11958), False, 'from cterasdk.edge import volumes\n')]
|
from pathlib import Path
import os
import django_heroku
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '<KEY>'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
INSTALLED_APPS = [
'requests',
'starred_repos',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'social_django',
]
TAGGIT_CASE_INSENSITIVE = True
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'social_django.middleware.SocialAuthExceptionMiddleware'
]
ROOT_URLCONF = 'githeart.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'social_django.context_processors.backends',
'social_django.context_processors.login_redirect'
],
},
},
]
WSGI_APPLICATION = 'githeart.wsgi.application'
DJANGO_SETTINGS_MODULE = 'githeart.settings'
# Database
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'githeart',
'USER': 'postgres',
'PASSWORD': '<PASSWORD>',
'HOST': 'localhost'
}
}
AUTHENTICATION_BACKENDS = (
'social_core.backends.github.GithubOAuth2',
'django.contrib.auth.backends.ModelBackend',
)
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'githeart/static')
]
from django.contrib.messages import constants as messages
MESSAGE_TAGS = {
messages.ERROR: 'danger',
messages.SUCCESS: 'success'
}
LOGIN_URL = 'login'
LOGOUT_URL = 'logout'
LOGIN_REDIRECT_URL = 'home'
# GitHub Login
SOCIAL_AUTH_GITHUB_KEY = '<KEY>'
SOCIAL_AUTH_GITHUB_SECRET = '1917f9ce2cc914a7935e8eca01fb4fa3d3d771e9'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'githeart/static')
]
django_heroku.settings(locals())
|
[
"os.path.abspath",
"os.path.join"
] |
[((2828, 2860), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""static"""'], {}), "(BASE_DIR, 'static')\n", (2840, 2860), False, 'import os\n'), ((3301, 3333), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""static"""'], {}), "(BASE_DIR, 'static')\n", (3313, 3333), False, 'import os\n'), ((2910, 2951), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""githeart/static"""'], {}), "(BASE_DIR, 'githeart/static')\n", (2922, 2951), False, 'import os\n'), ((3383, 3424), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""githeart/static"""'], {}), "(BASE_DIR, 'githeart/static')\n", (3395, 3424), False, 'import os\n'), ((101, 126), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (116, 126), False, 'import os\n'), ((1231, 1266), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""templates"""'], {}), "(BASE_DIR, 'templates')\n", (1243, 1266), False, 'import os\n')]
|
#!/usr/bin/env python
# module TEST_ACD
import unittest
import numpy as np
from .test_common import BaseCommon
from pylocus.point_set import PointSet
from pylocus.algorithms import reconstruct_acd
from pylocus.simulation import create_noisy_edm
class TestACD(BaseCommon.TestAlgorithms):
def setUp(self):
BaseCommon.TestAlgorithms.setUp(self)
self.create_points()
self.n_it = 10
def create_points(self, N=5, d=2):
print('TestACD:create_points')
self.pts = PointSet(N, d)
self.pts.set_points('random')
self.pts.init()
self.index = 0
def call_method(self, method=''):
print('TestACD:call_method')
Xhat, costs = reconstruct_acd(self.pts.edm,
W=np.ones(self.pts.edm.shape),
X0=self.pts.points,
print_out=False, sweeps=3)
return Xhat
def add_noise(self, noise=1e-6):
self.pts.edm = create_noisy_edm(self.pts.edm, noise)
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"pylocus.simulation.create_noisy_edm",
"numpy.ones",
"pylocus.point_set.PointSet"
] |
[((1073, 1088), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1086, 1088), False, 'import unittest\n'), ((507, 521), 'pylocus.point_set.PointSet', 'PointSet', (['N', 'd'], {}), '(N, d)\n', (515, 521), False, 'from pylocus.point_set import PointSet\n'), ((1002, 1039), 'pylocus.simulation.create_noisy_edm', 'create_noisy_edm', (['self.pts.edm', 'noise'], {}), '(self.pts.edm, noise)\n', (1018, 1039), False, 'from pylocus.simulation import create_noisy_edm\n'), ((773, 800), 'numpy.ones', 'np.ones', (['self.pts.edm.shape'], {}), '(self.pts.edm.shape)\n', (780, 800), True, 'import numpy as np\n')]
|
"""
PROBLEM
<NAME> was born on 15 April 1707.
Consider the sequence 1504170715041707n mod 4503599627370517.
An element of this sequence is defined to be an Eulercoin if it is strictly smaller than all previously found Eulercoins.
For example, the first term is 1504170715041707 which is the first Eulercoin. The second term is 3008341430083414 which
is greater than 1504170715041707 so is not an Eulercoin. However, the third term is 8912517754604 which is small enough
to be a new Eulercoin.
The sum of the first 2 Eulercoins is therefore 1513083232796311.
Find the sum of all Eulercoins.
ANSWER:
1517926517777556
Solve time ~0.003 seconds
"""
import unittest
from util.utils import timeit
# Explanation from ProjectEuler user RubiksCube:
# After brute forcing the first 15 Eulercoins I tried the Euclidean algorithm and found that I got every coin and the
# distance between the coins from the step-by-step in the Euclidean algorithm.
#
# Short example:
# Start with 2 steps and use the last right hand side to get the first coin.
# 4503599627370517 = 1504170715041707 * 2 + 1495258197287103
# 1504170715041707 = 1495258197287103 * 1 + 8912517754604
#
# First coin: 1495258197287103 * 1 + 8912517754604 = 1504170715041707
#
# Do two steps again:
# 1495258197287103 = 8912517754604 * 167 + 6867732268235
# 8912517754604 = 6867732268235 * 1 + 2044785486369
#
# Second coin: 6867732268235 * 1 + 2044785486369 = 8912517754604
#
# Do two more steps, note the "2" giving us 2 coins.
# 6867732268235 = 2044785486369 * 3 + 733375809128
# 2044785486369 = 733375809128 * 2 + 578033868113
#
# Third coin: 733375809128 * 2 + 578033868113 = 2044785486369
# Fourth coin: 733375809128 * 1 + 578033868113 = 1311409677241
#
# Repeat until the Euclidean algorithm is finished
class Problem700:
@timeit
def solve(self, a, m):
res = 0
while a > 0:
res += a
a, m = -m % a, a
return res
class Solution700(unittest.TestCase):
def setUp(self):
self.problem = Problem700()
def test_solution(self):
a = 1504170715041707 # 17 × 1249 × 12043 × 5882353
m = 4503599627370517 # prime number
self.assertEqual(1517926517777556, self.problem.solve(a, m))
if __name__ == '__main__':
unittest.main()
# [(euler_coin, n)]
# [(1504170715041707, 1), (8912517754604, 3), (2044785486369, 506), (1311409677241, 2527), (578033868113, 4548),
# (422691927098, 11117), (267349986083, 17686), (112008045068, 24255), (68674149121, 55079), (25340253174, 85903),
# (7346610401, 202630), (4046188430, 724617), (745766459, 1246604), (428410324, 6755007), (111054189, 12263410),
# (15806432, 42298633), (15397267, 326125654), (14988102, 609952675), (14578937, 893779696), (14169772, 1177606717),
# (13760607, 1461433738), (13351442, 1745260759), (12942277, 2029087780), (12533112, 2312914801), (12123947, 2596741822),
# (11714782, 2880568843), (11305617, 3164395864), (10896452, 3448222885), (10487287, 3732049906), (10078122, 4015876927),
# (9668957, 4299703948), (9259792, 4583530969), (8850627, 4867357990), (8441462, 5151185011), (8032297, 5435012032),
# (7623132, 5718839053), (7213967, 6002666074), (6804802, 6286493095), (6395637, 6570320116), (5986472, 6854147137),
# (5577307, 7137974158), (5168142, 7421801179), (4758977, 7705628200), (4349812, 7989455221), (3940647, 8273282242),
# (3531482, 8557109263), (3122317, 8840936284), (2713152, 9124763305), (2303987, 9408590326), (1894822, 9692417347),
# (1485657, 9976244368), (1076492, 10260071389), (667327, 10543898410), (258162, 10827725431), (107159, 21939277883),
# (63315, 54990108218), (19471, 88040938553), (14569, 297173645994), (9667, 506306353435), (4765, 715439060876),
# (4628, 1640010829193), (4491, 2564582597510), (4354, 3489154365827), (4217, 4413726134144), (4080, 5338297902461),
# (3943, 6262869670778), (3806, 7187441439095), (3669, 8112013207412), (3532, 9036584975729), (3395, 9961156744046),
# (3258, 10885728512363), (3121, 11810300280680), (2984, 12734872048997), (2847, 13659443817314), (2710, 14584015585631),
# (2573, 15508587353948), (2436, 16433159122265), (2299, 17357730890582), (2162, 18282302658899), (2025, 19206874427216),
# (1888, 20131446195533), (1751, 21056017963850), (1614, 21980589732167), (1477, 22905161500484), (1340, 23829733268801),
# (1203, 24754305037118), (1066, 25678876805435), (929, 26603448573752), (792, 27528020342069), (655, 28452592110386),
# (518, 29377163878703), (381, 30301735647020), (244, 31226307415337), (107, 32150879183654), (77, 65226330135625),
# (47, 98301781087596), (17, 131377232039567), (4, 295829915031105), (3, 1347772343115958), (2, 2399714771200811),
# (1, 3451657199285664)]
|
[
"unittest.main"
] |
[((2266, 2281), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2279, 2281), False, 'import unittest\n')]
|
#!/usr/bin/env python
from pwn import *
import base64
r = remote('bamboofox.cs.nctu.edu.tw', 58789)
token = 'user=<PASSWORD>00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\xc0user=<PASSWORD>'
auth = '<PASSWORD>'
r.recvuntil('input your token: ')
r.sendline(base64.b64encode(token))
r.recvuntil('input your authentication code: ')
r.sendline(auth)
r.interactive()
|
[
"base64.b64encode"
] |
[((457, 480), 'base64.b64encode', 'base64.b64encode', (['token'], {}), '(token)\n', (473, 480), False, 'import base64\n')]
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'konakona'
__VERSION__ = "v1.0"
import os,sys,os.path,pycurl,cStringIO,json
python_version = sys.version_info[0]
if(python_version !=2):
print("本系统依赖于python2.7,您的系统不兼容, goodbye!")
exit()
start_date = raw_input("1.请输入开始日期(YYYY-MM-DD):")
end_date = raw_input("2.请输入结束日期(YYYY-MM-DD):")
start_page = raw_input("3.请输入需要从第几页开始(可以不填):")
end_page = raw_input("4.请输入需要结束页(可以不填):")
echo_flag = raw_input("5.输入'Y'代表保存文件,输入'N'代表打印结果:")
if(echo_flag!='Y' and echo_flag!='N'):
print("您的输入有误")
exit()
if(start_date=="" or end_date == ""):
print("请输入开始和结束日期")
exit()
if (start_date > end_date):
print("开始日期不能大于结束日期!")
exit()
if(type(start_page)!=int):
start_page = 2;
else:
if(start_page > 2):
pass
else:
start_page = 2
print("程序初始化...")
content = cStringIO.StringIO()
buf = cStringIO.StringIO()
file = open('emails.txt','wa')
#请修改为你的email和key
config = ['input your email','input your key']
def runCurl(start_date,end_date,page = 1):
api_url = 'https://api.livechatinc.com/chats?date_from='+start_date+"&date_to="+end_date+"&page="+bytes(page)+"&has_goal=1"
c = pycurl.Curl()
c.setopt(c.URL, api_url)
c.setopt(c.WRITEFUNCTION, buf.write)
c.setopt(c.USERPWD, config[0]+":"+config[1])
c.setopt(c.CONNECTTIMEOUT, 0)
c.setopt(c.TIMEOUT, 0)
# c.setopt(c.PROXY, 'http://www.crazyphper.com') #如果你需要设置代理访问
c.perform()
if(c.getinfo(c.RESPONSE_CODE) != 200):
print("[Warring] 在请求第"+page+"页时,失败了!")
# return
c.close()
# HTTP response code, e.g. 200.
# print('Status: %d' % c.getinfo(c.RESPONSE_CODE))
# Elapsed time for the transfer.
# print('Status: %f' % c.getinfo(c.TOTAL_TIME))
def saveContent(email):
if(email != ""):
content.write(email+"\n")
def saveFile(email):
if(email != ""):
file.write(email+"\n")
#-----------------程序正式开始-----------------
runCurl(start_date,end_date)
json_val = json.loads(buf.getvalue())
buf.seek(0)
buf.truncate()
if(type(end_page)!=int):
totalPage = json_val["pages"]
else:
if(end_page > 2):
if(end_page > json_val["pages"]):
totalPage = json_val["pages"]
else:
totalPage = end_page
else:
totalPage = json_val["pages"]
if(start_page >2):
pass
else:
for kk in json_val["chats"]:
if(echo_flag == 'Y'):
saveFile(kk["prechat_survey"][1]["value"])
else:
saveContent(kk["prechat_survey"][1]["value"])
for page in range(start_page,totalPage):
runCurl(start_date,end_date,page)
json_val = json.loads(buf.getvalue())
buf.seek(0)
buf.truncate()
for kk in json_val["chats"]:
if(echo_flag == 'Y'):
saveFile(kk["prechat_survey"][1]["value"])
else:
saveContent(kk["prechat_survey"][1]["value"])
if(echo_flag == 'Y'):
file.close()
else:
print(content.getvalue())
content.close()
if(raw_input("程序执行完毕,请按任意键结束...")):
exit()
|
[
"cStringIO.StringIO",
"pycurl.Curl"
] |
[((860, 880), 'cStringIO.StringIO', 'cStringIO.StringIO', ([], {}), '()\n', (878, 880), False, 'import os, sys, os.path, pycurl, cStringIO, json\n'), ((887, 907), 'cStringIO.StringIO', 'cStringIO.StringIO', ([], {}), '()\n', (905, 907), False, 'import os, sys, os.path, pycurl, cStringIO, json\n'), ((1186, 1199), 'pycurl.Curl', 'pycurl.Curl', ([], {}), '()\n', (1197, 1199), False, 'import os, sys, os.path, pycurl, cStringIO, json\n')]
|
''' network architecture for backbone '''
import functools
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
import models.archs.arch_util as arch_util
import numpy as np
import math
import pdb
from torch.nn.modules.utils import _pair
from models.archs.dcn.deform_conv import ModulatedDeformConvPack as DCN
class SimpleBlock(nn.Module):
def __init__(self, depth=3, n_channels=64, input_channels=3, output_channel=64, kernel_size=3):
super(SimpleBlock, self).__init__()
padding = 1
layers = []
layers.append(nn.Conv2d(in_channels=input_channels, out_channels=n_channels, kernel_size=kernel_size, padding=padding, bias=True))
layers.append(nn.LeakyReLU(negative_slope=0.1, inplace=True))
for _ in range(depth - 2):
layers.append(nn.Conv2d(in_channels=n_channels, out_channels=n_channels, kernel_size=kernel_size, padding=padding, bias=False))
layers.append(nn.LeakyReLU(negative_slope=0.1, inplace=True))
layers.append(nn.Conv2d(in_channels=n_channels, out_channels=output_channel, kernel_size=kernel_size, padding=padding, bias=False))
self.simple_block = nn.Sequential(*layers)
self._initialize_weights()
def forward(self, x):
out = self.simple_block(x)
return out
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.orthogonal_(m.weight)
print('init weight')
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
class DualABlock(nn.Module):
def __init__(self, res_num=5, n_channels=64, input_channels=3, output_channel=64, kernel_size=3):
super(DualABlock, self).__init__()
padding = 1
self.res_num = res_num
self.square_conv = nn.Conv2d(in_channels=input_channels, out_channels=n_channels, \
kernel_size=(kernel_size, kernel_size), padding=(padding, padding), bias=False)
self.relu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
self.extract_conv = nn.Conv2d(in_channels=n_channels, out_channels=n_channels, \
kernel_size=kernel_size, padding=padding, bias=True)
self.res_block1 = SimpleBlock(depth=2, n_channels=n_channels, input_channels=n_channels, \
output_channel=n_channels, kernel_size=3) # 64, H, W
self.res_block2 = SimpleBlock(depth=2, n_channels=n_channels, input_channels=n_channels, \
output_channel=n_channels, kernel_size=3) # 64, H, W
self.down = nn.Conv2d(in_channels=n_channels, out_channels=int(n_channels/2), kernel_size=1, stride=1, bias=True)
self.up = nn.Conv2d(in_channels=int(n_channels/2), out_channels=n_channels, kernel_size=1, stride=1, bias=True)
self.spatial_att = nn.Conv2d(in_channels=n_channels, out_channels=1, kernel_size=7, stride=1, padding=3,bias=True)
self._initialize_weights()
def forward(self, x):
x_temp = self.square_conv(x)
x_temp = self.relu(x_temp)
x_temp = self.extract_conv(x_temp)
x_temp = x + x_temp
x_temp2 = self.res_block1(x_temp)
x_temp = x_temp + x_temp2
x_temp2 = self.res_block2(x_temp)
x_temp = x_temp + x_temp2
# channel attention
x_se = F.avg_pool2d(x_temp, kernel_size=(x_temp.size(2), x_temp.size(3)))
x_se = self.down(x_se)
x_se = self.relu(x_se)
x_se = self.up(x_se)
x_se = F.sigmoid(x_se)
x_se = x_se.repeat(1, 1, x_temp.size(2), x_temp.size(3))
# spatial attention
x_sp = F.sigmoid(self.spatial_att(x_temp))
x_temp = x_temp + x_temp * x_se + x_temp * x_sp
return x_temp
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.orthogonal_(m.weight)
print('init weight')
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
class GCABlock(nn.Module):
def __init__(self, res_num=5, n_channels=64, input_channels=3, output_channel=64, kernel_size=3):
super(GCABlock, self).__init__()
padding = 1
self.res_num = res_num
self.square_conv = nn.Conv2d(in_channels=input_channels, out_channels=n_channels, \
kernel_size=kernel_size, padding=padding, bias=False)
self.relu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
self.extract_conv = nn.Conv2d(in_channels=n_channels, out_channels=n_channels, \
kernel_size=kernel_size, padding=padding, bias=True)
self.res_block1 = SimpleBlock(depth=2, n_channels=n_channels, input_channels=n_channels, \
output_channel=n_channels, kernel_size=3) # 64, H, W
self.res_block2 = SimpleBlock(depth=2, n_channels=n_channels, input_channels=n_channels, \
output_channel=n_channels, kernel_size=3) # 64, H, W
self.down = nn.Conv2d(in_channels=n_channels, out_channels=int(n_channels/2), kernel_size=1, stride=1, bias=True)
self.up = nn.Conv2d(in_channels=int(n_channels/2), out_channels=n_channels, kernel_size=1, stride=1, bias=True)
self.spatial_att = nn.Conv2d(in_channels=n_channels, out_channels=1, kernel_size=7, stride=1, padding=3,bias=True)
self._initialize_weights()
def forward(self, x, guided_lam, guided_beta):
x_temp = self.square_conv(x)
x_temp = x_temp.mul(guided_lam) + guided_beta
x_temp = self.relu(x_temp)
x_temp = self.extract_conv(x_temp)
x_temp = x + x_temp
x_temp2 = self.res_block1(x_temp)
x_temp = x_temp + x_temp2
x_temp2 = self.res_block2(x_temp)
x_temp = x_temp + x_temp2
# channel attention
x_se = F.avg_pool2d(x_temp, kernel_size=(x_temp.size(2), x_temp.size(3)))
x_se = self.down(x_se)
x_se = self.relu(x_se)
x_se = self.up(x_se)
x_se = F.sigmoid(x_se)
x_se = x_se.repeat(1, 1, x_temp.size(2), x_temp.size(3))
# spatial attention
x_sp = F.sigmoid(self.spatial_att(x_temp))
x_temp = x_temp + x_temp * x_se + x_temp * x_sp
return x_temp
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.orthogonal_(m.weight)
print('init weight')
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
class SimpleLSTM(nn.Module):
def __init__(self, input_dim, hidden_dim):
super(SimpleLSTM, self).__init__()
self.nf = input_dim
self.hf = hidden_dim
self.conv = nn.Conv2d(self.nf+self.hf, 4*self.hf, 3, 1, 1, bias=True)
def forward(self, input_tensor, h_cur, c_cur):
if h_cur is None:
tensor_size = (input_tensor.size(2),input_tensor.size(3))
h_cur = self._init_hidden(batch_size=input_tensor.size(0),tensor_size=tensor_size)
c_cur = self._init_hidden(batch_size=input_tensor.size(0),tensor_size=tensor_size)
combined = torch.cat([input_tensor, h_cur], dim=1) # concatenate along channel axis
combined_conv = self.conv(combined)
cc_i, cc_f, cc_o, cc_g = torch.split(combined_conv, self.hf, dim=1)
i = torch.sigmoid(cc_i)
f = torch.sigmoid(cc_f)
o = torch.sigmoid(cc_o)
g = torch.tanh(cc_g)
c_next = f * c_cur + i * g
h_next = o * torch.tanh(c_next)
return h_next, c_next
def _init_hidden(self, batch_size, tensor_size):
height, width = tensor_size
return torch.zeros(batch_size, self.hf, height, width).cuda()
class PCD_Align(nn.Module):
''' Alignment module using Pyramid, Cascading and Deformable convolution
with 3 pyramid levels.
'''
def __init__(self, nf=64, groups=8):
super(PCD_Align, self).__init__()
# L3: level 3, 1/4 spatial size
self.L3_offset_conv1 = nn.Conv2d(nf * 2, nf, 3, 1, 1, bias=True) # concat for diff
self.L3_rnn = SimpleLSTM(nf, int(nf/2))
self.L3_rnn_conv = nn.Conv2d(nf+int(nf/2), nf, 3, 1, 1, bias=True)
self.L3_offset_conv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.L3_dcnpack = DCN(nf, nf, 3, stride=1, padding=1, dilation=1, deformable_groups=groups,
extra_offset_mask=True)
# L2: level 2, 1/2 spatial size
self.L2_offset_conv1 = nn.Conv2d(nf * 2, nf, 3, 1, 1, bias=True) # concat for diff
self.L2_rnn = SimpleLSTM(nf, int(nf/2))
self.L2_rnn_conv = nn.Conv2d(nf+int(nf/2), nf, 3, 1, 1, bias=True)
self.L2_offset_conv2 = nn.Conv2d(nf * 2, nf, 3, 1, 1, bias=True) # concat for offset
self.L2_offset_conv3 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.L2_dcnpack = DCN(nf, nf, 3, stride=1, padding=1, dilation=1, deformable_groups=groups,
extra_offset_mask=True)
self.L2_fea_conv = nn.Conv2d(nf * 2, nf, 3, 1, 1, bias=True) # concat for fea
# L1: level 1, original spatial size
self.L1_offset_conv1 = nn.Conv2d(nf * 2, nf, 3, 1, 1, bias=True) # concat for diff
self.L1_rnn = SimpleLSTM(nf, int(nf/2))
self.L1_rnn_conv = nn.Conv2d(nf+int(nf/2), nf, 3, 1, 1, bias=True)
self.L1_offset_conv2 = nn.Conv2d(nf * 2, nf, 3, 1, 1, bias=True) # concat for offset
self.L1_offset_conv3 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.L1_dcnpack = DCN(nf, nf, 3, stride=1, padding=1, dilation=1, deformable_groups=groups,
extra_offset_mask=True)
self.L1_fea_conv = nn.Conv2d(nf * 2, nf, 3, 1, 1, bias=True) # concat for fea
# Cascading DCN
self.cas_offset_conv1 = nn.Conv2d(nf * 2, nf, 3, 1, 1, bias=True) # concat for diff
self.cas_offset_conv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.cas_dcnpack = DCN(nf, nf, 3, stride=1, padding=1, dilation=1, deformable_groups=groups,
extra_offset_mask=True)
self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
def forward(self, nbr_fea_l, ref_fea_l, guided_nbr_fea_l, guided_ref_fea_l, h_cur, c_cur):
'''align other neighboring frames to the reference frame in the feature level
nbr_fea_l, ref_fea_l: [L1, L2, L3], each with [B,C,H,W] features
'''
h_next = []
c_next = []
# L3
L3_comine = torch.cat((guided_nbr_fea_l[2], guided_ref_fea_l[2]), dim=1)
L3_offset = self.lrelu(self.L3_offset_conv1(L3_comine))
L3_offset_temp, c_out = self.L3_rnn(L3_offset, h_cur[0], c_cur[0])
h_next.append(L3_offset_temp)
c_next.append(c_out)
L3_offset = torch.cat((L3_offset, L3_offset_temp), dim=1)
L3_offset = self.lrelu(self.L3_rnn_conv(L3_offset))
L3_offset = self.lrelu(self.L3_offset_conv2(L3_offset))
L3_fea = self.lrelu(self.L3_dcnpack([nbr_fea_l[2], L3_offset]))
# L2
L2_comine = torch.cat((guided_nbr_fea_l[1], guided_ref_fea_l[1]), dim=1)
L2_offset = self.lrelu(self.L2_offset_conv1(L2_comine))
L2_offset_temp, c_out = self.L2_rnn(L2_offset, h_cur[1], c_cur[1])
h_next.append(L2_offset_temp)
c_next.append(c_out)
L2_offset = torch.cat((L2_offset, L2_offset_temp), dim=1)
L2_offset = self.lrelu(self.L2_rnn_conv(L2_offset))
L3_offset = F.interpolate(L3_offset, scale_factor=2, mode='bilinear', align_corners=False)
L2_offset = self.lrelu(self.L2_offset_conv2(torch.cat([L2_offset, L3_offset * 2], dim=1)))
L2_offset = self.lrelu(self.L2_offset_conv3(L2_offset))
L2_fea = self.L2_dcnpack([nbr_fea_l[1], L2_offset])
L3_fea = F.interpolate(L3_fea, scale_factor=2, mode='bilinear', align_corners=False)
L2_fea = self.lrelu(self.L2_fea_conv(torch.cat([L2_fea, L3_fea], dim=1)))
# L1
L1_comine = torch.cat((guided_nbr_fea_l[0], guided_ref_fea_l[0]), dim=1)
L1_offset = self.L1_offset_conv1(L1_comine)
L1_offset_temp, c_out = self.L1_rnn(L1_offset, h_cur[2], c_cur[2])
h_next.append(L1_offset_temp)
c_next.append(c_out)
L1_offset = torch.cat((L1_offset, L1_offset_temp), dim=1)
L1_offset = self.lrelu(self.L1_rnn_conv(L1_offset))
L2_offset = F.interpolate(L2_offset, scale_factor=2, mode='bilinear', align_corners=False)
L1_offset = self.lrelu(self.L1_offset_conv2(torch.cat([L1_offset, L2_offset * 2], dim=1)))
L1_offset = self.lrelu(self.L1_offset_conv3(L1_offset))
L1_fea = self.L1_dcnpack([nbr_fea_l[0], L1_offset])
L2_fea = F.interpolate(L2_fea, scale_factor=2, mode='bilinear', align_corners=False)
L1_fea = self.L1_fea_conv(torch.cat([L1_fea, L2_fea], dim=1))
# Cascading
offset_comine = torch.cat((L1_fea, ref_fea_l[0]), dim=1)
offset = self.lrelu(self.cas_offset_conv1(offset_comine))
offset = self.lrelu(self.cas_offset_conv2(offset))
L1_fea = self.lrelu(self.cas_dcnpack([L1_fea, offset]))
return L1_fea, h_next, c_next
class GCPNet(nn.Module):
def __init__(self, nf=64, nframes=5, groups=8, in_channel=1, output_channel=1, center=None):
super(GCPNet, self).__init__()
self.nf = nf
self.center = nframes // 2 if center is None else center
self.nframes = nframes
## GCP Branch
self.feature_guided1 = SimpleBlock(depth=3, n_channels=nf, input_channels=in_channel*2*2, \
output_channel=nf, kernel_size=3) # 64, H, W
self.feature_guided1_lam = nn.Conv2d(in_channels=nf, out_channels=nf, kernel_size=3, stride=1, bias=True, padding=1)
self.feature_guided1_beta = nn.Conv2d(in_channels=nf, out_channels=nf, kernel_size=3, stride=1, bias=True, padding=1)
self.feature_guided2 = SimpleBlock(depth=3, n_channels=nf, input_channels=nf, \
output_channel=nf, kernel_size=3) # 64, H, W
self.feature_guided2_lam = nn.Conv2d(in_channels=nf, out_channels=nf, kernel_size=3, stride=1, bias=True, padding=1)
self.feature_guided2_beta = nn.Conv2d(in_channels=nf, out_channels=nf, kernel_size=3, stride=1, bias=True, padding=1)
self.feature_guided3 = SimpleBlock(depth=3, n_channels=nf, input_channels=nf, \
output_channel=nf, kernel_size=3) # 64, H, W
self.feature_guided3_lam = nn.Conv2d(in_channels=nf, out_channels=nf, kernel_size=3, stride=1, bias=True, padding=1)
self.feature_guided3_beta = nn.Conv2d(in_channels=nf, out_channels=nf, kernel_size=3, stride=1, bias=True, padding=1)
self.feature_guided4 = SimpleBlock(depth=3, n_channels=nf, input_channels=nf, \
output_channel=nf, kernel_size=3) # 64, H, W
self.feature_guided4_lam = nn.Conv2d(in_channels=nf, out_channels=nf, kernel_size=3, stride=1, bias=True, padding=1)
self.feature_guided4_beta = nn.Conv2d(in_channels=nf, out_channels=nf, kernel_size=3, stride=1, bias=True, padding=1)
self.feature_guided5 = SimpleBlock(depth=3, n_channels=nf, input_channels=nf, \
output_channel=nf, kernel_size=3) # 64, H, W
self.feature_guided6 = SimpleBlock(depth=3, n_channels=nf, input_channels=nf, \
output_channel=nf, kernel_size=3) # 64, H, W
self.feature_guided6_up = nn.ConvTranspose2d(in_channels=nf, out_channels=nf,\
kernel_size=2, stride=2, padding=0, bias=True) # 64, H*2, W*2
self.feature_guided6_lam = nn.Conv2d(in_channels=nf, out_channels=nf, kernel_size=3, stride=1, bias=True, padding=1)
self.feature_guided6_beta = nn.Conv2d(in_channels=nf, out_channels=nf, kernel_size=3, stride=1, bias=True, padding=1)
## IntraF Module
self.feature_extract = SimpleBlock(depth=5, n_channels=nf, input_channels=in_channel*4*2, \
output_channel=nf, kernel_size=3) # 64, H, W
self.feature_extract_acse1 = GCABlock(res_num=2, n_channels=nf, input_channels=nf, \
output_channel=nf, kernel_size=3) # 64, H, W
self.feature_extract_acse2 = GCABlock(res_num=2, n_channels=nf, input_channels=nf, \
output_channel=nf, kernel_size=3) # 64, H, W
self.feature_extract_acse3 = GCABlock(res_num=2, n_channels=nf, input_channels=nf, \
output_channel=nf, kernel_size=3) # 64, H*2, W*2
self.feature_extract_acse4 = GCABlock(res_num=2, n_channels=nf, input_channels=nf, \
output_channel=nf, kernel_size=3) # 64, H*2, W*2
## InterF Module
self.fea_L2_conv1 = nn.Conv2d(nf, nf, 3, 2, 1, bias=True)
self.fea_L2_conv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.fea_L3_conv1 = nn.Conv2d(nf, nf, 3, 2, 1, bias=True)
self.fea_L3_conv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.pcd_align = PCD_Align(nf=nf, groups=groups)
self.merge = nn.Conv2d(nf*nframes, nf, 3, 1, 1, bias=True)
self.feature_up = nn.ConvTranspose2d(in_channels=nf, out_channels=nf,\
kernel_size=2, stride=2, padding=0, bias=True) # 64, H*2, W*2
# encoder
self.conv_block_s1 = SimpleBlock(depth=2, n_channels=nf, input_channels=nf, \
output_channel=nf, kernel_size=3) # 64, H*2, W*2
self.acse_block_s1 = DualABlock(res_num=2, n_channels=nf, input_channels=nf, \
output_channel=nf, kernel_size=3) # 64, H*2, W*2
self.pool1 = nn.Conv2d(nf, 2*nf, 3, 2, 1, bias=True) # 128
self.conv_block_s2 = SimpleBlock(depth=2, n_channels=2*nf, input_channels=2*nf, \
output_channel=2*nf, kernel_size=3) # 128, H, W
self.acse_block_s2 = DualABlock(res_num=2, n_channels=2*nf, input_channels=2*nf, \
output_channel=2*nf, kernel_size=3) # 128, H, W
self.pool2 = nn.Conv2d(2*nf, 4*nf, 3, 2, 1, bias=True) # 256
self.conv_block_s3 = SimpleBlock(depth=2, n_channels=4*nf, input_channels=4*nf, \
output_channel=4*nf, kernel_size=3) # 256, H//2, W//2
self.acse_block_s3 = DualABlock(res_num=2, n_channels=4*nf, input_channels=4*nf, \
output_channel=4*nf, kernel_size=3) # 256, H//2, W//2
self.conv_block_s3_2 = SimpleBlock(depth=2, n_channels=4*nf, input_channels=4*nf, \
output_channel=4*nf, kernel_size=3) # 256, H//2, W//2
self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
# decoder
self.up1 = nn.ConvTranspose2d(in_channels=4*nf, out_channels=2*nf,\
kernel_size=2, stride=2, padding=0, bias=True) # 128, H, W
### With SkipConnection
# cat with conv_block_s4 # 128, H, W
self.conv_block_s4 = SimpleBlock(depth=2, n_channels=2*nf, input_channels=4*nf, \
output_channel=2*nf, kernel_size=3) # 128, H, W
self.acse_block_s4 = DualABlock(res_num=2, n_channels=2*nf, input_channels=2*nf, \
output_channel=2*nf, kernel_size=3) # 128, H, W
self.up2 = nn.ConvTranspose2d(in_channels=2*nf, out_channels=nf,\
kernel_size=2, stride=2, padding=0, bias=True) # 64, H*2, W*2
# cat with conv_block_s3 # 64, H*2, W*2
self.conv_block_s5 = SimpleBlock(depth=3, n_channels=nf, input_channels=2*nf, \
output_channel=output_channel, kernel_size=3) # 64, H*2, W*2
def forward(self, x, nmap):
B, N, C, H, W = x.size() # N video frames, C is 4 response to RGGB channel
# GCP Branch
x_gr = x[:,:,1:3,:,:].clone()
x_gr_map = nmap[:,:,1:3,:,:].clone()
x_gr = x_gr.view(-1, int(C/2), H, W)
x_gr_map = x_gr_map.view(-1, int(C/2), H, W)
temp = torch.cat([x_gr, x_gr_map], dim=1)
x_gr1 = self.feature_guided1(temp)
x_gr1_lam = self.feature_guided1_lam(x_gr1)
x_gr1_beta = self.feature_guided1_beta(x_gr1)
x_gr2 = self.feature_guided2(x_gr1)
x_gr2_lam = self.feature_guided2_lam(x_gr2)
x_gr2_beta = self.feature_guided2_beta(x_gr2)
x_gr3 = self.feature_guided3(x_gr2)
x_gr3_lam = self.feature_guided3_lam(x_gr3)
x_gr3_beta = self.feature_guided3_beta(x_gr3)
x_gr4 = self.feature_guided4(x_gr3)
x_gr4_lam = self.feature_guided4_lam(x_gr4)
x_gr4_beta = self.feature_guided4_beta(x_gr4)
x_gr5 = self.feature_guided5(x_gr4)
x_gr5 = x_gr5.view(B, N, -1, H, W)
x_gr6 = self.feature_guided6(x_gr5[:, self.center, :, :, :])
x_gr5 = x_gr5.view(B*N, -1, H, W)
x_gr6 = self.feature_guided6_up(x_gr6)
x_gr6_lam = self.feature_guided6_lam(x_gr6)
x_gr6_beta = self.feature_guided6_beta(x_gr6)
# IntraF Module
x_temp = x.view(-1, C, H, W)
x_nm_temp = nmap.view(-1, C, H, W)
temp = torch.cat([x_temp, x_nm_temp], dim=1)
x_s1 = self.feature_extract(temp) # B*N, fea_C, H, W
x_s1 = self.feature_extract_acse1(x_s1, x_gr1_lam, x_gr1_beta)
x_s1 = self.feature_extract_acse2(x_s1, x_gr2_lam, x_gr2_beta)
x_s1 = self.feature_extract_acse3(x_s1, x_gr3_lam, x_gr3_beta) # B*N, fea_C, H, W
x_s1 = self.feature_extract_acse4(x_s1, x_gr4_lam, x_gr4_beta) # B*N, fea_C, H, W
# InterF Module
x_s1 = self.align_feature(x_s1, x_gr5, B, N, self.nf, H, W) # [B*N, fea, H, W] -> [B, N, fea, H, W]
x_s1 = self.merge(x_s1.view(-1, self.nf*N, H, W))
# Merge Module: encoder -- decoder
x_s1 = self.feature_up(x_s1) # B, fea_C, H*2, W*2
x_s1 = x_s1.mul(x_gr6_lam) + x_gr6_beta
###
x_s1 = self.conv_block_s1(x_s1) # 64, H*2, W*2
x_s1 = self.acse_block_s1(x_s1)
###
L1_temp = x_s1.clone()
###
x_s2 = self.pool1(x_s1) # 128, H, W
x_s2 = self.conv_block_s2(x_s2) # 128, H, W
x_s2 = self.acse_block_s2(x_s2) # 128, H, W
###
L2_temp = x_s2.clone()
###
x_s3 = self.pool2(x_s2) # 256, H//2, W//2
x_s3 = self.conv_block_s3(x_s3) # 256, H//2, W//2
x_s3 = self.acse_block_s3(x_s3) # 256, H//2, W//2
x_s3 = self.conv_block_s3_2(x_s3) # 256, H//2, W//2
# decoder
out = self.up1(x_s3) # 128, H, W
out = torch.cat((out, L2_temp), 1) # 256, H, W
out = self.conv_block_s4(out) # 128, H, W
out = self.acse_block_s4(out) # 128, H, W
out = self.up2(out) # 64, H*2, W*2
out = torch.cat((out, L1_temp), 1) # 128, H*2, W*2
out = self.conv_block_s5(out) # out_ch, H, W
return out
def align_feature(self, feature, guided_feature, B, N, C, H, W):
feature_temp = torch.cat([feature, guided_feature], dim=0)
# L2
L2_fea = self.lrelu(self.fea_L2_conv1(feature_temp)) # H//2, W//2
L2_fea = self.lrelu(self.fea_L2_conv2(L2_fea))
# L3
L3_fea = self.lrelu(self.fea_L3_conv1(L2_fea)) # H//4, W//4
L3_fea = self.lrelu(self.fea_L3_conv2(L3_fea))
L1_fea = feature_temp.view(2*B, N, -1, H, W)
L2_fea = L2_fea.view(2*B, N, -1, H // 2, W // 2)
L3_fea = L3_fea.view(2*B, N, -1, H // 4, W // 4)
#### align using DConv
# ref feature list
ref_fea_l = [
L1_fea[0:B, self.center, :, :, :].clone(), L2_fea[0:B, self.center, :, :, :].clone(),
L3_fea[0:B, self.center, :, :, :].clone()
]
ref_fea_l_g = [
L1_fea[B:, self.center, :, :, :].clone(), L2_fea[B:, self.center, :, :, :].clone(),
L3_fea[B:, self.center, :, :, :].clone()
]
aligned_fea = []
h_cur = [None, None, None]
c_cur = [None, None, None]
for i in range(N):
nbr_fea_l = [
L1_fea[0:B, i, :, :, :].clone(), L2_fea[0:B, i, :, :, :].clone(),
L3_fea[0:B, i, :, :, :].clone()
]
nbr_fea_l_g = [
L1_fea[B:, i, :, :, :].clone(), L2_fea[B:, i, :, :, :].clone(),
L3_fea[B:, i, :, :, :].clone()
]
a_fea, h_cur, c_cur = self.pcd_align(nbr_fea_l, ref_fea_l, nbr_fea_l_g, ref_fea_l_g, h_cur, c_cur)
aligned_fea.append(a_fea)
aligned_fea = torch.stack(aligned_fea, dim=1) # [B, N, C, H, W]
return aligned_fea
|
[
"torch.nn.ConvTranspose2d",
"torch.stack",
"torch.nn.Sequential",
"models.archs.dcn.deform_conv.ModulatedDeformConvPack",
"torch.nn.Conv2d",
"torch.split",
"torch.cat",
"torch.sigmoid",
"torch.nn.functional.interpolate",
"torch.nn.init.constant_",
"torch.nn.functional.sigmoid",
"torch.zeros",
"torch.nn.LeakyReLU",
"torch.nn.init.orthogonal_",
"torch.tanh"
] |
[((1194, 1216), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (1207, 1216), True, 'import torch.nn as nn\n'), ((1995, 2142), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'input_channels', 'out_channels': 'n_channels', 'kernel_size': '(kernel_size, kernel_size)', 'padding': '(padding, padding)', 'bias': '(False)'}), '(in_channels=input_channels, out_channels=n_channels, kernel_size=\n (kernel_size, kernel_size), padding=(padding, padding), bias=False)\n', (2004, 2142), True, 'import torch.nn as nn\n'), ((2173, 2219), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'negative_slope': '(0.1)', 'inplace': '(True)'}), '(negative_slope=0.1, inplace=True)\n', (2185, 2219), True, 'import torch.nn as nn\n'), ((2257, 2373), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'n_channels', 'out_channels': 'n_channels', 'kernel_size': 'kernel_size', 'padding': 'padding', 'bias': '(True)'}), '(in_channels=n_channels, out_channels=n_channels, kernel_size=\n kernel_size, padding=padding, bias=True)\n', (2266, 2373), True, 'import torch.nn as nn\n'), ((3007, 3107), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'n_channels', 'out_channels': '(1)', 'kernel_size': '(7)', 'stride': '(1)', 'padding': '(3)', 'bias': '(True)'}), '(in_channels=n_channels, out_channels=1, kernel_size=7, stride=1,\n padding=3, bias=True)\n', (3016, 3107), True, 'import torch.nn as nn\n'), ((3679, 3694), 'torch.nn.functional.sigmoid', 'F.sigmoid', (['x_se'], {}), '(x_se)\n', (3688, 3694), True, 'import torch.nn.functional as F\n'), ((4594, 4715), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'input_channels', 'out_channels': 'n_channels', 'kernel_size': 'kernel_size', 'padding': 'padding', 'bias': '(False)'}), '(in_channels=input_channels, out_channels=n_channels, kernel_size=\n kernel_size, padding=padding, bias=False)\n', (4603, 4715), True, 'import torch.nn as nn\n'), ((4746, 4792), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'negative_slope': '(0.1)', 'inplace': '(True)'}), '(negative_slope=0.1, inplace=True)\n', (4758, 4792), True, 'import torch.nn as nn\n'), ((4830, 4946), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'n_channels', 'out_channels': 'n_channels', 'kernel_size': 'kernel_size', 'padding': 'padding', 'bias': '(True)'}), '(in_channels=n_channels, out_channels=n_channels, kernel_size=\n kernel_size, padding=padding, bias=True)\n', (4839, 4946), True, 'import torch.nn as nn\n'), ((5573, 5673), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'n_channels', 'out_channels': '(1)', 'kernel_size': '(7)', 'stride': '(1)', 'padding': '(3)', 'bias': '(True)'}), '(in_channels=n_channels, out_channels=1, kernel_size=7, stride=1,\n padding=3, bias=True)\n', (5582, 5673), True, 'import torch.nn as nn\n'), ((6341, 6356), 'torch.nn.functional.sigmoid', 'F.sigmoid', (['x_se'], {}), '(x_se)\n', (6350, 6356), True, 'import torch.nn.functional as F\n'), ((7204, 7265), 'torch.nn.Conv2d', 'nn.Conv2d', (['(self.nf + self.hf)', '(4 * self.hf)', '(3)', '(1)', '(1)'], {'bias': '(True)'}), '(self.nf + self.hf, 4 * self.hf, 3, 1, 1, bias=True)\n', (7213, 7265), True, 'import torch.nn as nn\n'), ((7628, 7667), 'torch.cat', 'torch.cat', (['[input_tensor, h_cur]'], {'dim': '(1)'}), '([input_tensor, h_cur], dim=1)\n', (7637, 7667), False, 'import torch\n'), ((7779, 7821), 'torch.split', 'torch.split', (['combined_conv', 'self.hf'], {'dim': '(1)'}), '(combined_conv, self.hf, dim=1)\n', (7790, 7821), False, 'import torch\n'), ((7835, 7854), 'torch.sigmoid', 'torch.sigmoid', (['cc_i'], {}), '(cc_i)\n', (7848, 7854), False, 'import torch\n'), ((7867, 7886), 'torch.sigmoid', 'torch.sigmoid', (['cc_f'], {}), '(cc_f)\n', (7880, 7886), False, 'import torch\n'), ((7899, 7918), 'torch.sigmoid', 'torch.sigmoid', (['cc_o'], {}), '(cc_o)\n', (7912, 7918), False, 'import torch\n'), ((7931, 7947), 'torch.tanh', 'torch.tanh', (['cc_g'], {}), '(cc_g)\n', (7941, 7947), False, 'import torch\n'), ((8512, 8553), 'torch.nn.Conv2d', 'nn.Conv2d', (['(nf * 2)', 'nf', '(3)', '(1)', '(1)'], {'bias': '(True)'}), '(nf * 2, nf, 3, 1, 1, bias=True)\n', (8521, 8553), True, 'import torch.nn as nn\n'), ((8728, 8765), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', 'nf', '(3)', '(1)', '(1)'], {'bias': '(True)'}), '(nf, nf, 3, 1, 1, bias=True)\n', (8737, 8765), True, 'import torch.nn as nn\n'), ((8792, 8893), 'models.archs.dcn.deform_conv.ModulatedDeformConvPack', 'DCN', (['nf', 'nf', '(3)'], {'stride': '(1)', 'padding': '(1)', 'dilation': '(1)', 'deformable_groups': 'groups', 'extra_offset_mask': '(True)'}), '(nf, nf, 3, stride=1, padding=1, dilation=1, deformable_groups=groups,\n extra_offset_mask=True)\n', (8795, 8893), True, 'from models.archs.dcn.deform_conv import ModulatedDeformConvPack as DCN\n'), ((8991, 9032), 'torch.nn.Conv2d', 'nn.Conv2d', (['(nf * 2)', 'nf', '(3)', '(1)', '(1)'], {'bias': '(True)'}), '(nf * 2, nf, 3, 1, 1, bias=True)\n', (9000, 9032), True, 'import torch.nn as nn\n'), ((9207, 9248), 'torch.nn.Conv2d', 'nn.Conv2d', (['(nf * 2)', 'nf', '(3)', '(1)', '(1)'], {'bias': '(True)'}), '(nf * 2, nf, 3, 1, 1, bias=True)\n', (9216, 9248), True, 'import torch.nn as nn\n'), ((9301, 9338), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', 'nf', '(3)', '(1)', '(1)'], {'bias': '(True)'}), '(nf, nf, 3, 1, 1, bias=True)\n', (9310, 9338), True, 'import torch.nn as nn\n'), ((9365, 9466), 'models.archs.dcn.deform_conv.ModulatedDeformConvPack', 'DCN', (['nf', 'nf', '(3)'], {'stride': '(1)', 'padding': '(1)', 'dilation': '(1)', 'deformable_groups': 'groups', 'extra_offset_mask': '(True)'}), '(nf, nf, 3, stride=1, padding=1, dilation=1, deformable_groups=groups,\n extra_offset_mask=True)\n', (9368, 9466), True, 'from models.archs.dcn.deform_conv import ModulatedDeformConvPack as DCN\n'), ((9520, 9561), 'torch.nn.Conv2d', 'nn.Conv2d', (['(nf * 2)', 'nf', '(3)', '(1)', '(1)'], {'bias': '(True)'}), '(nf * 2, nf, 3, 1, 1, bias=True)\n', (9529, 9561), True, 'import torch.nn as nn\n'), ((9656, 9697), 'torch.nn.Conv2d', 'nn.Conv2d', (['(nf * 2)', 'nf', '(3)', '(1)', '(1)'], {'bias': '(True)'}), '(nf * 2, nf, 3, 1, 1, bias=True)\n', (9665, 9697), True, 'import torch.nn as nn\n'), ((9872, 9913), 'torch.nn.Conv2d', 'nn.Conv2d', (['(nf * 2)', 'nf', '(3)', '(1)', '(1)'], {'bias': '(True)'}), '(nf * 2, nf, 3, 1, 1, bias=True)\n', (9881, 9913), True, 'import torch.nn as nn\n'), ((9966, 10003), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', 'nf', '(3)', '(1)', '(1)'], {'bias': '(True)'}), '(nf, nf, 3, 1, 1, bias=True)\n', (9975, 10003), True, 'import torch.nn as nn\n'), ((10030, 10131), 'models.archs.dcn.deform_conv.ModulatedDeformConvPack', 'DCN', (['nf', 'nf', '(3)'], {'stride': '(1)', 'padding': '(1)', 'dilation': '(1)', 'deformable_groups': 'groups', 'extra_offset_mask': '(True)'}), '(nf, nf, 3, stride=1, padding=1, dilation=1, deformable_groups=groups,\n extra_offset_mask=True)\n', (10033, 10131), True, 'from models.archs.dcn.deform_conv import ModulatedDeformConvPack as DCN\n'), ((10185, 10226), 'torch.nn.Conv2d', 'nn.Conv2d', (['(nf * 2)', 'nf', '(3)', '(1)', '(1)'], {'bias': '(True)'}), '(nf * 2, nf, 3, 1, 1, bias=True)\n', (10194, 10226), True, 'import torch.nn as nn\n'), ((10301, 10342), 'torch.nn.Conv2d', 'nn.Conv2d', (['(nf * 2)', 'nf', '(3)', '(1)', '(1)'], {'bias': '(True)'}), '(nf * 2, nf, 3, 1, 1, bias=True)\n', (10310, 10342), True, 'import torch.nn as nn\n'), ((10394, 10431), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', 'nf', '(3)', '(1)', '(1)'], {'bias': '(True)'}), '(nf, nf, 3, 1, 1, bias=True)\n', (10403, 10431), True, 'import torch.nn as nn\n'), ((10460, 10561), 'models.archs.dcn.deform_conv.ModulatedDeformConvPack', 'DCN', (['nf', 'nf', '(3)'], {'stride': '(1)', 'padding': '(1)', 'dilation': '(1)', 'deformable_groups': 'groups', 'extra_offset_mask': '(True)'}), '(nf, nf, 3, stride=1, padding=1, dilation=1, deformable_groups=groups,\n extra_offset_mask=True)\n', (10463, 10561), True, 'from models.archs.dcn.deform_conv import ModulatedDeformConvPack as DCN\n'), ((10611, 10657), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'negative_slope': '(0.1)', 'inplace': '(True)'}), '(negative_slope=0.1, inplace=True)\n', (10623, 10657), True, 'import torch.nn as nn\n'), ((10998, 11058), 'torch.cat', 'torch.cat', (['(guided_nbr_fea_l[2], guided_ref_fea_l[2])'], {'dim': '(1)'}), '((guided_nbr_fea_l[2], guided_ref_fea_l[2]), dim=1)\n', (11007, 11058), False, 'import torch\n'), ((11285, 11330), 'torch.cat', 'torch.cat', (['(L3_offset, L3_offset_temp)'], {'dim': '(1)'}), '((L3_offset, L3_offset_temp), dim=1)\n', (11294, 11330), False, 'import torch\n'), ((11561, 11621), 'torch.cat', 'torch.cat', (['(guided_nbr_fea_l[1], guided_ref_fea_l[1])'], {'dim': '(1)'}), '((guided_nbr_fea_l[1], guided_ref_fea_l[1]), dim=1)\n', (11570, 11621), False, 'import torch\n'), ((11848, 11893), 'torch.cat', 'torch.cat', (['(L2_offset, L2_offset_temp)'], {'dim': '(1)'}), '((L2_offset, L2_offset_temp), dim=1)\n', (11857, 11893), False, 'import torch\n'), ((11974, 12052), 'torch.nn.functional.interpolate', 'F.interpolate', (['L3_offset'], {'scale_factor': '(2)', 'mode': '"""bilinear"""', 'align_corners': '(False)'}), "(L3_offset, scale_factor=2, mode='bilinear', align_corners=False)\n", (11987, 12052), True, 'import torch.nn.functional as F\n'), ((12293, 12368), 'torch.nn.functional.interpolate', 'F.interpolate', (['L3_fea'], {'scale_factor': '(2)', 'mode': '"""bilinear"""', 'align_corners': '(False)'}), "(L3_fea, scale_factor=2, mode='bilinear', align_corners=False)\n", (12306, 12368), True, 'import torch.nn.functional as F\n'), ((12485, 12545), 'torch.cat', 'torch.cat', (['(guided_nbr_fea_l[0], guided_ref_fea_l[0])'], {'dim': '(1)'}), '((guided_nbr_fea_l[0], guided_ref_fea_l[0]), dim=1)\n', (12494, 12545), False, 'import torch\n'), ((12760, 12805), 'torch.cat', 'torch.cat', (['(L1_offset, L1_offset_temp)'], {'dim': '(1)'}), '((L1_offset, L1_offset_temp), dim=1)\n', (12769, 12805), False, 'import torch\n'), ((12886, 12964), 'torch.nn.functional.interpolate', 'F.interpolate', (['L2_offset'], {'scale_factor': '(2)', 'mode': '"""bilinear"""', 'align_corners': '(False)'}), "(L2_offset, scale_factor=2, mode='bilinear', align_corners=False)\n", (12899, 12964), True, 'import torch.nn.functional as F\n'), ((13205, 13280), 'torch.nn.functional.interpolate', 'F.interpolate', (['L2_fea'], {'scale_factor': '(2)', 'mode': '"""bilinear"""', 'align_corners': '(False)'}), "(L2_fea, scale_factor=2, mode='bilinear', align_corners=False)\n", (13218, 13280), True, 'import torch.nn.functional as F\n'), ((13404, 13444), 'torch.cat', 'torch.cat', (['(L1_fea, ref_fea_l[0])'], {'dim': '(1)'}), '((L1_fea, ref_fea_l[0]), dim=1)\n', (13413, 13444), False, 'import torch\n'), ((14166, 14260), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'nf', 'out_channels': 'nf', 'kernel_size': '(3)', 'stride': '(1)', 'bias': '(True)', 'padding': '(1)'}), '(in_channels=nf, out_channels=nf, kernel_size=3, stride=1, bias=\n True, padding=1)\n', (14175, 14260), True, 'import torch.nn as nn\n'), ((14292, 14386), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'nf', 'out_channels': 'nf', 'kernel_size': '(3)', 'stride': '(1)', 'bias': '(True)', 'padding': '(1)'}), '(in_channels=nf, out_channels=nf, kernel_size=3, stride=1, bias=\n True, padding=1)\n', (14301, 14386), True, 'import torch.nn as nn\n'), ((14571, 14665), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'nf', 'out_channels': 'nf', 'kernel_size': '(3)', 'stride': '(1)', 'bias': '(True)', 'padding': '(1)'}), '(in_channels=nf, out_channels=nf, kernel_size=3, stride=1, bias=\n True, padding=1)\n', (14580, 14665), True, 'import torch.nn as nn\n'), ((14697, 14791), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'nf', 'out_channels': 'nf', 'kernel_size': '(3)', 'stride': '(1)', 'bias': '(True)', 'padding': '(1)'}), '(in_channels=nf, out_channels=nf, kernel_size=3, stride=1, bias=\n True, padding=1)\n', (14706, 14791), True, 'import torch.nn as nn\n'), ((14968, 15062), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'nf', 'out_channels': 'nf', 'kernel_size': '(3)', 'stride': '(1)', 'bias': '(True)', 'padding': '(1)'}), '(in_channels=nf, out_channels=nf, kernel_size=3, stride=1, bias=\n True, padding=1)\n', (14977, 15062), True, 'import torch.nn as nn\n'), ((15094, 15188), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'nf', 'out_channels': 'nf', 'kernel_size': '(3)', 'stride': '(1)', 'bias': '(True)', 'padding': '(1)'}), '(in_channels=nf, out_channels=nf, kernel_size=3, stride=1, bias=\n True, padding=1)\n', (15103, 15188), True, 'import torch.nn as nn\n'), ((15365, 15459), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'nf', 'out_channels': 'nf', 'kernel_size': '(3)', 'stride': '(1)', 'bias': '(True)', 'padding': '(1)'}), '(in_channels=nf, out_channels=nf, kernel_size=3, stride=1, bias=\n True, padding=1)\n', (15374, 15459), True, 'import torch.nn as nn\n'), ((15491, 15585), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'nf', 'out_channels': 'nf', 'kernel_size': '(3)', 'stride': '(1)', 'bias': '(True)', 'padding': '(1)'}), '(in_channels=nf, out_channels=nf, kernel_size=3, stride=1, bias=\n True, padding=1)\n', (15500, 15585), True, 'import torch.nn as nn\n'), ((15907, 16009), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', ([], {'in_channels': 'nf', 'out_channels': 'nf', 'kernel_size': '(2)', 'stride': '(2)', 'padding': '(0)', 'bias': '(True)'}), '(in_channels=nf, out_channels=nf, kernel_size=2, stride=2,\n padding=0, bias=True)\n', (15925, 16009), True, 'import torch.nn as nn\n'), ((16069, 16163), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'nf', 'out_channels': 'nf', 'kernel_size': '(3)', 'stride': '(1)', 'bias': '(True)', 'padding': '(1)'}), '(in_channels=nf, out_channels=nf, kernel_size=3, stride=1, bias=\n True, padding=1)\n', (16078, 16163), True, 'import torch.nn as nn\n'), ((16195, 16289), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'nf', 'out_channels': 'nf', 'kernel_size': '(3)', 'stride': '(1)', 'bias': '(True)', 'padding': '(1)'}), '(in_channels=nf, out_channels=nf, kernel_size=3, stride=1, bias=\n True, padding=1)\n', (16204, 16289), True, 'import torch.nn as nn\n'), ((17136, 17173), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', 'nf', '(3)', '(2)', '(1)'], {'bias': '(True)'}), '(nf, nf, 3, 2, 1, bias=True)\n', (17145, 17173), True, 'import torch.nn as nn\n'), ((17202, 17239), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', 'nf', '(3)', '(1)', '(1)'], {'bias': '(True)'}), '(nf, nf, 3, 1, 1, bias=True)\n', (17211, 17239), True, 'import torch.nn as nn\n'), ((17268, 17305), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', 'nf', '(3)', '(2)', '(1)'], {'bias': '(True)'}), '(nf, nf, 3, 2, 1, bias=True)\n', (17277, 17305), True, 'import torch.nn as nn\n'), ((17334, 17371), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', 'nf', '(3)', '(1)', '(1)'], {'bias': '(True)'}), '(nf, nf, 3, 1, 1, bias=True)\n', (17343, 17371), True, 'import torch.nn as nn\n'), ((17451, 17498), 'torch.nn.Conv2d', 'nn.Conv2d', (['(nf * nframes)', 'nf', '(3)', '(1)', '(1)'], {'bias': '(True)'}), '(nf * nframes, nf, 3, 1, 1, bias=True)\n', (17460, 17498), True, 'import torch.nn as nn\n'), ((17524, 17626), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', ([], {'in_channels': 'nf', 'out_channels': 'nf', 'kernel_size': '(2)', 'stride': '(2)', 'padding': '(0)', 'bias': '(True)'}), '(in_channels=nf, out_channels=nf, kernel_size=2, stride=2,\n padding=0, bias=True)\n', (17542, 17626), True, 'import torch.nn as nn\n'), ((17986, 18027), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', '(2 * nf)', '(3)', '(2)', '(1)'], {'bias': '(True)'}), '(nf, 2 * nf, 3, 2, 1, bias=True)\n', (17995, 18027), True, 'import torch.nn as nn\n'), ((18364, 18409), 'torch.nn.Conv2d', 'nn.Conv2d', (['(2 * nf)', '(4 * nf)', '(3)', '(2)', '(1)'], {'bias': '(True)'}), '(2 * nf, 4 * nf, 3, 2, 1, bias=True)\n', (18373, 18409), True, 'import torch.nn as nn\n'), ((18914, 18960), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'negative_slope': '(0.1)', 'inplace': '(True)'}), '(negative_slope=0.1, inplace=True)\n', (18926, 18960), True, 'import torch.nn as nn\n'), ((18998, 19108), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', ([], {'in_channels': '(4 * nf)', 'out_channels': '(2 * nf)', 'kernel_size': '(2)', 'stride': '(2)', 'padding': '(0)', 'bias': '(True)'}), '(in_channels=4 * nf, out_channels=2 * nf, kernel_size=2,\n stride=2, padding=0, bias=True)\n', (19016, 19108), True, 'import torch.nn as nn\n'), ((19532, 19638), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', ([], {'in_channels': '(2 * nf)', 'out_channels': 'nf', 'kernel_size': '(2)', 'stride': '(2)', 'padding': '(0)', 'bias': '(True)'}), '(in_channels=2 * nf, out_channels=nf, kernel_size=2,\n stride=2, padding=0, bias=True)\n', (19550, 19638), True, 'import torch.nn as nn\n'), ((20222, 20256), 'torch.cat', 'torch.cat', (['[x_gr, x_gr_map]'], {'dim': '(1)'}), '([x_gr, x_gr_map], dim=1)\n', (20231, 20256), False, 'import torch\n'), ((21333, 21370), 'torch.cat', 'torch.cat', (['[x_temp, x_nm_temp]'], {'dim': '(1)'}), '([x_temp, x_nm_temp], dim=1)\n', (21342, 21370), False, 'import torch\n'), ((22865, 22893), 'torch.cat', 'torch.cat', (['(out, L2_temp)', '(1)'], {}), '((out, L2_temp), 1)\n', (22874, 22893), False, 'import torch\n'), ((23100, 23128), 'torch.cat', 'torch.cat', (['(out, L1_temp)', '(1)'], {}), '((out, L1_temp), 1)\n', (23109, 23128), False, 'import torch\n'), ((23327, 23370), 'torch.cat', 'torch.cat', (['[feature, guided_feature]'], {'dim': '(0)'}), '([feature, guided_feature], dim=0)\n', (23336, 23370), False, 'import torch\n'), ((24890, 24921), 'torch.stack', 'torch.stack', (['aligned_fea'], {'dim': '(1)'}), '(aligned_fea, dim=1)\n', (24901, 24921), False, 'import torch\n'), ((590, 710), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'input_channels', 'out_channels': 'n_channels', 'kernel_size': 'kernel_size', 'padding': 'padding', 'bias': '(True)'}), '(in_channels=input_channels, out_channels=n_channels, kernel_size=\n kernel_size, padding=padding, bias=True)\n', (599, 710), True, 'import torch.nn as nn\n'), ((729, 775), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'negative_slope': '(0.1)', 'inplace': '(True)'}), '(negative_slope=0.1, inplace=True)\n', (741, 775), True, 'import torch.nn as nn\n'), ((1048, 1169), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'n_channels', 'out_channels': 'output_channel', 'kernel_size': 'kernel_size', 'padding': 'padding', 'bias': '(False)'}), '(in_channels=n_channels, out_channels=output_channel, kernel_size=\n kernel_size, padding=padding, bias=False)\n', (1057, 1169), True, 'import torch.nn as nn\n'), ((8004, 8022), 'torch.tanh', 'torch.tanh', (['c_next'], {}), '(c_next)\n', (8014, 8022), False, 'import torch\n'), ((13315, 13349), 'torch.cat', 'torch.cat', (['[L1_fea, L2_fea]'], {'dim': '(1)'}), '([L1_fea, L2_fea], dim=1)\n', (13324, 13349), False, 'import torch\n'), ((838, 955), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'n_channels', 'out_channels': 'n_channels', 'kernel_size': 'kernel_size', 'padding': 'padding', 'bias': '(False)'}), '(in_channels=n_channels, out_channels=n_channels, kernel_size=\n kernel_size, padding=padding, bias=False)\n', (847, 955), True, 'import torch.nn as nn\n'), ((978, 1024), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'negative_slope': '(0.1)', 'inplace': '(True)'}), '(negative_slope=0.1, inplace=True)\n', (990, 1024), True, 'import torch.nn as nn\n'), ((1459, 1485), 'torch.nn.init.orthogonal_', 'init.orthogonal_', (['m.weight'], {}), '(m.weight)\n', (1475, 1485), True, 'import torch.nn.init as init\n'), ((4062, 4088), 'torch.nn.init.orthogonal_', 'init.orthogonal_', (['m.weight'], {}), '(m.weight)\n', (4078, 4088), True, 'import torch.nn.init as init\n'), ((6724, 6750), 'torch.nn.init.orthogonal_', 'init.orthogonal_', (['m.weight'], {}), '(m.weight)\n', (6740, 6750), True, 'import torch.nn.init as init\n'), ((8162, 8209), 'torch.zeros', 'torch.zeros', (['batch_size', 'self.hf', 'height', 'width'], {}), '(batch_size, self.hf, height, width)\n', (8173, 8209), False, 'import torch\n'), ((12105, 12149), 'torch.cat', 'torch.cat', (['[L2_offset, L3_offset * 2]'], {'dim': '(1)'}), '([L2_offset, L3_offset * 2], dim=1)\n', (12114, 12149), False, 'import torch\n'), ((12414, 12448), 'torch.cat', 'torch.cat', (['[L2_fea, L3_fea]'], {'dim': '(1)'}), '([L2_fea, L3_fea], dim=1)\n', (12423, 12448), False, 'import torch\n'), ((13017, 13061), 'torch.cat', 'torch.cat', (['[L1_offset, L2_offset * 2]'], {'dim': '(1)'}), '([L1_offset, L2_offset * 2], dim=1)\n', (13026, 13061), False, 'import torch\n'), ((1582, 1607), 'torch.nn.init.constant_', 'init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (1596, 1607), True, 'import torch.nn.init as init\n'), ((1672, 1699), 'torch.nn.init.constant_', 'init.constant_', (['m.weight', '(1)'], {}), '(m.weight, 1)\n', (1686, 1699), True, 'import torch.nn.init as init\n'), ((1716, 1741), 'torch.nn.init.constant_', 'init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (1730, 1741), True, 'import torch.nn.init as init\n'), ((4185, 4210), 'torch.nn.init.constant_', 'init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (4199, 4210), True, 'import torch.nn.init as init\n'), ((4275, 4302), 'torch.nn.init.constant_', 'init.constant_', (['m.weight', '(1)'], {}), '(m.weight, 1)\n', (4289, 4302), True, 'import torch.nn.init as init\n'), ((4319, 4344), 'torch.nn.init.constant_', 'init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (4333, 4344), True, 'import torch.nn.init as init\n'), ((6847, 6872), 'torch.nn.init.constant_', 'init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (6861, 6872), True, 'import torch.nn.init as init\n'), ((6937, 6964), 'torch.nn.init.constant_', 'init.constant_', (['m.weight', '(1)'], {}), '(m.weight, 1)\n', (6951, 6964), True, 'import torch.nn.init as init\n'), ((6981, 7006), 'torch.nn.init.constant_', 'init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (6995, 7006), True, 'import torch.nn.init as init\n')]
|
import hydra
import logging
import os
import torch
import tqdm
import xarray as xr
from ..dataset import S2SDataset, TransformedDataset
from ..transform import ExampleToPytorch, CompositeTransform
from ..util import ECMWF_FORECASTS, collate_with_xarray
from .lightning import S2STercilesModule
from .util import find_checkpoint_file
_logger = logging.getLogger(__name__)
def terciles_pytorch_to_xarray(
t2m, tp, example_forecast, dims=["category", "lead_time", "latitude", "longitude"]
):
t2m_array = xr.DataArray(data=t2m.detach().numpy(), dims=dims, name="t2m")
tp_array = xr.DataArray(data=tp.detach().numpy(), dims=dims, name="tp")
dataset = xr.Dataset(data_vars={"t2m": t2m_array, "tp": tp_array,})
dataset = dataset.assign_coords(
{
"forecast_year": example_forecast.forecast_year.data,
"forecast_monthday": example_forecast.forecast_monthday.data,
"lead_time": example_forecast.lead_time.data,
"valid_time": example_forecast.valid_time.data,
"forecast_time": example_forecast.forecast_time.data,
"latitude": example_forecast.latitude.data,
"longitude": example_forecast.longitude.data,
"category": ["below normal", "near normal", "above normal"],
}
).expand_dims(["forecast_year", "forecast_monthday"])
return dataset
def concat_predictions(predictions):
yearly_predictions = {}
for p in predictions:
year = int(p.forecast_year.data)
yearly_list = yearly_predictions.get(year, [])
yearly_list.append(p)
yearly_predictions[year] = yearly_list
nested_datasets = [yearly_predictions[k] for k in sorted(yearly_predictions.keys())]
yearly_datasets = []
for l in nested_datasets:
l = sorted(l, key=lambda x: str(x.forecast_monthday[0]))
d = xr.concat(l, dim="forecast_monthday")
yearly_datasets.append(d)
return xr.concat(yearly_datasets, dim="forecast_year")
def fix_dims_for_output(forecast_dataset):
"""Manipulate the dimensions of the dataset of a single forecast so that we
can concatenate them easily."""
return (
forecast_dataset.stack(
{"forecast_label": ["forecast_year", "forecast_monthday"]}
)
.expand_dims("forecast_time")
.drop("forecast_label")
.squeeze("forecast_label")
)
def example_to_cuda(example):
new_example = {}
for k in example:
if k not in ["monthday", "month", "year"]:
new_example[k] = example[k].cuda()
else:
new_example[k] = example[k]
return new_example
@hydra.main(config_path="conf", config_name="infer")
def cli(cfg):
transform = hydra.utils.instantiate(cfg.experiment.transform)
# Find where we convert to pytorch. For inference we delay the conversion to pytorch
# because we want to use the xarray data as a template to generate the output file.
for i, t in enumerate(transform.transforms):
if isinstance(t, ExampleToPytorch):
pytorch_transform_idx = i
last_transform = CompositeTransform(transform.transforms[pytorch_transform_idx:])
transform.transforms = transform.transforms[:pytorch_transform_idx]
years = list(range(cfg.begin, cfg.end))
if cfg.experiment.dataset.index is not None:
month, day = ECMWF_FORECASTS[cfg.experiment.dataset.index]
label = f"{month:02}{day:02}.nc"
_logger.info("Targetting monthday %s", label)
name_filter = lambda x: x.endswith(label)
else:
name_filter = None
dataset = TransformedDataset(
S2SDataset(
hydra.utils.to_absolute_path(cfg.test_dataset_dir),
years=years,
name_filter=name_filter,
include_features=cfg.experiment.dataset.load_features,
),
transform,
)
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=1,
batch_sampler=None,
collate_fn=collate_with_xarray,
num_workers=int(cfg.num_workers),
shuffle=False,
)
checkpoint_path = find_checkpoint_file(
hydra.utils.to_absolute_path(cfg.checkpoint_dir)
)
_logger.info(f"Will run on checkpoint {checkpoint_path}")
model = hydra.utils.instantiate(cfg.experiment.model)
optimizer = hydra.utils.call(cfg.experiment.optimizer, model)
lightning_module = S2STercilesModule.load_from_checkpoint(
checkpoint_path, model=model, optimizer=optimizer
)
lightning_module.eval()
lightning_module.freeze()
lightning_module.cuda()
datasets_of_examples = []
for example in tqdm.tqdm(dataloader):
example_forecast = example["terciles"]
pytorch_example = last_transform(example)
pytorch_example = example_to_cuda(pytorch_example)
t2m_terciles, tp_terciles, *_ = lightning_module(pytorch_example)
dataset = terciles_pytorch_to_xarray(
t2m_terciles.cpu(),
tp_terciles.cpu(),
example_forecast,
dims=["batch", "category", "lead_time", "latitude", "longitude"],
)
datasets_of_examples.append(fix_dims_for_output(dataset))
sorted_datasets = sorted(
datasets_of_examples, key=lambda x: str(x.forecast_time.data[0])
)
ml_prediction = (
xr.concat(sorted_datasets, dim="forecast_time")
.drop("valid_time")
.squeeze("batch")
)
_logger.info(f"Outputting forecasts to {os.getcwd() + '/' + cfg.output_file}.")
ml_prediction.to_netcdf(cfg.output_file)
if __name__ == "__main__":
cli()
|
[
"tqdm.tqdm",
"hydra.utils.to_absolute_path",
"hydra.utils.instantiate",
"os.getcwd",
"xarray.concat",
"xarray.Dataset",
"hydra.main",
"hydra.utils.call",
"logging.getLogger"
] |
[((345, 372), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (362, 372), False, 'import logging\n'), ((2646, 2697), 'hydra.main', 'hydra.main', ([], {'config_path': '"""conf"""', 'config_name': '"""infer"""'}), "(config_path='conf', config_name='infer')\n", (2656, 2697), False, 'import hydra\n'), ((666, 722), 'xarray.Dataset', 'xr.Dataset', ([], {'data_vars': "{'t2m': t2m_array, 'tp': tp_array}"}), "(data_vars={'t2m': t2m_array, 'tp': tp_array})\n", (676, 722), True, 'import xarray as xr\n'), ((1944, 1991), 'xarray.concat', 'xr.concat', (['yearly_datasets'], {'dim': '"""forecast_year"""'}), "(yearly_datasets, dim='forecast_year')\n", (1953, 1991), True, 'import xarray as xr\n'), ((2728, 2777), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['cfg.experiment.transform'], {}), '(cfg.experiment.transform)\n', (2751, 2777), False, 'import hydra\n'), ((4283, 4328), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['cfg.experiment.model'], {}), '(cfg.experiment.model)\n', (4306, 4328), False, 'import hydra\n'), ((4345, 4394), 'hydra.utils.call', 'hydra.utils.call', (['cfg.experiment.optimizer', 'model'], {}), '(cfg.experiment.optimizer, model)\n', (4361, 4394), False, 'import hydra\n'), ((4660, 4681), 'tqdm.tqdm', 'tqdm.tqdm', (['dataloader'], {}), '(dataloader)\n', (4669, 4681), False, 'import tqdm\n'), ((1860, 1897), 'xarray.concat', 'xr.concat', (['l'], {'dim': '"""forecast_monthday"""'}), "(l, dim='forecast_monthday')\n", (1869, 1897), True, 'import xarray as xr\n'), ((4153, 4201), 'hydra.utils.to_absolute_path', 'hydra.utils.to_absolute_path', (['cfg.checkpoint_dir'], {}), '(cfg.checkpoint_dir)\n', (4181, 4201), False, 'import hydra\n'), ((3658, 3708), 'hydra.utils.to_absolute_path', 'hydra.utils.to_absolute_path', (['cfg.test_dataset_dir'], {}), '(cfg.test_dataset_dir)\n', (3686, 3708), False, 'import hydra\n'), ((5350, 5397), 'xarray.concat', 'xr.concat', (['sorted_datasets'], {'dim': '"""forecast_time"""'}), "(sorted_datasets, dim='forecast_time')\n", (5359, 5397), True, 'import xarray as xr\n'), ((5503, 5514), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5512, 5514), False, 'import os\n')]
|
# This file contains the functions for the data access and cycle count models for the Layers executed on the Systolic Array
import logging
import math
import numpy as np
from data_objects import HardwareObject, SAResult_Inflayer, SIMDResult_Inflayer
from layer_object import LayerObject
def conv_access_model(Hardware_param, LayerObj, SysResult_inflayer):
# data access model for convolution layer
#unpacking the parameters. Doing this unpacking at the beginning of each function
# Although repetition of code, doing this since some parameters such bit-width fusion etc may come from a different object
# In future to address such changes, only this unpacking part will need to be modified and the main body of the function will be untouched
bw_filter = LayerObj.bw_filter; bw_ifmap = LayerObj.bw_ifmap; bw_ofmap = LayerObj.bw_ofmap
bw_psum = LayerObj.bw_psum; bw_bias = LayerObj.bw_bias
OW = LayerObj.OW
OH = LayerObj.OH
OC = LayerObj.OC
KW = LayerObj.KW
KH = LayerObj.KH
IC = LayerObj.IC
IW = LayerObj.IW
IH = LayerObj.IH
Batch = LayerObj.Batch
DTile_ow = LayerObj.DTile_ow
DTile_oh = LayerObj.DTile_oh
DTile_oc = LayerObj.DTile_oc
DTile_kw = LayerObj.DTile_kw
DTile_kh = LayerObj.DTile_kh
DTile_ic = LayerObj.DTile_ic
DTile_iw = LayerObj.DTile_iw
DTile_ih = LayerObj.DTile_ih
DTile_batch = LayerObj.DTile_batch
Loop_order = LayerObj.Loop_order
fusion_status = LayerObj.fusion_status
#print(Size_IBUF)
#print(Loop_order)
#print(Pad)
# Determining which dataflow out of the three dataflow class form the input loop order
WS_key = ['ow', 'oh', 'n']
OS_key = ['kw', 'kh', 'ic']
IS_key = ['oc']
for key in WS_key:
if Loop_order[0] == key:
dataflow = "weight_stationary"
break
for key in OS_key:
if Loop_order[0] == key:
dataflow = "output_stationary"
break
for key in IS_key:
if Loop_order[0] == key:
dataflow = "input_stationary"
break
#print(dataflow)
######### Model for DRAM accesses
if (fusion_status == "NoFusion"):
if dataflow == "weight_stationary":
#imap access
ifmap_access_DRAM = (DTile_iw * DTile_ih * DTile_ic * DTile_batch) * (OW/DTile_ow) * (OH/DTile_oh) * (Batch/DTile_batch) \
* (IC/DTile_ic) * (KW/DTile_kw) * (KH/DTile_kh) * math.ceil((OC/DTile_oc)) * bw_ifmap # in bit
#######filter access
#common multiplier regardless of the variant of WS dataflow
filter_access_common = (DTile_kw * DTile_kh * DTile_ic * DTile_oc) * (IC/DTile_ic) * (KW/DTile_kw) * (KH/DTile_kh) * (OC/DTile_oc)
ow_multiplier = OW/DTile_ow
oh_multiplier = OH/DTile_oh
n_multiplier = Batch/DTile_batch
WS_Dict = {'ow': ow_multiplier, 'oh': oh_multiplier, 'n': n_multiplier}
#print("WS_Dict:", WS_Dict)
# First determining how many keys from the innermost loop matched in the given loop order
loopids = {'first': "match", 'second': "nomatch", 'third': "nomatch"}
#the first loop id will always be one of the keys since this is under WS category, hence first one is matched by default.
#beginning with no match for the second and third ids and will change them to match depending on the cases
#print("BEFORE:", loopids)
for key in WS_key:
if Loop_order[1] == key:
loopids['second'] = "match"
if loopids['second'] == "nomatch":
WScase = "oneKey" #case determined, only one key match, case 1, no further calculation needed
else:
for key in WS_key:
if Loop_order[2] == key:
loopids['third'] = "match"
if loopids['third'] == "nomatch":
WScase = "twoKey" #case determined, two keys matched, case 2, no further calculation needed
else:
WScase = "threeKey" #case determined, all three keys matched, case 3
#print("AFTER:", loopids)
#print("WS Case:", WScase)
#Depending on the WScase, now determining filter multiplier based on how many innermost loops matches the WS_keys
if WScase == "threeKey":
filter_multiplier = 1 # all three key matched, so optimal WS, filter multiplier is 1
elif WScase == "twoKey":
for key in WS_key:
if key != Loop_order[0] and key != Loop_order[1]: # tow key matched and one key does not match
mulkey = key
#print("mulkey:", mulkey)
filter_multiplier = WS_Dict[mulkey]
elif WScase == "oneKey":
mulkey1 = "empty"
mulkey2 = "empty"
for key in WS_key:
if key != Loop_order[0]: # only one key matched, hence two unmatched key to identify
if mulkey1 == "empty" and mulkey2 == "empty":
mulkey1 = key # one unmatched key is placed in mulkey 1
else:
mulkey2 = key # another unmatched key is placed in mulkey 2
print("mulkey1:", mulkey1)
print("mulkey2:", mulkey2)
filter_multiplier = WS_Dict[mulkey1] * WS_Dict[mulkey2]
#print("filter_multiplier:", filter_multiplier)
filter_access_DRAM = filter_access_common * filter_multiplier * bw_filter # in bit
#psum access
ofpsm_access_DRAM = (DTile_ow * DTile_oh * DTile_oc * DTile_batch) * (OW/DTile_ow) * (OH/DTile_oh) * (Batch/DTile_batch) * (OC/DTile_oc) \
* (2 * math.ceil(IC/DTile_ic) * math.ceil(KW/DTile_kw) * math.ceil(KH/DTile_kh) - 1)
psum_access_DRAM = (ofpsm_access_DRAM - (OW * OH * OC * Batch)) * bw_psum #in bit
#ofmap access
ofmap_access_DRAM = OW * OH * OC * Batch * bw_ofmap # in bit
#bias access
bias_access_DRAM = DTile_oc * (OC/DTile_oc) * bw_bias
#print("ifmap_access_DRAM:", ifmap_access_DRAM)
#print("filter_access_DRAM:", filter_access_DRAM)
#print("ofpsm_access_DRAM:", ofpsm_access_DRAM)
#print("psum_access_DRAM:", psum_access_DRAM)
#print("ofmap_access_DRAM:", ofmap_access_DRAM)
#print("bias_access_DRAM:", bias_access_DRAM)
elif dataflow == "output_stationary":
print("will do")
elif dataflow == "input_stationary":
print("will do")
else:
print("Invalid dataflow")
else:
print("model for fusion do not exist yet")
##### Model for SRAM accesses (Original SRAM access do not depend on fusion)
SRAM_stationary_flag = "NoStationary" # current genesys systolic PE hardware does not support any stationary logic for SRAM accesses
if SRAM_stationary_flag == "NoStationary":
conv_SRAM_access_NoStationary(Hardware_param, LayerObj, SysResult_inflayer)
else:
print("will write generic code for SRAM stationary logic based on dataflow")
SysResult_inflayer.DRAM_access['filter'] = filter_access_DRAM
SysResult_inflayer.DRAM_access['ifmap'] = ifmap_access_DRAM
SysResult_inflayer.DRAM_access['ofmap'] = ofmap_access_DRAM
SysResult_inflayer.DRAM_access['psum'] = psum_access_DRAM
SysResult_inflayer.DRAM_access['bias'] = bias_access_DRAM
def conv_SRAM_access_NoStationary(Hardware_param, LayerObj, SysResult_inflayer):
# Current genesys PE hardware does not support any stationary logic for SRAM accesses
# Hence SRAM access pattern does not depend on loop order or dataflow and this function gives the SRAM access pattern for this scenario
# unpacking the parameters
bw_filter = LayerObj.bw_filter; bw_ifmap = LayerObj.bw_ifmap; bw_ofmap = LayerObj.bw_ofmap
bw_psum = LayerObj.bw_psum; bw_bias = LayerObj.bw_bias
OW = LayerObj.OW
OH = LayerObj.OH
OC = LayerObj.OC
KW = LayerObj.KW
KH = LayerObj.KH
IC = LayerObj.IC
IW = LayerObj.IW
IH = LayerObj.IH
Batch = LayerObj.Batch
DTile_ow = LayerObj.DTile_ow
DTile_oh = LayerObj.DTile_oh
DTile_oc = LayerObj.DTile_oc
DTile_kw = LayerObj.DTile_kw
DTile_kh = LayerObj.DTile_kh
DTile_ic = LayerObj.DTile_ic
DTile_iw = LayerObj.DTile_iw
DTile_ih = LayerObj.DTile_ih
DTile_batch = LayerObj.DTile_batch
Stile_ow = LayerObj.Stile_ow
Stile_oh = LayerObj.Stile_oh
Stile_oc = LayerObj.Stile_oc
Stile_kw = LayerObj.Stile_kw
Stile_kh = LayerObj.Stile_kh
Stile_ic = LayerObj.Stile_ic
Stile_iw = LayerObj.Stile_iw
Stile_ih = LayerObj.Stile_ih
Stile_batch = LayerObj.Stile_batch
#ifmap access
ifmap_DRAM_loop_mul = (OW/DTile_ow) * (OH/DTile_oh) * (Batch/DTile_batch) * (IC/DTile_ic) * (KW/DTile_kw) * (KH/DTile_kh) * math.ceil((OC/DTile_oc))
ifmap_access_SRAM = (Stile_iw * Stile_ih * Stile_ic * Stile_batch) * (DTile_ow/Stile_ow) * (DTile_oh/Stile_oh) * (DTile_batch/Stile_batch) \
* (DTile_ic/Stile_ic) * (DTile_kw/Stile_kw) * (DTile_kh/Stile_kh) * math.ceil((DTile_oc/Stile_oc)) * ifmap_DRAM_loop_mul * bw_ifmap # in bit
# filter access
filter_DRAM_loop_mul = math.ceil((OW/DTile_ow)) * math.ceil((OH/DTile_oh)) * math.ceil((Batch/DTile_batch)) \
* (IC/DTile_ic) * (KW/DTile_kw) * (KH/DTile_kh) * (OC/DTile_oc)
filter_access_SRAM = (Stile_kw * Stile_kh * Stile_ic * Stile_oc) * (DTile_ic/Stile_ic) * (DTile_kw/Stile_kw) * (DTile_kh/Stile_kh) * (DTile_oc/Stile_oc) \
* math.ceil((DTile_ow/Stile_ow)) * math.ceil((DTile_oh/Stile_oh)) * math.ceil((DTile_batch/Stile_batch)) \
* filter_DRAM_loop_mul * bw_filter # in bit
# psum access
pDRAM_loop_mula = math.ceil(IC/DTile_ic) * math.ceil(KW/DTile_kw) * math.ceil(KH/DTile_kh)
pDRAM_loop_mulb = (OW/DTile_ow) * (OH/DTile_oh) * (Batch/DTile_batch) * (OC/DTile_oc)
psum_access_SRAM = (Stile_ow * Stile_oh * Stile_oc * Stile_batch) * (DTile_ow/Stile_ow) * (DTile_oh/Stile_oh) * (DTile_batch/Stile_batch) * (DTile_oc/Stile_oc) \
* (2 * math.ceil(DTile_ic/Stile_ic) * math.ceil(DTile_kw/Stile_kw) * math.ceil(DTile_kh/Stile_kh) * pDRAM_loop_mula - 1) \
* pDRAM_loop_mulb * bw_psum # in bit
# bias access, for each ofmap location, bias term need to be added once,
bias_DRAM_loop_mul = (OW/DTile_ow) * (OH/DTile_oh) * (Batch/DTile_batch) * (OC/DTile_oc)
bias_access_SRAM = (Stile_oc) * DTile_ow * DTile_oh * DTile_batch * (DTile_oc/Stile_oc) * bias_DRAM_loop_mul * bw_bias # in bit
#print("ifmap_access_SRAM:", ifmap_access_SRAM)
#print("filter_access_SRAM:", filter_access_SRAM)
#print("psum_access_SRAM:", psum_access_SRAM)
#print("bias_access_SRAM:", bias_access_SRAM)
SysResult_inflayer.SRAM_access['filter'] = filter_access_SRAM
SysResult_inflayer.SRAM_access['ifmap'] = ifmap_access_SRAM
SysResult_inflayer.SRAM_access['psum'] = psum_access_SRAM
SysResult_inflayer.SRAM_access['bias'] = bias_access_SRAM
def conv_cycle_model(Hardware_param, LayerObj, SysResult_inflayer):
#compute cycle and DRAM stall cycle count model for the convolution layer
# unpacking the parameters
SysArray_row = Hardware_param.SysArray_row; SysArray_col = Hardware_param.SysArray_col
OW = LayerObj.OW
OH = LayerObj.OH
OC = LayerObj.OC
KW = LayerObj.KW
KH = LayerObj.KH
IC = LayerObj.IC
IW = LayerObj.IW
IH = LayerObj.IH
Batch = LayerObj.Batch
DTile_ow = LayerObj.DTile_ow
DTile_oh = LayerObj.DTile_oh
DTile_oc = LayerObj.DTile_oc
DTile_kw = LayerObj.DTile_kw
DTile_kh = LayerObj.DTile_kh
DTile_ic = LayerObj.DTile_ic
DTile_iw = LayerObj.DTile_iw
DTile_ih = LayerObj.DTile_ih
DTile_batch = LayerObj.DTile_batch
Stile_ow = LayerObj.Stile_ow
Stile_oh = LayerObj.Stile_oh
Stile_oc = LayerObj.Stile_oc
Stile_kw = LayerObj.Stile_kw
Stile_kh = LayerObj.Stile_kh
Stile_ic = LayerObj.Stile_ic
Stile_iw = LayerObj.Stile_iw
Stile_ih = LayerObj.Stile_ih
Stile_batch = LayerObj.Stile_batch
fusion_status = LayerObj.fusion_status
### determining the on-chip compute cycles, compute cycles do not depend on loop order, or fusion
cycle_oneTile = (DTile_ow/Stile_ow) * (DTile_oh/Stile_oh) * (DTile_kw/Stile_kw) * (DTile_kh/Stile_kh) * (DTile_batch/Stile_batch) \
* math.ceil(DTile_ic/Stile_ic) * math.ceil(DTile_oc/Stile_oc)
#print(cycle_oneTile)
#pipeline overhead for each DRAM tile
pipe_overhead_tile = (SysArray_row - 1) + (SysArray_col - 1) #using PE row and col
#for now omitting the use of any ceil since DRAM tile size will be integer multiple of loops
Number_of_Tile = (OW/DTile_ow) * (OH/DTile_oh) * (Batch/DTile_batch) * (IC/DTile_ic) * (KW/DTile_kw) * (KH/DTile_kh) * (OC/DTile_oc)
compute_cycles = math.ceil((cycle_oneTile + pipe_overhead_tile) * Number_of_Tile) # giving the outer ceil to avoid fraction cycle numbers
#print("compute_cycles:", compute_cycles)
SysResult_inflayer.cycles['compute'] = compute_cycles
#of cycles to compute one tile including the pipeline setup operhead, need this variable to compute DRAM stall cycles
ComputeTile_cycles = cycle_oneTile + pipe_overhead_tile
######## model for the DRAM stall cycles, depends on loop order, fusion etc
if (fusion_status == "NoFusion"): #Model for the version where there is no fusion
DRAM_stall_cycles = conv_stall_model_nofu(Hardware_param, LayerObj, ComputeTile_cycles, SysResult_inflayer)
else:
print("model for fusion do not exist yet")
SysResult_inflayer.cycles['total'] = compute_cycles + DRAM_stall_cycles
####### Counting number of MAC operations: writing in a generic way for future extension (ceiling affects cycle count and #of MAC differently)
PE_tile_mac = (Stile_ow * Stile_oh * Stile_oc * Stile_batch) * (Stile_ic * Stile_kw * Stile_kh)
SRAM_tile_mac = PE_tile_mac * (DTile_ow/Stile_ow) * (DTile_oh/Stile_oh) * (DTile_kw/Stile_kw) * (DTile_kh/Stile_kh) * (DTile_batch/Stile_batch) \
* (DTile_ic/Stile_ic) * (DTile_oc/Stile_oc)
Nos_of_mac = SRAM_tile_mac * (OW/DTile_ow) * (OH/DTile_oh) * (Batch/DTile_batch) * (IC/DTile_ic) * (KW/DTile_kw) * (KH/DTile_kh) * (OC/DTile_oc)
print("Nos of MAC:", Nos_of_mac)
SysResult_inflayer.arithmetic['mac'] = Nos_of_mac
def conv_stall_model_nofu(Hardware_param, LayerObj, ComputeTile_cycles, SysResult_inflayer):
#DRAM stall cycle count model for the convolution layer when there is no fusion
bw_filter = LayerObj.bw_filter; bw_ifmap = LayerObj.bw_ifmap; bw_ofmap = LayerObj.bw_ofmap
bw_psum = LayerObj.bw_psum; bw_bias = LayerObj.bw_bias
SysArray_row = Hardware_param.SysArray_row; SysArray_col = Hardware_param.SysArray_col
RBW_DRAM_to_WBUF = Hardware_param.RBW_DRAM_to_WBUF # in bit/cycle, bias is also loaded through the same AXI interface
RBW_DRAM_to_IBUF = Hardware_param.RBW_DRAM_to_IBUF
RBW_DRAM_to_OBUF = Hardware_param.RBW_DRAM_to_OBUF
WBW_OBUF_to_DRAM = Hardware_param.WBW_OBUF_to_DRAM
OW = LayerObj.OW
OH = LayerObj.OH
OC = LayerObj.OC
KW = LayerObj.KW
KH = LayerObj.KH
IC = LayerObj.IC
IW = LayerObj.IW
IH = LayerObj.IH
Batch = LayerObj.Batch
DTile_ow = LayerObj.DTile_ow
DTile_oh = LayerObj.DTile_oh
DTile_oc = LayerObj.DTile_oc
DTile_kw = LayerObj.DTile_kw
DTile_kh = LayerObj.DTile_kh
DTile_ic = LayerObj.DTile_ic
DTile_iw = LayerObj.DTile_iw
DTile_ih = LayerObj.DTile_ih
DTile_batch = LayerObj.DTile_batch
Loop_order = LayerObj.Loop_order
# Determining which dataflow out of the three dataflow class form the input loop order
WS_key = ['ow', 'oh', 'n']
OS_key = ['kw', 'kh', 'ic']
IS_key = ['oc']
for key in WS_key:
if Loop_order[0] == key:
dataflow = "weight_stationary"
break
for key in OS_key:
if Loop_order[0] == key:
dataflow = "output_stationary"
break
for key in IS_key:
if Loop_order[0] == key:
dataflow = "input_stationary"
break
#print("Dataflow:", dataflow)
if dataflow == "weight_stationary":
# The current DRAM stall model is valid for any WS loop order with oc at the outermost loop (DUE TO SOME CORNER SITUATIONs, EXTENSION IS POSSIBLE)
Loop_order1 = ['ow', 'oh', 'kw', 'kh', 'ic', 'n', 'oc'] # current GeneSys Loop order
Loop_order2 = ['ow', 'oh', 'n', 'kw', 'kh', 'ic', 'oc'] # an optimal WS loop order, there are equivalent varients of these loop order, WILL ADD LATER IN CODE
if (Loop_order == Loop_order1 and (OW/DTile_ow * OH/DTile_oh) > 2) or (Loop_order == Loop_order2 and (OW/DTile_ow * OH/DTile_oh * Batch/DTile_batch) > 2):
# The tiling condition ensures that the numbers of WS tiles is at least 3 to be able to normally execute the 3 stage double-buffered DRAM pipeline
No_Warning = "True"
else:
print("WARNING: Number of WS tile is less than 3")
print("Nos of WS tile:", (OW/DTile_ow * OH/DTile_oh * Batch/DTile_batch))
print("Nos of DRAM WS + OS tiles:", (OW/DTile_ow) * (OH/DTile_oh) * (Batch/DTile_batch) * (IC/DTile_ic) * (KW/DTile_kw) * (KH/DTile_kh))
print("Nos of total DRAM tiles:", (OW/DTile_ow) * (OH/DTile_oh) * (Batch/DTile_batch) * (IC/DTile_ic) * (KW/DTile_kw) * (KH/DTile_kh) * (OC/DTile_oc))
#print("OH:", OH, "OW:", OW, "DTile_oh:", DTile_oh, "DTile_ow:", DTile_ow)
if (Loop_order == Loop_order1) or (Loop_order == Loop_order2):
if Loop_order == Loop_order1:
filter_multiplier = Batch/DTile_batch
elif Loop_order == Loop_order2:
filter_multiplier = 1
#print(filter_multiplier)
#of tiles where weights are being loaded (regardless of bias)
NT_weight = (KW/DTile_kw) * (KH/DTile_kh) * (IC/DTile_ic) * (OC/DTile_oc) * filter_multiplier
#of tiles where (weight + bias) are being loaded. (bias is loaded with the oc loop)
NT_wgt_bias = OC/DTile_oc
#of tiles where only weights are being loaded
NT_wgt_only = NT_weight - NT_wgt_bias
#of tiles where psum is written to the DRAM
NT_ps_wrt = (OW/DTile_ow) * (OH/DTile_oh) * (Batch/DTile_batch) * (IC/DTile_ic) * (KW/DTile_kw) * (KH/DTile_kh) * (OC/DTile_oc)
#of tiles where psum write only happens
NT_ps_wrtonly = (OW/DTile_ow) * (OH/DTile_oh) * (Batch/DTile_batch) * (OC/DTile_oc)
#of tiles where both psum read and write occur
NT_ps_rdwrt = NT_ps_wrt - NT_ps_wrtonly
#print("NT_weight:", NT_weight, ";", "NT_wgt_bias:", NT_wgt_bias, ";", "NT_wgt_only:", NT_wgt_only)
#print("NT_ps_wrt:", NT_ps_wrt, ";", "NT_ps_wrtonly:", NT_ps_wrtonly, ";", "NT_ps_rdwrt:", NT_ps_rdwrt)
## Performing CASE counts
#CASE-5: #of tiles where weight+bias is being loaded (exclude the first tile)
NT_case5 = NT_wgt_bias - 1
#CASE-4: #of tiles where only weight is being loaded
NT_case4 = NT_wgt_only
#CASE-1: #of tiles where ifmap read and psum write happens (exclude the last 2 tiles)
NT_case1 = (NT_ps_wrtonly - 2) - NT_case5
#CASE-2: #of tiles where ifmap read and psum read+write happens
NT_case2 = NT_ps_rdwrt - NT_case4
#print("NT_case1:", NT_case1, "NT_case2:", NT_case2, "NT_case4:", NT_case4, "NT_case5:", NT_case5)
## place condition to address the situation when tiles from ic, kw, kh, oc, n loops are equal to their original dimensions
if (NT_case2 + NT_case4 == 0):
NT_case2 = 0
NT_case4 = 0
print("NT_case1:", NT_case1, "NT_case2:", NT_case2, "NT_case4:", NT_case4, "NT_case5:", NT_case5)
#The following two tiles are placing as seperate cases for future exception code when WS tiles can be < 3. There it is possible for these cases to be zero
NT_case7 = 1 # The second tile
NT_case8 = 1 # The second last tile
#of cycles required to load/store each tile of each kind of data
WgtTile_load_cycles = math.ceil((DTile_kw * DTile_kh * DTile_ic * DTile_oc * bw_filter) / RBW_DRAM_to_WBUF)
BiasTile_load_cycles = math.ceil((DTile_oc * bw_bias) / RBW_DRAM_to_WBUF)
ifmapTile_load_cycles = math.ceil((DTile_iw * DTile_ih * DTile_ic * DTile_batch * bw_ifmap) / RBW_DRAM_to_IBUF)
psumTile_load_cycles = math.ceil((DTile_ow * DTile_oh * DTile_oc * DTile_batch * bw_psum) / RBW_DRAM_to_OBUF)
psumTile_store_cycles = math.ceil((DTile_ow * DTile_oh * DTile_oc * DTile_batch * bw_psum) / WBW_OBUF_to_DRAM)
#do not need to use 8-bit ofmap, not for the no-fusion version as well. Since SIMD operations are 32 bit and there is always at least a ReLU layer after each
#Conv layer, the output of conv will go to SIMD and the quantization of 32 to 8 bit happens at SIMD. Hence the ofmap from a conv will be 32 bit
#print("computeTile_cycles:", ComputeTile_cycles)
#print("WgtTile_load_cycles:", WgtTile_load_cycles)
#print("BiasTile_load_cycles:", BiasTile_load_cycles)
#print("ifmapTile_load_cycles:", ifmapTile_load_cycles)
#print("psumTile_load_cycles:", psumTile_load_cycles)
#print("psumTile_store_cycles:", psumTile_store_cycles)
# Determining the #of stall cycles for each case
#Case1
L11 = ifmapTile_load_cycles - ComputeTile_cycles
L12 = psumTile_store_cycles - ComputeTile_cycles
stall_case1 = max(0, L11, L12) * NT_case1
#Case2
L21 = ifmapTile_load_cycles - ComputeTile_cycles
L22 = (psumTile_load_cycles + psumTile_store_cycles - ComputeTile_cycles) #one AXI for both read and write of psum.
stall_case2 = max(0, L21, L22) * NT_case2
#Case4
L41 = ifmapTile_load_cycles - ComputeTile_cycles
L42 = WgtTile_load_cycles - ComputeTile_cycles
L43 = (psumTile_load_cycles + psumTile_store_cycles - ComputeTile_cycles) #one AXI for both read and write of psum.
stall_case4 = max(0, L41, L42, L43) * NT_case4
#Case5
L51 = ifmapTile_load_cycles - ComputeTile_cycles
L52 = psumTile_store_cycles - ComputeTile_cycles
L53 = (WgtTile_load_cycles + BiasTile_load_cycles) - ComputeTile_cycles
stall_case5 = max(0, L51, L52, L53) * NT_case5
print("stall_case1:", stall_case1, "; stall_case2:", stall_case2, "; stall_case4:", stall_case4, "; stall_case5:", stall_case5)
#First tile
Lf1 = ifmapTile_load_cycles
Lf2 = WgtTile_load_cycles + BiasTile_load_cycles
stall_first = max(Lf1, Lf2)
#Last tile
stall_last = psumTile_store_cycles
#Case7
L71 = ifmapTile_load_cycles - ComputeTile_cycles
stall_case7 = max(0, L71) * NT_case7
#Case8
L81 = psumTile_store_cycles - ComputeTile_cycles
stall_case8 = max(0, L81)
print("stall_first:", stall_first, "; stall_last:", stall_last, "; stall_case7:", stall_case7, "; stall_case8:", stall_case8)
#of total DRAM stall cycles
DRAM_stall_cycles = stall_case1 + stall_case2 + stall_case4 + stall_case5 + stall_case7 + stall_case8 + stall_first + stall_last
#print("DRAM_stall_cycles:", DRAM_stall_cycles)
SysResult_inflayer.cycles['DRAM_Stall'] = DRAM_stall_cycles
else:
print("WS DRAM stall model do not exist for the input loop order")
elif dataflow == "output_stationary":
print("DARM stall model do not exist yet")
elif dataflow == "input_stationary":
print("DRAM stall model do not exist yet")
else:
print("Invalid dataflow")
return DRAM_stall_cycles
def gemm_access_model(Hardware_param, LayerObj, SysResult_inflayer):
# data access model for fully connected layer (i.e., gemm)
#unpacking the parameters. Doing this unpacking at the beginning of each function
bw_filter = LayerObj.bw_filter; bw_ifmap = LayerObj.bw_ifmap; bw_ofmap = LayerObj.bw_ofmap
bw_psum = LayerObj.bw_psum; bw_bias = LayerObj.bw_bias
OC = LayerObj.OC
IC = LayerObj.IC
Batch = LayerObj.Batch
DTile_oc = LayerObj.DTile_oc
DTile_ic = LayerObj.DTile_ic
DTile_batch = LayerObj.DTile_batch
Loop_order = LayerObj.Loop_order
fusion_status = LayerObj.fusion_status
# Current implementation is for one loop order only, this is sort of the most optimal loop order for gemm analytically.
# So probably no need to implement the support for any loop order for gemm
if Batch > 1 and Loop_order == ['n', 'ic', 'oc']: # weight stationary category
LayerObj.Loop_order = ['ow', 'oh', 'n', 'kw', 'kh', 'ic', 'oc'] # converting FC loop order to convolution loop order
conv_access_model(Hardware_param, LayerObj, SysResult_inflayer)
LayerObj.Loop_order = Loop_order # doing this to retain the original Loop order in LayerObj so that it can be used in later function calls
elif Batch == 1 and Loop_order == ['n', 'ic', 'oc']: #output stationary category
###### Model for DRAM access cost
if (fusion_status == "NoFusion"):
# ifmap access
if math.ceil(IC/DTile_ic) == 1:
ifmap_oc_multiplier = 1 # the loop becomes input stationary wrt DRAM access
else:
ifmap_oc_multiplier = OC/DTile_oc
#print(ifmap_oc_multiplier)
ifmap_access_DRAM = (DTile_ic) * (IC/DTile_ic) * (OC/DTile_oc) * bw_ifmap
# filter access
filter_access_DRAM = (DTile_ic * DTile_oc) * (IC/DTile_ic) * (OC/DTile_oc) * bw_filter # in bit
# ofmap access, no pusm DRAM access since output stationary
ofmap_access_DRAM = (DTile_oc) * (OC/DTile_oc) * bw_ofmap
# bias access
bias_access_DRAM = (DTile_oc) * (OC/DTile_oc) * bw_bias
else:
print("model for fusion do not exist yet")
##### Model for SRAM accesses (Original SRAM access do not depend on fusion)
SRAM_stationary_flag = "NoStationary" # current genesys systolic PE hardware does not support any stationary logic for SRAM accesses
if SRAM_stationary_flag == "NoStationary":
conv_SRAM_access_NoStationary(Hardware_param, LayerObj, SysResult_inflayer)
else:
print("will write generic code for SRAM stationary logic based on dataflow")
SysResult_inflayer.DRAM_access['filter'] = filter_access_DRAM
SysResult_inflayer.DRAM_access['ifmap'] = ifmap_access_DRAM
SysResult_inflayer.DRAM_access['ofmap'] = ofmap_access_DRAM
#SysResult_inflayer.DRAM_access['psum'] = psum_access_DRAM
SysResult_inflayer.DRAM_access['bias'] = bias_access_DRAM
else:
print("The input loop order is not optimal and not supported")
def gemm_cycle_model(Hardware_param, LayerObj, SysResult_inflayer):
#compute cycle and DRAM stall cycle count model for the fully connecetd layer
# unpacking the parameters
SysArray_row = Hardware_param.SysArray_row; SysArray_col = Hardware_param.SysArray_col
OW = LayerObj.OW
OH = LayerObj.OH
OC = LayerObj.OC
KW = LayerObj.KW
KH = LayerObj.KH
IC = LayerObj.IC
IW = LayerObj.IW
IH = LayerObj.IH
Batch = LayerObj.Batch
DTile_ow = LayerObj.DTile_ow
DTile_oh = LayerObj.DTile_oh
DTile_oc = LayerObj.DTile_oc
DTile_kw = LayerObj.DTile_kw
DTile_kh = LayerObj.DTile_kh
DTile_ic = LayerObj.DTile_ic
DTile_iw = LayerObj.DTile_iw
DTile_ih = LayerObj.DTile_ih
DTile_batch = LayerObj.DTile_batch
Stile_ow = LayerObj.Stile_ow
Stile_oh = LayerObj.Stile_oh
Stile_oc = LayerObj.Stile_oc
Stile_kw = LayerObj.Stile_kw
Stile_kh = LayerObj.Stile_kh
Stile_ic = LayerObj.Stile_ic
Stile_iw = LayerObj.Stile_iw
Stile_ih = LayerObj.Stile_ih
Stile_batch = LayerObj.Stile_batch
Loop_order = LayerObj.Loop_order
fusion_status = LayerObj.fusion_status
# Current implementation is for one loop order only, this is sort of the most optimal loop order for gemm analytically.
# So no need to implement the support for any loop order for gemm
if Batch > 1 and Loop_order == ['n', 'ic', 'oc']: # weight stationary category
LayerObj.Loop_order = ['ow', 'oh', 'n', 'kw', 'kh', 'ic', 'oc'] # converting FC loop order to convolution loop order
conv_cycle_model(Hardware_param, LayerObj, SysResult_inflayer)
LayerObj.Loop_order = Loop_order # doing this to retain the original Loop order in LayerObj so that it can be used in later function calls if needed
elif Batch == 1 and Loop_order == ['n', 'ic', 'oc']:
### determining computing cycles, using the convolution equations cause that works
#determining the on-chip compute cycles, compute cycles do not depend on loop order, or fusion
cycle_oneTile = (DTile_ow/Stile_ow) * (DTile_oh/Stile_oh) * (DTile_kw/Stile_kw) * (DTile_kh/Stile_kh) * (DTile_batch/Stile_batch) \
* math.ceil(DTile_ic/Stile_ic) * math.ceil(DTile_oc/Stile_oc)
#print("cycle_oneTile:", cycle_oneTile)
#pipeline overhead for each DRAM tile
pipe_overhead_tile = (SysArray_row - 1) + (SysArray_col - 1) #using PE row and col,
#for now omitting the use of any ceil since DRAM tile size will be integer multiple of loops,
Number_of_Tile = (OW/DTile_ow) * (OH/DTile_oh) * (Batch/DTile_batch) * (IC/DTile_ic) * (KW/DTile_kw) * (KH/DTile_kh) * (OC/DTile_oc)
compute_cycles = math.ceil((cycle_oneTile + pipe_overhead_tile) * Number_of_Tile) # giving the outer ceil to avoid fraction cycle numbers
#print("compute_cycles:", compute_cycles)
SysResult_inflayer.cycles['compute'] = compute_cycles
#of cycles to compute one tile including the pipeline setup operhead, need this variable to compute DRAM stall cycles
ComputeTile_cycles = cycle_oneTile + pipe_overhead_tile
######## model for the DRAM stall cycles, depends on loop order, fusion etc
if (fusion_status == "NoFusion"): #Model for the version where there is no fusion
DRAM_stall_cycles = gemmb1_stall_model_nofu(Hardware_param, LayerObj, ComputeTile_cycles, SysResult_inflayer) # stall model for batch = 1, output stationary
else:
print("model for fusion do not exist yet")
SysResult_inflayer.cycles['total'] = compute_cycles + DRAM_stall_cycles
####### Counting number of MAC operations: using the convolution equations cause that works (ceiling affects cycle count and #of MAC differently)
PE_tile_mac = (Stile_ow * Stile_oh * Stile_oc * Stile_batch) * (Stile_ic * Stile_kw * Stile_kh)
SRAM_tile_mac = PE_tile_mac * (DTile_ow/Stile_ow) * (DTile_oh/Stile_oh) * (DTile_kw/Stile_kw) * (DTile_kh/Stile_kh) * (DTile_batch/Stile_batch) \
* (DTile_ic/Stile_ic) * (DTile_oc/Stile_oc)
Nos_of_mac = SRAM_tile_mac * (OW/DTile_ow) * (OH/DTile_oh) * (Batch/DTile_batch) * (IC/DTile_ic) * (KW/DTile_kw) * (KH/DTile_kh) * (OC/DTile_oc)
print("Nos of MAC:", Nos_of_mac)
SysResult_inflayer.arithmetic['mac'] = Nos_of_mac
else:
print("The input loop order is not optimal and not supported")
def gemmb1_stall_model_nofu(Hardware_param, LayerObj, ComputeTile_cycles, SysResult_inflayer):
#DRAM stall cycle count model for the gemm layer when there is no fusion, batch size = 1, output stationary
bw_filter = LayerObj.bw_filter; bw_ifmap = LayerObj.bw_ifmap; bw_ofmap = LayerObj.bw_ofmap
bw_psum = LayerObj.bw_psum; bw_bias = LayerObj.bw_bias
RBW_DRAM_to_WBUF = Hardware_param.RBW_DRAM_to_WBUF # in bit/cycle, bias is also loaded through the same AXI interface
RBW_DRAM_to_IBUF = Hardware_param.RBW_DRAM_to_IBUF
RBW_DRAM_to_OBUF = Hardware_param.RBW_DRAM_to_OBUF
WBW_OBUF_to_DRAM = Hardware_param.WBW_OBUF_to_DRAM
OC = LayerObj.OC
IC = LayerObj.IC
Batch = LayerObj.Batch
DTile_oc = LayerObj.DTile_oc
DTile_ic = LayerObj.DTile_ic
DTile_batch = LayerObj.DTile_batch
#of cycles required to load/store each tile of each kind of data
WgtTile_load_cycles = math.ceil((DTile_ic * DTile_oc * bw_filter) / RBW_DRAM_to_WBUF)
BiasTile_load_cycles = math.ceil((DTile_oc * bw_bias) / RBW_DRAM_to_WBUF)
ifmapTile_load_cycles = math.ceil((DTile_ic * bw_ifmap) / RBW_DRAM_to_IBUF)
ofmapTile_store_cycles = math.ceil((DTile_oc * bw_ofmap) / WBW_OBUF_to_DRAM)
#do not need to use 8-bit ofmap, not for the no-fusion version as well. Since SIMD operations are 32 bit and there is always at least a ReLU layer after each
#Conv layer, the output of conv will go to SIMD and the quantization of 32 to 8 bit happens at SIMD. Hence the ofmap from a conv will be 32 bit
#print("ComputeTile_cycles:", ComputeTile_cycles)
#print("WgtTile_load_cycles:", WgtTile_load_cycles)
if math.ceil(IC/DTile_ic) == 1:
dataflow = "input_stationary"
else:
dataflow = "output_stationary"
if dataflow == "input_stationary":
## Performing CASE counts, there is only one case
#Case 1: #of tiles where weight+bias is loaded, and ofmap write occurs, except the first two and last two tiles
NT_case1 = (OC/DTile_oc) - 2
# Using this condition to seperately address the situation when OC/DTile_oc is also 1 and NT_case1 becomes negative
NT_case1_flag = "None"
if NT_case1 < 0:
NT_case1 = 0
NT_case1_flag = "Negative"
#print("NT_case1:", NT_case1)
# Determining the #of stall cycles for each case
#Case1
L11 = (WgtTile_load_cycles + BiasTile_load_cycles) - ComputeTile_cycles
L12 = ofmapTile_store_cycles - ComputeTile_cycles
stall_case1 = max(0, L11, L12) * NT_case1
#First tile
Lf1 = ifmapTile_load_cycles
Lf2 = WgtTile_load_cycles + BiasTile_load_cycles
stall_first = max(Lf1, Lf2)
#Second tile
L2nd = (WgtTile_load_cycles + BiasTile_load_cycles) - ComputeTile_cycles
stall_second = max(0, L2nd)
#Second last tile
L2ndlst = ofmapTile_store_cycles - ComputeTile_cycles
stall_secondlast = max(0, L2ndlst)
#Last tile
stall_last = ofmapTile_store_cycles
#print("stall_case1:", stall_case1, "; stall_first:", stall_first, "; stall_second:", stall_second, "; stall_secondlast:", stall_secondlast,\
# "; stall_last:", stall_last)
#of total DRAM stall cycles
if NT_case1_flag == "Negative":
DRAM_stall_cycles = stall_first + stall_last
else:
DRAM_stall_cycles = stall_case1 + stall_first + stall_second + stall_secondlast + stall_last
elif dataflow == "output_stationary":
#of tiles where weights are being loaded (regardless of bias)
NT_weight = (IC/DTile_ic) * (OC/DTile_oc)
#of tiles where (weight + bias) are being loaded. (bias is loaded with the oc loop)
NT_wgt_bias = OC/DTile_oc
#of tiles where only weights are being loaded
NT_wgt_only = NT_weight - NT_wgt_bias
#of tiles where ofmap is written to the DRAM
NT_ofmap_wrt = (OC/DTile_oc)
#print("NT_weight:", NT_weight, ";", "NT_wgt_bias:", NT_wgt_bias, ";", "NT_wgt_only:", NT_wgt_only, "; NT_ofmap_wrt:", NT_ofmap_wrt)
## Performing CASE counts
#CASE-1: #of tiles where weight+bias is being loaded (exclude the first tile)
NT_case1 = NT_wgt_bias - 1
#CASE-4: #of tiles where ofmap write occurs (excluding the last tile, ofmap write does not happen at the second last tile)
NT_case4 = NT_ofmap_wrt - 1
#CASE-3: #of tiles where weightonly read and ifmap read happens (excluding the second tile)
NT_case3 = (NT_wgt_only - 1) - NT_case4
#print("NT_case1:", NT_case1, "NT_case3:", NT_case3, "NT_case4:", NT_case4)
# Determining the #of stall cycles for each case
#Case1
L11 = (WgtTile_load_cycles + BiasTile_load_cycles) - ComputeTile_cycles
L12 = ifmapTile_load_cycles - ComputeTile_cycles
stall_case1 = max(0, L11, L12) * NT_case1
#Case3
L31 = WgtTile_load_cycles - ComputeTile_cycles
L32 = ifmapTile_load_cycles - ComputeTile_cycles
stall_case3 = max(0, L31, L32) * NT_case3
#Case4
L41 = WgtTile_load_cycles - ComputeTile_cycles
L42 = ifmapTile_load_cycles - ComputeTile_cycles
L43 = ofmapTile_store_cycles - ComputeTile_cycles
stall_case4 = max(0, L41, L42, L43) * NT_case4
#print("stall_case1:", stall_case1, "; stall_case3:", stall_case3, "; stall_case4:", stall_case4)
#First tile
Lf1 = ifmapTile_load_cycles
Lf2 = WgtTile_load_cycles + BiasTile_load_cycles
stall_first = max(Lf1, Lf2)
#Second tile
L2nd1 = WgtTile_load_cycles - ComputeTile_cycles
L2nd2 = ifmapTile_load_cycles - ComputeTile_cycles
stall_second = max(0, L2nd1, L2nd2)
#Second last tile, there is no data read/write for the second last tile, only compute
stall_secondlast = 0
#Last tile
stall_last = ofmapTile_store_cycles
#print("stall_first:", stall_first, "; stall_second:", stall_second, "; stall_secondlast:", stall_secondlast, "; stall_last:", stall_last)
#of total DRAM stall cycles
DRAM_stall_cycles = stall_case1 + stall_case3 + stall_case4 + stall_first + stall_second + stall_secondlast + stall_last
#print("DRAM_stall_cycles:", DRAM_stall_cycles)
SysResult_inflayer.cycles['DRAM_Stall'] = DRAM_stall_cycles
return DRAM_stall_cycles
|
[
"math.ceil"
] |
[((13366, 13430), 'math.ceil', 'math.ceil', (['((cycle_oneTile + pipe_overhead_tile) * Number_of_Tile)'], {}), '((cycle_oneTile + pipe_overhead_tile) * Number_of_Tile)\n', (13375, 13430), False, 'import math\n'), ((33418, 33479), 'math.ceil', 'math.ceil', (['(DTile_ic * DTile_oc * bw_filter / RBW_DRAM_to_WBUF)'], {}), '(DTile_ic * DTile_oc * bw_filter / RBW_DRAM_to_WBUF)\n', (33427, 33479), False, 'import math\n'), ((33509, 33557), 'math.ceil', 'math.ceil', (['(DTile_oc * bw_bias / RBW_DRAM_to_WBUF)'], {}), '(DTile_oc * bw_bias / RBW_DRAM_to_WBUF)\n', (33518, 33557), False, 'import math\n'), ((33588, 33637), 'math.ceil', 'math.ceil', (['(DTile_ic * bw_ifmap / RBW_DRAM_to_IBUF)'], {}), '(DTile_ic * bw_ifmap / RBW_DRAM_to_IBUF)\n', (33597, 33637), False, 'import math\n'), ((33669, 33718), 'math.ceil', 'math.ceil', (['(DTile_oc * bw_ofmap / WBW_OBUF_to_DRAM)'], {}), '(DTile_oc * bw_ofmap / WBW_OBUF_to_DRAM)\n', (33678, 33718), False, 'import math\n'), ((9231, 9255), 'math.ceil', 'math.ceil', (['(OC / DTile_oc)'], {}), '(OC / DTile_oc)\n', (9240, 9255), False, 'import math\n'), ((10249, 10273), 'math.ceil', 'math.ceil', (['(KH / DTile_kh)'], {}), '(KH / DTile_kh)\n', (10258, 10273), False, 'import math\n'), ((12922, 12952), 'math.ceil', 'math.ceil', (['(DTile_oc / Stile_oc)'], {}), '(DTile_oc / Stile_oc)\n', (12931, 12952), False, 'import math\n'), ((34150, 34174), 'math.ceil', 'math.ceil', (['(IC / DTile_ic)'], {}), '(IC / DTile_ic)\n', (34159, 34174), False, 'import math\n'), ((10199, 10223), 'math.ceil', 'math.ceil', (['(IC / DTile_ic)'], {}), '(IC / DTile_ic)\n', (10208, 10223), False, 'import math\n'), ((10224, 10248), 'math.ceil', 'math.ceil', (['(KW / DTile_kw)'], {}), '(KW / DTile_kw)\n', (10233, 10248), False, 'import math\n'), ((12891, 12921), 'math.ceil', 'math.ceil', (['(DTile_ic / Stile_ic)'], {}), '(DTile_ic / Stile_ic)\n', (12900, 12921), False, 'import math\n'), ((20957, 21044), 'math.ceil', 'math.ceil', (['(DTile_kw * DTile_kh * DTile_ic * DTile_oc * bw_filter / RBW_DRAM_to_WBUF)'], {}), '(DTile_kw * DTile_kh * DTile_ic * DTile_oc * bw_filter /\n RBW_DRAM_to_WBUF)\n', (20966, 21044), False, 'import math\n'), ((21078, 21126), 'math.ceil', 'math.ceil', (['(DTile_oc * bw_bias / RBW_DRAM_to_WBUF)'], {}), '(DTile_oc * bw_bias / RBW_DRAM_to_WBUF)\n', (21087, 21126), False, 'import math\n'), ((21165, 21254), 'math.ceil', 'math.ceil', (['(DTile_iw * DTile_ih * DTile_ic * DTile_batch * bw_ifmap / RBW_DRAM_to_IBUF)'], {}), '(DTile_iw * DTile_ih * DTile_ic * DTile_batch * bw_ifmap /\n RBW_DRAM_to_IBUF)\n', (21174, 21254), False, 'import math\n'), ((21288, 21376), 'math.ceil', 'math.ceil', (['(DTile_ow * DTile_oh * DTile_oc * DTile_batch * bw_psum / RBW_DRAM_to_OBUF)'], {}), '(DTile_ow * DTile_oh * DTile_oc * DTile_batch * bw_psum /\n RBW_DRAM_to_OBUF)\n', (21297, 21376), False, 'import math\n'), ((21411, 21499), 'math.ceil', 'math.ceil', (['(DTile_ow * DTile_oh * DTile_oc * DTile_batch * bw_psum / WBW_OBUF_to_DRAM)'], {}), '(DTile_ow * DTile_oh * DTile_oc * DTile_batch * bw_psum /\n WBW_OBUF_to_DRAM)\n', (21420, 21499), False, 'import math\n'), ((30688, 30752), 'math.ceil', 'math.ceil', (['((cycle_oneTile + pipe_overhead_tile) * Number_of_Tile)'], {}), '((cycle_oneTile + pipe_overhead_tile) * Number_of_Tile)\n', (30697, 30752), False, 'import math\n'), ((9498, 9528), 'math.ceil', 'math.ceil', (['(DTile_oc / Stile_oc)'], {}), '(DTile_oc / Stile_oc)\n', (9507, 9528), False, 'import math\n'), ((10050, 10086), 'math.ceil', 'math.ceil', (['(DTile_batch / Stile_batch)'], {}), '(DTile_batch / Stile_batch)\n', (10059, 10086), False, 'import math\n'), ((30200, 30230), 'math.ceil', 'math.ceil', (['(DTile_oc / Stile_oc)'], {}), '(DTile_oc / Stile_oc)\n', (30209, 30230), False, 'import math\n'), ((2484, 2508), 'math.ceil', 'math.ceil', (['(OC / DTile_oc)'], {}), '(OC / DTile_oc)\n', (2493, 2508), False, 'import math\n'), ((10017, 10047), 'math.ceil', 'math.ceil', (['(DTile_oh / Stile_oh)'], {}), '(DTile_oh / Stile_oh)\n', (10026, 10047), False, 'import math\n'), ((26274, 26298), 'math.ceil', 'math.ceil', (['(IC / DTile_ic)'], {}), '(IC / DTile_ic)\n', (26283, 26298), False, 'import math\n'), ((30169, 30199), 'math.ceil', 'math.ceil', (['(DTile_ic / Stile_ic)'], {}), '(DTile_ic / Stile_ic)\n', (30178, 30199), False, 'import math\n'), ((6049, 6073), 'math.ceil', 'math.ceil', (['(KH / DTile_kh)'], {}), '(KH / DTile_kh)\n', (6058, 6073), False, 'import math\n'), ((9674, 9704), 'math.ceil', 'math.ceil', (['(Batch / DTile_batch)'], {}), '(Batch / DTile_batch)\n', (9683, 9704), False, 'import math\n'), ((9984, 10014), 'math.ceil', 'math.ceil', (['(DTile_ow / Stile_ow)'], {}), '(DTile_ow / Stile_ow)\n', (9993, 10014), False, 'import math\n'), ((6024, 6048), 'math.ceil', 'math.ceil', (['(KW / DTile_kw)'], {}), '(KW / DTile_kw)\n', (6033, 6048), False, 'import math\n'), ((9620, 9644), 'math.ceil', 'math.ceil', (['(OW / DTile_ow)'], {}), '(OW / DTile_ow)\n', (9629, 9644), False, 'import math\n'), ((9647, 9671), 'math.ceil', 'math.ceil', (['(OH / DTile_oh)'], {}), '(OH / DTile_oh)\n', (9656, 9671), False, 'import math\n'), ((10620, 10650), 'math.ceil', 'math.ceil', (['(DTile_kh / Stile_kh)'], {}), '(DTile_kh / Stile_kh)\n', (10629, 10650), False, 'import math\n'), ((5999, 6023), 'math.ceil', 'math.ceil', (['(IC / DTile_ic)'], {}), '(IC / DTile_ic)\n', (6008, 6023), False, 'import math\n'), ((10589, 10619), 'math.ceil', 'math.ceil', (['(DTile_kw / Stile_kw)'], {}), '(DTile_kw / Stile_kw)\n', (10598, 10619), False, 'import math\n'), ((10558, 10588), 'math.ceil', 'math.ceil', (['(DTile_ic / Stile_ic)'], {}), '(DTile_ic / Stile_ic)\n', (10567, 10588), False, 'import math\n')]
|
from dataclasses import dataclass
from pendulum import now
from rich.console import ConsoleRenderable
from rich.style import Style
from rich.table import Column, Table
from rich.text import Text
from spiel.modes import Mode
from spiel.rps import RPSCounter
from spiel.state import State
from spiel.utils import drop_nones, filter_join
@dataclass
class Footer:
state: State
rps_counter: RPSCounter
@property
def longest_slide_number_length(self) -> int:
num_slides = len(self.state.deck)
return len(str(num_slides))
def __rich__(self) -> ConsoleRenderable:
grid = Table.grid(
*drop_nones(
Column(
style=Style(dim=True),
justify="left",
),
Column(
style=Style(bold=True),
justify="center",
),
Column(
style=Style(dim=True),
justify="right",
)
if self.state.options.profiling
else None,
Column(
style=Style(dim=True),
justify="right",
),
Column(
style=Style(dim=True),
justify="right",
),
),
expand=True,
padding=1,
)
grid.add_row(
*drop_nones(
Text(
filter_join(
" | ",
[
self.state.deck.name,
self.state.current_slide.title
if self.state.mode is Mode.SLIDE
else None,
],
)
),
self.state.message,
Text(
f"Render Time: {self.rps_counter.last_elapsed_render_time() * 1e3:>3.3f} ms | {self.rps_counter.renders_per_second():.2f} RPS"
)
if self.state.options.profiling
else None,
now().format(self.state.options.footer_time_format),
Text(
f"[{self.state.current_slide_idx + 1:>0{self.longest_slide_number_length}d} / {len(self.state.deck)}]"
)
if self.state.mode is not Mode.HELP
else Text(Mode.HELP.value, style=Style(italic=True)),
)
)
return grid
|
[
"pendulum.now",
"rich.style.Style",
"spiel.utils.filter_join"
] |
[((1502, 1623), 'spiel.utils.filter_join', 'filter_join', (['""" | """', '[self.state.deck.name, self.state.current_slide.title if self.state.mode is\n Mode.SLIDE else None]'], {}), "(' | ', [self.state.deck.name, self.state.current_slide.title if\n self.state.mode is Mode.SLIDE else None])\n", (1513, 1623), False, 'from spiel.utils import drop_nones, filter_join\n'), ((700, 715), 'rich.style.Style', 'Style', ([], {'dim': '(True)'}), '(dim=True)\n', (705, 715), False, 'from rich.style import Style\n'), ((822, 838), 'rich.style.Style', 'Style', ([], {'bold': '(True)'}), '(bold=True)\n', (827, 838), False, 'from rich.style import Style\n'), ((1144, 1159), 'rich.style.Style', 'Style', ([], {'dim': '(True)'}), '(dim=True)\n', (1149, 1159), False, 'from rich.style import Style\n'), ((1267, 1282), 'rich.style.Style', 'Style', ([], {'dim': '(True)'}), '(dim=True)\n', (1272, 1282), False, 'from rich.style import Style\n'), ((2163, 2168), 'pendulum.now', 'now', ([], {}), '()\n', (2166, 2168), False, 'from pendulum import now\n'), ((947, 962), 'rich.style.Style', 'Style', ([], {'dim': '(True)'}), '(dim=True)\n', (952, 962), False, 'from rich.style import Style\n'), ((2480, 2498), 'rich.style.Style', 'Style', ([], {'italic': '(True)'}), '(italic=True)\n', (2485, 2498), False, 'from rich.style import Style\n')]
|
if __name__ == "__main__":
from core.editor import Editor
from widgets.editor_window import EditorWindow
from PyQt5 import QtWidgets
editor = Editor()
application = QtWidgets.QApplication([])
window = EditorWindow()
window.show()
application.exec()
|
[
"PyQt5.QtWidgets.QApplication",
"widgets.editor_window.EditorWindow",
"core.editor.Editor"
] |
[((160, 168), 'core.editor.Editor', 'Editor', ([], {}), '()\n', (166, 168), False, 'from core.editor import Editor\n'), ((188, 214), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['[]'], {}), '([])\n', (210, 214), False, 'from PyQt5 import QtWidgets\n'), ((228, 242), 'widgets.editor_window.EditorWindow', 'EditorWindow', ([], {}), '()\n', (240, 242), False, 'from widgets.editor_window import EditorWindow\n')]
|
from django.db import models
from meiduo_mell.utils.models import BaseModel
from users.models import User
# Create your models here.
class OauthQQUser(BaseModel):
"""
QQ登录用户数据
"""
# ForeignKey: 设置外键
# on_delete: 指明主表删除数据时,对于外键引用表数据如何处理
# CASCADE 级联,删除主表数据时连通一起删除外键表中数据
user = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name='用户')
# db_index: 若值为True, 则在表中会为此字段创建索引,默认值是False
# 对于频繁查询的字段, 创建索引能够提升查询效率
openid = models.CharField(max_length=64, verbose_name='openid', db_index=True)
class Meta:
db_table = 'tb_oauth_qq' # 指明数据库表名
verbose_name = '用户登录数据' # 显示admin站点中的名称
verbose_name_plural = verbose_name # 显示的复数名称
|
[
"django.db.models.ForeignKey",
"django.db.models.CharField"
] |
[((308, 376), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.CASCADE', 'verbose_name': '"""用户"""'}), "(User, on_delete=models.CASCADE, verbose_name='用户')\n", (325, 376), False, 'from django.db import models\n'), ((469, 538), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)', 'verbose_name': '"""openid"""', 'db_index': '(True)'}), "(max_length=64, verbose_name='openid', db_index=True)\n", (485, 538), False, 'from django.db import models\n')]
|
# -*- coding: UTF-8 -*-
from .dataset import IcvDataSet
from ..utils import is_seq, is_dir
from ..image import imwrite
from ..data.core.bbox import BBox
from ..data.core.polys import Polygon
from ..data.core.sample import Sample, Anno
from ..data.core.meta import AnnoMeta,SampleMeta
from ..vis.color import VIS_COLOR
import random
import os
import json
import shutil
from tqdm import tqdm
class LabelMe(IcvDataSet):
def __init__(self, image_anno_path_list, split="trainval", keep_no_anno_image=True, categories=None,
one_index=False):
assert is_seq(image_anno_path_list)
image_anno_path_list = list(image_anno_path_list)
image_path_list, anno_path_list = list(zip(*image_anno_path_list))
self.split = split
self.keep_no_anno_image = keep_no_anno_image
self.one_index = one_index
self.ids = [os.path.basename(_).rsplit(".", 1)[0] for _ in image_path_list]
self.id2imgpath = {id: image_path_list[ix] for ix, id in enumerate(self.ids)}
self.id2annopath = {id: anno_path_list[ix] for ix, id in enumerate(self.ids)}
self.sample_db = {}
self.color_map = {}
self.get_samples()
self.categories = categories if categories is not None else self.parse_categories()
super(LabelMe, self).__init__(self.ids, self.categories, self.keep_no_anno_image, one_index)
print("there have %d samples in LabelMe dataset" % len(self.ids))
print("there have %d categories in LabelMe dataset" % len(self.categories))
def save(self, output_dir, reset_dir=False, split=None):
anno_path, image_path = LabelMe.reset_dir(output_dir, reset=reset_dir)
for id in self.ids:
self._write(self.get_sample(id), anno_path, image_path)
@staticmethod
def reset_dir(dist_dir, reset=False):
if not reset:
assert is_dir(dist_dir)
if reset and os.path.exists(dist_dir):
shutil.rmtree(dist_dir)
anno_path = os.path.join(dist_dir, "annotations")
image_path = os.path.join(dist_dir, "images")
for _path in [anno_path, image_path]:
if reset or not is_dir(_path):
os.makedirs(_path)
return anno_path, image_path
def _get_bbox_from_points(self, points):
"""
根据polygon顶点获取bbox
:param points:
:return:
"""
x_list = [p[0] for p in points]
y_list = [p[1] for p in points]
xmin = min(x_list)
ymin = min(y_list)
xmax = max(x_list)
ymax = max(y_list)
return xmin, ymin, xmax, ymax
def get_sample(self, id):
"""
get sample
:param id: image name
:return:
"""
if id in self.sample_db:
return self.sample_db[id]
anno_file = self.id2annopath[id]
anno_data = json.load(open(anno_file, "r"))
img_file = self.id2imgpath[id]
annos = []
if "shapes" in anno_data:
shapes = anno_data["shapes"]
for shape in shapes:
if "shape_type" not in shape or "points" not in shape or "label" not in shape:
continue
points = shape["points"]
xmin, ymin, xmax, ymax = self._get_bbox_from_points(points)
label = shape["label"]
if label not in self.color_map:
self.color_map[label] = random.choice(VIS_COLOR)
anno = Anno(
bbox=BBox(xmin=xmin, ymin=ymin, xmax=xmax, ymax=ymax, label=label),
label=label,
color=self.color_map[label],
polys=Polygon.init_from(points, label=label) if shape["shape_type"] == "polygon" else None,
meta=AnnoMeta()
)
annos.append(anno)
sample = Sample(
name=id,
image=img_file,
annos=annos,
meta=SampleMeta()
)
self.sample_db[id] = sample
return sample
def _write(self, anno_sample, anno_path, img_path):
assert isinstance(anno_sample, Sample)
if is_dir(anno_path):
anno_path = os.path.join(anno_path, "%s.json" % anno_sample.name)
if is_dir(img_path):
img_path = os.path.join(img_path, "%s.jpg" % anno_sample.name)
imwrite(anno_sample.image, img_path)
anno_json = {
"shapes": [],
"imagePath": img_path,
"imageHeight": anno_sample.height,
"imageWidth": anno_sample.width
}
for anno in anno_sample.annos:
shape = {
"label": anno.label,
"shape_type": "polygon" if anno.seg_mode else "rectangle"
}
if anno.seg_mode_polys:
shape["points"] = anno.polys.exterior.tolist()
elif anno.seg_mode_mask:
shape["points"] = anno.mask.to_ploygons().exterior.tolist()
else:
shape["points"] = [[anno.bbox.xmin, anno.bbox.ymin], [anno.bbox.xmax, anno.bbox.ymax]]
anno_json["shapes"].append(shape)
json.dump(anno_json, open(anno_path, "w"))
def vis(self, id=None, with_bbox=True, with_seg=True, is_show=False, save_dir=None, reset_dir=False):
if save_dir is not None:
if not os.path.exists(save_dir):
os.makedirs(save_dir)
elif reset_dir:
shutil.rmtree(save_dir)
os.makedirs(save_dir)
if id is not None:
sample = self.get_sample(id)
save_path = None if save_dir is None else os.path.join(save_dir, "%s.jpg" % sample.name)
return sample.vis(with_bbox=with_bbox, with_seg=with_seg, is_show=is_show, save_path=save_path)
image_vis = []
for id in tqdm(self.ids):
sample = self.get_sample(id)
save_path = None if save_dir is None else os.path.join(save_dir, "%s.jpg" % sample.name)
image = sample.vis(with_bbox=with_bbox, with_seg=with_seg, is_show=False, save_path=save_path)
image_vis.append(image)
return image_vis
|
[
"tqdm.tqdm",
"os.makedirs",
"os.path.basename",
"os.path.exists",
"random.choice",
"shutil.rmtree",
"os.path.join"
] |
[((2005, 2042), 'os.path.join', 'os.path.join', (['dist_dir', '"""annotations"""'], {}), "(dist_dir, 'annotations')\n", (2017, 2042), False, 'import os\n'), ((2064, 2096), 'os.path.join', 'os.path.join', (['dist_dir', '"""images"""'], {}), "(dist_dir, 'images')\n", (2076, 2096), False, 'import os\n'), ((5892, 5906), 'tqdm.tqdm', 'tqdm', (['self.ids'], {}), '(self.ids)\n', (5896, 5906), False, 'from tqdm import tqdm\n'), ((1922, 1946), 'os.path.exists', 'os.path.exists', (['dist_dir'], {}), '(dist_dir)\n', (1936, 1946), False, 'import os\n'), ((1960, 1983), 'shutil.rmtree', 'shutil.rmtree', (['dist_dir'], {}), '(dist_dir)\n', (1973, 1983), False, 'import shutil\n'), ((4234, 4287), 'os.path.join', 'os.path.join', (['anno_path', "('%s.json' % anno_sample.name)"], {}), "(anno_path, '%s.json' % anno_sample.name)\n", (4246, 4287), False, 'import os\n'), ((4341, 4392), 'os.path.join', 'os.path.join', (['img_path', "('%s.jpg' % anno_sample.name)"], {}), "(img_path, '%s.jpg' % anno_sample.name)\n", (4353, 4392), False, 'import os\n'), ((2203, 2221), 'os.makedirs', 'os.makedirs', (['_path'], {}), '(_path)\n', (2214, 2221), False, 'import os\n'), ((5402, 5426), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (5416, 5426), False, 'import os\n'), ((5444, 5465), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (5455, 5465), False, 'import os\n'), ((5695, 5741), 'os.path.join', 'os.path.join', (['save_dir', "('%s.jpg' % sample.name)"], {}), "(save_dir, '%s.jpg' % sample.name)\n", (5707, 5741), False, 'import os\n'), ((6003, 6049), 'os.path.join', 'os.path.join', (['save_dir', "('%s.jpg' % sample.name)"], {}), "(save_dir, '%s.jpg' % sample.name)\n", (6015, 6049), False, 'import os\n'), ((3451, 3475), 'random.choice', 'random.choice', (['VIS_COLOR'], {}), '(VIS_COLOR)\n', (3464, 3475), False, 'import random\n'), ((5510, 5533), 'shutil.rmtree', 'shutil.rmtree', (['save_dir'], {}), '(save_dir)\n', (5523, 5533), False, 'import shutil\n'), ((5550, 5571), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (5561, 5571), False, 'import os\n'), ((873, 892), 'os.path.basename', 'os.path.basename', (['_'], {}), '(_)\n', (889, 892), False, 'import os\n')]
|
import os.path as op
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import config as cfg
sns.set_style('darkgrid')
sns.set_context('notebook')
sns.despine(trim=True)
plt.close('all')
fig, ax = plt.subplots(1, 1, figsize=(8, 6))
scores = np.load(op.join(cfg.path_outputs,
'all_scores_learning_curves.npy')).item()
train_sizes = scores['train_sizes']
train_scores = scores['train_scores']
test_scores = scores['test_scores']
train_mean = - np.mean(train_scores, axis=1)
train_std = - np.std(train_scores, axis=1)
test_mean = - np.mean(test_scores, axis=1)
test_std = - np.std(test_scores, axis=1)
ax.plot(train_sizes, train_mean, 'b--', lw=2, label="Training score")
ax.fill_between(train_sizes, train_mean - train_std, train_mean + train_std,
alpha=0.1)
ax.plot(train_sizes, test_mean, 'b-', label="CV score")
ax.fill_between(train_sizes, test_mean - test_std, test_mean + test_std,
alpha=0.1, color="b")
# ax.set_xticks(train_sizes)
ax.set_xlabel("Number of training examples")
ax.set_ylabel("MAE", rotation=0)
# ax.set_title('Learning Curve (SpatialFilter + Riemann)')
ax.legend()
plt.tight_layout()
plt.savefig(op.join(cfg.path_outputs, 'plot_MAE_learning_curves.png'),
dpi=300)
|
[
"seaborn.set_style",
"matplotlib.pyplot.tight_layout",
"os.path.join",
"numpy.std",
"matplotlib.pyplot.close",
"seaborn.despine",
"numpy.mean",
"matplotlib.pyplot.subplots",
"seaborn.set_context"
] |
[((118, 143), 'seaborn.set_style', 'sns.set_style', (['"""darkgrid"""'], {}), "('darkgrid')\n", (131, 143), True, 'import seaborn as sns\n'), ((144, 171), 'seaborn.set_context', 'sns.set_context', (['"""notebook"""'], {}), "('notebook')\n", (159, 171), True, 'import seaborn as sns\n'), ((172, 194), 'seaborn.despine', 'sns.despine', ([], {'trim': '(True)'}), '(trim=True)\n', (183, 194), True, 'import seaborn as sns\n'), ((195, 211), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (204, 211), True, 'import matplotlib.pyplot as plt\n'), ((222, 256), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(8, 6)'}), '(1, 1, figsize=(8, 6))\n', (234, 256), True, 'import matplotlib.pyplot as plt\n'), ((1166, 1184), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1182, 1184), True, 'import matplotlib.pyplot as plt\n'), ((486, 515), 'numpy.mean', 'np.mean', (['train_scores'], {'axis': '(1)'}), '(train_scores, axis=1)\n', (493, 515), True, 'import numpy as np\n'), ((530, 558), 'numpy.std', 'np.std', (['train_scores'], {'axis': '(1)'}), '(train_scores, axis=1)\n', (536, 558), True, 'import numpy as np\n'), ((573, 601), 'numpy.mean', 'np.mean', (['test_scores'], {'axis': '(1)'}), '(test_scores, axis=1)\n', (580, 601), True, 'import numpy as np\n'), ((615, 642), 'numpy.std', 'np.std', (['test_scores'], {'axis': '(1)'}), '(test_scores, axis=1)\n', (621, 642), True, 'import numpy as np\n'), ((1197, 1254), 'os.path.join', 'op.join', (['cfg.path_outputs', '"""plot_MAE_learning_curves.png"""'], {}), "(cfg.path_outputs, 'plot_MAE_learning_curves.png')\n", (1204, 1254), True, 'import os.path as op\n'), ((275, 334), 'os.path.join', 'op.join', (['cfg.path_outputs', '"""all_scores_learning_curves.npy"""'], {}), "(cfg.path_outputs, 'all_scores_learning_curves.npy')\n", (282, 334), True, 'import os.path as op\n')]
|
import pdb
import torch
from torch import nn
import torch.nn.functional as F
class Mish(nn.Module):
def __init__(self):
super().__init__()
print('Mish activation loaded....')
def forward(self, x):
x = x * (torch.tanh(F.softplus(x)))
return x
|
[
"torch.nn.functional.softplus"
] |
[((251, 264), 'torch.nn.functional.softplus', 'F.softplus', (['x'], {}), '(x)\n', (261, 264), True, 'import torch.nn.functional as F\n')]
|
""" Convolutional Neural Network.
Build and train a convolutional neural network with TensorFlow.
This example is using the MNIST database of handwritten digits
(http://yann.lecun.com/exdb/mnist/)
This example is using TensorFlow layers API, see 'convolutional_network_raw'
example for a raw implementation with variables.
Author: <NAME>
Project: https://github.com/aymericdamien/TensorFlow-Examples/
"""
from __future__ import division, print_function, absolute_import
from tensorflow.python.tools import freeze_graph
import numpy as np
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=False)
import tensorflow as tf
# Training Parameters
learning_rate = 0.001
num_steps = 2000
batch_size = 128
# Network Parameters
num_input = 784 # MNIST data input (img shape: 28*28)
num_classes = 10 # MNIST total classes (0-9 digits)
dropout = 0.75 # Dropout, probability to keep units
# Create the neural network
def conv_net(x_dict, n_classes, dropout, reuse, is_training):
# Define a scope for reusing the variables
# TF Estimator input is a dict, in case of multiple inputs
x = x_dict['images']
# MNIST data input is a 1-D vector of 784 features (28*28 pixels)
# Reshape to match picture format [Height x Width x Channel]
# Tensor input become 4-D: [Batch Size, Height, Width, Channel]
x = tf.reshape(x, shape=[-1, 28, 28, 1])
# Convolution Layer with 32 filters and a kernel size of 5
conv1 = tf.layers.conv2d(x, 32, 5, activation=tf.nn.relu)
# Max Pooling (down-sampling) with strides of 2 and kernel size of 2
conv1 = tf.layers.max_pooling2d(conv1, 2, 2)
# Convolution Layer with 64 filters and a kernel size of 3
conv2 = tf.layers.conv2d(conv1, 64, 3, activation=tf.nn.relu)
# Max Pooling (down-sampling) with strides of 2 and kernel size of 2
conv2 = tf.layers.max_pooling2d(conv2, 2, 2)
# Flatten the data to a 1-D vector for the fully connected layer
fc1 = tf.contrib.layers.flatten(conv2)
# Fully connected layer (in tf contrib folder for now)
fc1 = tf.layers.dense(fc1, 1024)
# Apply Dropout (if is_training is False, dropout is not applied)
fc1 = tf.layers.dropout(fc1, rate=dropout, training=is_training)
# Output layer, class prediction
out = tf.layers.dense(fc1, n_classes)
return out
from tensorflow.python.estimator.export import export
with tf.Session() as sess:
# Build the Estimator
feature_spec = {'images': tf.constant(mnist.train.images)}
serving_input_fn = export.build_raw_serving_input_receiver_fn(feature_spec)
# Train the Model
# Evaluate the Model
# Define the input function for evaluating
input_fn = tf.estimator.inputs.numpy_input_fn(
x={'images': mnist.test.images}, y=mnist.test.labels,
batch_size=batch_size, shuffle=False)
# Define a scope for reusing the variables
# TF Estimator input is a dict, in case of multiple inputs
x = mnist.test.images
is_training = False
n_classes = 10
# MNIST data input is a 1-D vector of 784 features (28*28 pixels)
# Reshape to match picture format [Height x Width x Channel]
# Tensor input become 4-D: [Batch Size, Height, Width, Channel]
x = tf.reshape(x, shape=[-1, 28, 28, 1])
# Convolution Layer with 32 filters and a kernel size of 5
conv1 = tf.layers.conv2d(x, 32, 5, activation=tf.nn.relu)
# Max Pooling (down-sampling) with strides of 2 and kernel size of 2
conv1 = tf.layers.max_pooling2d(conv1, 2, 2)
# Convolution Layer with 64 filters and a kernel size of 3
conv2 = tf.layers.conv2d(conv1, 64, 3, activation=tf.nn.relu)
# Max Pooling (down-sampling) with strides of 2 and kernel size of 2
conv2 = tf.layers.max_pooling2d(conv2, 2, 2)
# Flatten the data to a 1-D vector for the fully connected layer
fc1 = tf.contrib.layers.flatten(conv2)
# Fully connected layer (in tf contrib folder for now)
fc1 = tf.layers.dense(fc1, 1024)
# Apply Dropout (if is_training is False, dropout is not applied)
fc1 = tf.layers.dropout(fc1, rate=dropout, training=is_training)
# Use the Estimator 'evaluate' method
# Output layer, class prediction
out = tf.layers.dense(fc1, n_classes,name='output')
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(tf.global_variables())
tf.train.write_graph(sess.graph_def, 'lenet_dir', 'lenet.pbtxt',as_text=True)
saver.save(sess, 'lenet_dir/test3.ckpt',write_meta_graph=False)
|
[
"tensorflow.python.estimator.export.export.build_raw_serving_input_receiver_fn",
"tensorflow.global_variables_initializer",
"tensorflow.contrib.layers.flatten",
"tensorflow.reshape",
"tensorflow.layers.dense",
"tensorflow.layers.dropout",
"tensorflow.Session",
"tensorflow.constant",
"tensorflow.train.write_graph",
"tensorflow.global_variables",
"tensorflow.layers.conv2d",
"tensorflow.layers.max_pooling2d",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"tensorflow.estimator.inputs.numpy_input_fn"
] |
[((628, 682), 'tensorflow.examples.tutorials.mnist.input_data.read_data_sets', 'input_data.read_data_sets', (['"""/tmp/data/"""'], {'one_hot': '(False)'}), "('/tmp/data/', one_hot=False)\n", (653, 682), False, 'from tensorflow.examples.tutorials.mnist import input_data\n'), ((1406, 1442), 'tensorflow.reshape', 'tf.reshape', (['x'], {'shape': '[-1, 28, 28, 1]'}), '(x, shape=[-1, 28, 28, 1])\n', (1416, 1442), True, 'import tensorflow as tf\n'), ((1519, 1568), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['x', '(32)', '(5)'], {'activation': 'tf.nn.relu'}), '(x, 32, 5, activation=tf.nn.relu)\n', (1535, 1568), True, 'import tensorflow as tf\n'), ((1654, 1690), 'tensorflow.layers.max_pooling2d', 'tf.layers.max_pooling2d', (['conv1', '(2)', '(2)'], {}), '(conv1, 2, 2)\n', (1677, 1690), True, 'import tensorflow as tf\n'), ((1767, 1820), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['conv1', '(64)', '(3)'], {'activation': 'tf.nn.relu'}), '(conv1, 64, 3, activation=tf.nn.relu)\n', (1783, 1820), True, 'import tensorflow as tf\n'), ((1906, 1942), 'tensorflow.layers.max_pooling2d', 'tf.layers.max_pooling2d', (['conv2', '(2)', '(2)'], {}), '(conv2, 2, 2)\n', (1929, 1942), True, 'import tensorflow as tf\n'), ((2023, 2055), 'tensorflow.contrib.layers.flatten', 'tf.contrib.layers.flatten', (['conv2'], {}), '(conv2)\n', (2048, 2055), True, 'import tensorflow as tf\n'), ((2127, 2153), 'tensorflow.layers.dense', 'tf.layers.dense', (['fc1', '(1024)'], {}), '(fc1, 1024)\n', (2142, 2153), True, 'import tensorflow as tf\n'), ((2234, 2292), 'tensorflow.layers.dropout', 'tf.layers.dropout', (['fc1'], {'rate': 'dropout', 'training': 'is_training'}), '(fc1, rate=dropout, training=is_training)\n', (2251, 2292), True, 'import tensorflow as tf\n'), ((2341, 2372), 'tensorflow.layers.dense', 'tf.layers.dense', (['fc1', 'n_classes'], {}), '(fc1, n_classes)\n', (2356, 2372), True, 'import tensorflow as tf\n'), ((2452, 2464), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2462, 2464), True, 'import tensorflow as tf\n'), ((2586, 2642), 'tensorflow.python.estimator.export.export.build_raw_serving_input_receiver_fn', 'export.build_raw_serving_input_receiver_fn', (['feature_spec'], {}), '(feature_spec)\n', (2628, 2642), False, 'from tensorflow.python.estimator.export import export\n'), ((2752, 2883), 'tensorflow.estimator.inputs.numpy_input_fn', 'tf.estimator.inputs.numpy_input_fn', ([], {'x': "{'images': mnist.test.images}", 'y': 'mnist.test.labels', 'batch_size': 'batch_size', 'shuffle': '(False)'}), "(x={'images': mnist.test.images}, y=mnist\n .test.labels, batch_size=batch_size, shuffle=False)\n", (2786, 2883), True, 'import tensorflow as tf\n'), ((3288, 3324), 'tensorflow.reshape', 'tf.reshape', (['x'], {'shape': '[-1, 28, 28, 1]'}), '(x, shape=[-1, 28, 28, 1])\n', (3298, 3324), True, 'import tensorflow as tf\n'), ((3401, 3450), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['x', '(32)', '(5)'], {'activation': 'tf.nn.relu'}), '(x, 32, 5, activation=tf.nn.relu)\n', (3417, 3450), True, 'import tensorflow as tf\n'), ((3536, 3572), 'tensorflow.layers.max_pooling2d', 'tf.layers.max_pooling2d', (['conv1', '(2)', '(2)'], {}), '(conv1, 2, 2)\n', (3559, 3572), True, 'import tensorflow as tf\n'), ((3649, 3702), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['conv1', '(64)', '(3)'], {'activation': 'tf.nn.relu'}), '(conv1, 64, 3, activation=tf.nn.relu)\n', (3665, 3702), True, 'import tensorflow as tf\n'), ((3788, 3824), 'tensorflow.layers.max_pooling2d', 'tf.layers.max_pooling2d', (['conv2', '(2)', '(2)'], {}), '(conv2, 2, 2)\n', (3811, 3824), True, 'import tensorflow as tf\n'), ((3905, 3937), 'tensorflow.contrib.layers.flatten', 'tf.contrib.layers.flatten', (['conv2'], {}), '(conv2)\n', (3930, 3937), True, 'import tensorflow as tf\n'), ((4009, 4035), 'tensorflow.layers.dense', 'tf.layers.dense', (['fc1', '(1024)'], {}), '(fc1, 1024)\n', (4024, 4035), True, 'import tensorflow as tf\n'), ((4116, 4174), 'tensorflow.layers.dropout', 'tf.layers.dropout', (['fc1'], {'rate': 'dropout', 'training': 'is_training'}), '(fc1, rate=dropout, training=is_training)\n', (4133, 4174), True, 'import tensorflow as tf\n'), ((4264, 4310), 'tensorflow.layers.dense', 'tf.layers.dense', (['fc1', 'n_classes'], {'name': '"""output"""'}), "(fc1, n_classes, name='output')\n", (4279, 4310), True, 'import tensorflow as tf\n'), ((4414, 4492), 'tensorflow.train.write_graph', 'tf.train.write_graph', (['sess.graph_def', '"""lenet_dir"""', '"""lenet.pbtxt"""'], {'as_text': '(True)'}), "(sess.graph_def, 'lenet_dir', 'lenet.pbtxt', as_text=True)\n", (4434, 4492), True, 'import tensorflow as tf\n'), ((2530, 2561), 'tensorflow.constant', 'tf.constant', (['mnist.train.images'], {}), '(mnist.train.images)\n', (2541, 2561), True, 'import tensorflow as tf\n'), ((4324, 4357), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4355, 4357), True, 'import tensorflow as tf\n'), ((4387, 4408), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (4406, 4408), True, 'import tensorflow as tf\n')]
|
from rest_framework import routers
from .api import UserViewSet
router = routers.DefaultRouter()
router.register('users', UserViewSet, 'users')
urlpatterns = router.urls
|
[
"rest_framework.routers.DefaultRouter"
] |
[((75, 98), 'rest_framework.routers.DefaultRouter', 'routers.DefaultRouter', ([], {}), '()\n', (96, 98), False, 'from rest_framework import routers\n')]
|
# pylint: disable=no-self-use
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Token, Vocabulary
from allennlp.data.token_indexers import ELMoTokenCharactersIndexer
class TestELMoTokenCharactersIndexer(AllenNlpTestCase):
def test_bos_to_char_ids(self):
indexer = ELMoTokenCharactersIndexer()
indices = indexer.token_to_indices(Token('<S>'), Vocabulary())
expected_indices = [259, 257, 260, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261]
assert indices == expected_indices
def test_eos_to_char_ids(self):
indexer = ELMoTokenCharactersIndexer()
indices = indexer.token_to_indices(Token('</S>'), Vocabulary())
expected_indices = [259, 258, 260, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261]
assert indices == expected_indices
def test_unicode_to_char_ids(self):
indexer = ELMoTokenCharactersIndexer()
indices = indexer.token_to_indices(Token(chr(256) + 't'), Vocabulary())
expected_indices = [259, 197, 129, 117, 260, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261]
assert indices == expected_indices
def test_elmo_as_array_produces_token_sequence(self): # pylint: disable=invalid-name
indexer = ELMoTokenCharactersIndexer()
indices = [
indexer.token_to_indices(Token(token), Vocabulary())
for token in ['Second', '.']
]
padded_tokens = indexer.pad_token_sequence(indices,
desired_num_tokens=3,
padding_lengths={})
expected_padded_tokens = [[259, 84, 102, 100, 112, 111, 101, 260, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261],
[259, 47, 260, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261],
[0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0]]
assert padded_tokens == expected_padded_tokens
|
[
"allennlp.data.token_indexers.ELMoTokenCharactersIndexer",
"allennlp.data.Vocabulary",
"allennlp.data.Token"
] |
[((307, 335), 'allennlp.data.token_indexers.ELMoTokenCharactersIndexer', 'ELMoTokenCharactersIndexer', ([], {}), '()\n', (333, 335), False, 'from allennlp.data.token_indexers import ELMoTokenCharactersIndexer\n'), ((923, 951), 'allennlp.data.token_indexers.ELMoTokenCharactersIndexer', 'ELMoTokenCharactersIndexer', ([], {}), '()\n', (949, 951), False, 'from allennlp.data.token_indexers import ELMoTokenCharactersIndexer\n'), ((1544, 1572), 'allennlp.data.token_indexers.ELMoTokenCharactersIndexer', 'ELMoTokenCharactersIndexer', ([], {}), '()\n', (1570, 1572), False, 'from allennlp.data.token_indexers import ELMoTokenCharactersIndexer\n'), ((2222, 2250), 'allennlp.data.token_indexers.ELMoTokenCharactersIndexer', 'ELMoTokenCharactersIndexer', ([], {}), '()\n', (2248, 2250), False, 'from allennlp.data.token_indexers import ELMoTokenCharactersIndexer\n'), ((379, 391), 'allennlp.data.Token', 'Token', (['"""<S>"""'], {}), "('<S>')\n", (384, 391), False, 'from allennlp.data import Token, Vocabulary\n'), ((393, 405), 'allennlp.data.Vocabulary', 'Vocabulary', ([], {}), '()\n', (403, 405), False, 'from allennlp.data import Token, Vocabulary\n'), ((995, 1008), 'allennlp.data.Token', 'Token', (['"""</S>"""'], {}), "('</S>')\n", (1000, 1008), False, 'from allennlp.data import Token, Vocabulary\n'), ((1010, 1022), 'allennlp.data.Vocabulary', 'Vocabulary', ([], {}), '()\n', (1020, 1022), False, 'from allennlp.data import Token, Vocabulary\n'), ((1639, 1651), 'allennlp.data.Vocabulary', 'Vocabulary', ([], {}), '()\n', (1649, 1651), False, 'from allennlp.data import Token, Vocabulary\n'), ((2312, 2324), 'allennlp.data.Token', 'Token', (['token'], {}), '(token)\n', (2317, 2324), False, 'from allennlp.data import Token, Vocabulary\n'), ((2326, 2338), 'allennlp.data.Vocabulary', 'Vocabulary', ([], {}), '()\n', (2336, 2338), False, 'from allennlp.data import Token, Vocabulary\n')]
|
# encoding: UTF-8
"""
Generates test data for CSV import.
"""
from __future__ import print_function
from __future__ import with_statement
# http://www.python.org/dev/peps/pep-3101/ # unicode.format()
# http://www.python.org/dev/peps/pep-3105/ # print function
import codecs
from uuid import uuid4
from datetime import datetime, timedelta
from sys import stdout
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from membership.models import *
header_row = u'''Kirjauspäivä;Arvopäivä;Määrä EUROA;Tapahtumalajikoodi;Selitys;Saaja/Maksaja;Saajan tilinumero;Viite;Viesti;Arkistotunnus;'''
row = u'''{0[date]};{0[date]};{0[sum]};106;TILISIIRTO;{0[payer]};{0[account]};{0[reference]};{0[message]};{0[id]};'''
def dict_for_cycle(cycle):
payment_date = cycle.last_bill().due_date - timedelta(days=1)
if payment_date > datetime.now():
payment_date = datetime.now()
return {
'date': payment_date.strftime('%d.%m.%Y'),
'sum': cycle.sum,
'payer': cycle.membership.name(),
'account': settings.IBAN_ACCOUNT_NUMBER,
'reference': cycle.reference_number,
'message': "Maksu",
'id': str(uuid4())
}
def print_csv(stream=stdout, count=10):
print(header_row, file=stream)
short_sum = False
high_sum = False
wrong_reference = False
for cycle in BillingCycle.objects.filter(is_paid=False):
if count == 0:
break
d = dict_for_cycle(cycle)
if short_sum is False:
d['sum'] -= 5
short_sum = True
elif high_sum is False:
d['sum'] += 5
high_sum = True
elif wrong_reference is False:
d['reference'] = d['reference'][2:]
wrong_reference = True
print(row.format(d), file=stream)
count -= 1
paid_cycle = BillingCycle.objects.filter(is_paid=True)[0]
print(row.format(dict_for_cycle(paid_cycle)), file=stream)
class Command(BaseCommand):
args = '<file_to_write_to>'
help = 'Generate payments CSV to be used for testing out payment import' \
+ ' form'
def handle(self, *args, **options):
if len(args) > 0:
with codecs.open(args[0], 'w', encoding='iso-8859-1') as f:
if len(args) > 1:
print_csv(stream=f, count=int(args[1]))
else:
print_csv(stream=f)
else:
print_csv()
'''Kirjauspäivä;Arvopäivä;Määrä EUROA;Tapahtumalajikoodi;Selitys;Saaja/Maksaja;Saajan tilinumero;Viite;Viesti;Arkistotunnus;
21.05.2008;21.05.2008;-66,50;106;TILISIIRTO;MATTI MEIKÄLÄINEN;211135-00302106;;VUOSIKOKOUKSEN JA YLLÄPITOMATKAN MATKAKORVAUKSET. HALKO 3/2008. ;20080521593497O10031;
03.08.2008;03.08.2008;-33,00;106;TILISIIRTO;MATTI MEIKÄLÄINEN;211135-00302106;;POSTIKULUKORVAUS LASKUTETTU POSTIKULUSTA. HYVÄKSYTTYHALKOSSA 07/2008 24.7.2008. ;20080803593497AK0018;
27.01.2009;27.01.2009;30,00;588;VIITESIIRTO;MEIKÄLÄINEN MATTI JOHANNES;;00000000000007009017; ;200901252588NGNO0290;
21.01.2010;21.01.2010;-1063,35;106;TILISIIRTO;MATTI MEIKÄLÄINEN;211135-00302106;;HALKO 3/2010 KEVÄTKICKOFF TARVIKKEITA. KUPLAMUOVIA XOBIIN ;20100121593497690187;
21.01.2010;21.01.2010;-73,10;106;TILISIIRTO;MATTI MEIKÄLÄINEN;211135-00302106;;HALKO 3/2010 SIKTEERIVIIKONLOPUN MATKOJA. ;201001215934979N0174;
25.01.2010;25.01.2010;30,00;588;VIITESIIRTO;MEIKÄLÄINEN MATTI JOHANNES;;00000000000001110012;SEPA-MAKSU SAAJA/MOTTAG./BEN: Kapsi Internet-kKULUKOODI: SLEV ALKUP.MÄÄRÄ EUR 30.00+ EUR 30.00+;201001255UTZ00002150;
21.04.2010;21.04.2010;20,00;588;VIITESIIRTO;MEIKÄLÄINEN MATTI JOHANNES;;00000000000000032094; ;201004202588NGN52047;
'''
|
[
"uuid.uuid4",
"datetime.timedelta",
"codecs.open",
"datetime.datetime.now"
] |
[((835, 852), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (844, 852), False, 'from datetime import datetime, timedelta\n'), ((875, 889), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (887, 889), False, 'from datetime import datetime, timedelta\n'), ((914, 928), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (926, 928), False, 'from datetime import datetime, timedelta\n'), ((1202, 1209), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (1207, 1209), False, 'from uuid import uuid4\n'), ((2232, 2280), 'codecs.open', 'codecs.open', (['args[0]', '"""w"""'], {'encoding': '"""iso-8859-1"""'}), "(args[0], 'w', encoding='iso-8859-1')\n", (2243, 2280), False, 'import codecs\n')]
|
from gi.repository import Gtk, GdkPixbuf, GLib, Pango
import dhash
import os
import collections
import threading
import Queue
class mainWindow(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self, title="DupDeleter")
self.set_border_width(10)
self.grid = Gtk.Grid()
self.grid.set_column_homogeneous(True)
self.grid.set_row_homogeneous(True)
self.add(self.grid)
# set up the model
# columns = [imgName, imgLocation, # of Dups]
self.dupe_store = Gtk.TreeStore(bool, str, str, int)
self.current_filter_ext = None
self.ext_filter = self.dupe_store.filter_new()
self.ext_filter.set_visible_func(self.ext_filter_func)
# Create model's view
self.treeview = Gtk.TreeView.new_with_model(self.ext_filter)
check_renderer = Gtk.CellRendererToggle()
check_renderer.set_activatable(True)
check_renderer.set_active(False)
check_renderer.connect("toggled", self.on_toggled)
column = Gtk.TreeViewColumn("", check_renderer, active=0)
self.treeview.append_column(column)
for i, column_title in enumerate(["Name", "Location", "# of Dups"]):
renderer = Gtk.CellRendererText()
column = Gtk.TreeViewColumn(column_title, renderer, text=i+1)
column.set_sort_column_id(i)
column.set_fixed_width(200)
column.set_resizable(True)
self.treeview.append_column(column)
self.treeview.connect("cursor-changed", self.on_row_changed)
self.scrollable_treelist = Gtk.ScrolledWindow()
self.scrollable_treelist.set_vexpand(True)
# Create buttons for filtering results by image extension
self.extensions = ("jpg", "gif", "png", "tiff", "All")
self.buttons = list()
for ext in self.extensions:
button = Gtk.Button(ext)
self.buttons.append(button)
button.connect("clicked", self.on_selection_button_clicked)
self.create_toolbar()
# Create labels for showing scan progress
self.scan_status_label = Gtk.Label("No Scan in Progress")
self.scan_status_label.set_halign(3) # 3 = CENTER
self.file_scan_label = Gtk.Label(None)
self.file_scan_label.set_ellipsize(Pango.EllipsizeMode.START)
self.file_scan_label.set_width_chars(30)
self.file_scan_label.set_halign(3) # 3 = CENTER
# Make a frame to hold image previews
self.img_frame = Gtk.Frame(label="Selected Image")
self.img_frame.set_label_align(0.5, 0.5)
self.img_frame.set_shadow_type(Gtk.ShadowType.ETCHED_OUT)
self.img_frame.set_size_request(200, 200)
# Assemble the GUI
self.grid.attach(self.scrollable_treelist, 0, 1, 8, 10)
self.grid.attach_next_to(self.img_frame, self.scrollable_treelist,
Gtk.PositionType.RIGHT, 5, 6)
self.grid.attach_next_to(self.buttons[0], self.scrollable_treelist,
Gtk.PositionType.BOTTOM, 1, 1)
for i, button in enumerate(self.buttons[1:]):
self.grid.attach_next_to(button, self.buttons[i],
Gtk.PositionType.RIGHT, 1, 1)
self.scrollable_treelist.add(self.treeview)
self.grid.attach_next_to(self.scan_status_label, self.buttons[3],
Gtk.PositionType.BOTTOM, 6, 1)
self.grid.attach_next_to(self.file_scan_label, self.scan_status_label,
Gtk.PositionType.BOTTOM, 6, 1)
self.grid.set_column_spacing(10)
self.grid.set_row_spacing(5)
self.queue = Queue.Queue() # Queue for holding fetched images
self.delete_list = list() # List for holding to-be-deleted items
self.show_all()
def getDups(self, path, queue):
'''Collects all image duplicates starting from PATH.
Fills a queue with lists of lists
containing image names and locations.'''
images = collections.defaultdict(list)
image_exts = ('.jpg', '.png', '.gif', '.tiff')
GLib.idle_add(self.scan_status_label.set_text, "Scanning...")
for root, dirs, files in os.walk(path):
for name in files:
GLib.idle_add(self.file_scan_label.set_text,
root)
if name[-4:] in image_exts:
img_loc = os.path.join(root, name)
img_hash = dhash.hash(img_loc)
if img_hash != -1:
# Have to add False at beginning because of
# togglebutton status.
images[img_hash].append([False, name, root])
GLib.idle_add(self.scan_status_label.set_text, "Done")
for group in images:
if len(images[group]) > 1:
queue.put(images[group])
GLib.idle_add(self.generateModelData)
def generateModelData(self):
'''Fills treeModel rows with found duplicates'''
while not self.queue.empty():
image_set = self.queue.get()
parent = ''
no_of_dups = len(image_set)
for img in image_set:
img.append(no_of_dups)
if not parent:
parent = self.dupe_store.append(None, img)
else:
self.dupe_store.append(parent, img)
def on_button_clicked_open(self, widget):
'''Brings up the file browser window.
Returns the path of the root folder.'''
dialog = Gtk.FileChooserDialog("Select the root folder", self,
Gtk.FileChooserAction.SELECT_FOLDER,
(Gtk.STOCK_CANCEL,
Gtk.ResponseType.CANCEL,
"Select", Gtk.ResponseType.OK))
dialog.set_default_size(800, 400)
response = dialog.run()
if response == Gtk.ResponseType.OK:
root = dialog.get_uri()[8:] # have to remove file:///
thread = threading.Thread(target=self.getDups,
args=(root, self.queue))
thread.daemon = True
thread.start()
GLib.timeout_add(200, self.generateModelData)
dialog.destroy()
elif response == Gtk.ResponseType.CANCEL:
dialog.destroy()
def ext_filter_func(self, model, iter, data):
'''Tests if the image extension in the row is the one in the filter'''
if self.current_filter_ext is None or self.current_filter_ext == "All":
return True
else:
return model[iter][0][-3:] == self.current_filter_ext
def create_toolbar(self):
toolbar = Gtk.Toolbar()
self.grid.attach(toolbar, 0, 0, 8, 1)
button_open = Gtk.ToolButton.new_from_stock(Gtk.STOCK_OPEN)
button_open.set_tooltip_text("Scan Directory")
toolbar.insert(button_open, 0)
button_delete = Gtk.ToolButton.new_from_stock(Gtk.STOCK_DELETE)
button_delete.set_tooltip_text("Delete Selected Images")
toolbar.insert(button_delete, 1)
button_auto_prune = Gtk.ToolButton.new_from_stock(Gtk.STOCK_NO)
button_auto_prune.set_tooltip_text("Auto-Prune")
toolbar.insert(button_auto_prune, 2)
button_exit = Gtk.ToolButton.new_from_stock(Gtk.STOCK_QUIT)
button_exit.set_tooltip_text("Exit")
toolbar.insert(button_exit, 3)
button_open.connect("clicked", self.on_button_clicked_open)
button_auto_prune.connect("clicked", self.on_button_clicked_auto_prune)
button_delete.connect("clicked", self.on_button_clicked_delete)
button_exit.connect("clicked", self.on_button_clicked_exit)
def on_selection_button_clicked(self, widget):
'''Called on any selection button click'''
self.current_filter_ext = widget.get_label()
self.ext_filter.refilter()
def on_button_clicked_auto_prune(self, widget):
'''
Automatically deletes all files except for parents
'''
dialog = Gtk.MessageDialog(self, 0, Gtk.MessageType.WARNING,
Gtk.ButtonsType.OK_CANCEL,
"Delete all selected images?")
response = dialog.run()
if response == Gtk.ResponseType.OK:
rootiter = self.dupe_store.get_iter_first()
self.prune_helper(rootiter, False)
dialog.destroy()
elif response == Gtk.ResponseType.CANCEL:
dialog.destroy()
def prune_helper(self, treeiter, toDelete):
'''
Deletes all files except for parents
toDelete indicates whether or not treeiter should be deleted.
It should be set to False on call unless you want to delete
everything.
'''
deleted = 0
isValid = True
while (treeiter is not None) and isValid:
if self.dupe_store.iter_has_child(treeiter):
childiter = self.dupe_store.iter_children(treeiter)
deleted += self.prune_helper(childiter, True)
self.dupe_store[treeiter][3] = self.dupe_store.iter_n_children(treeiter)
if toDelete:
path = os.path.join(self.dupe_store[treeiter][2],
self.dupe_store[treeiter][1])
os.remove(path)
isValid = self.dupe_store.remove(treeiter)
deleted += 1
# If treestore.remove() is successful iter is automatically
# updated to the next iter, so we just need to check to
# make sure that isn't the case before using iter_next()
else:
treeiter = self.dupe_store.iter_next(treeiter)
return deleted
def on_button_clicked_delete(self, widget):
'''Deletes all selected files'''
dialog = Gtk.MessageDialog(self, 0, Gtk.MessageType.WARNING,
Gtk.ButtonsType.OK_CANCEL,
"Delete all selected images?")
response = dialog.run()
if response == Gtk.ResponseType.OK:
rootiter = self.dupe_store.get_iter_first()
self.delete_helper(rootiter)
dialog.destroy()
elif response == Gtk. ResponseType.CANCEL:
dialog.destroy()
def delete_helper(self, treeiter):
'''
Recursively searches through all rows searching for files to
delete.
'''
deleted = 0
isValid = True
while (treeiter is not None) and isValid:
if self.dupe_store.iter_has_child(treeiter):
childiter = self.dupe_store.iter_children(treeiter)
deleted += self.delete_helper(childiter)
if self.dupe_store[treeiter][0]:
path = os.path.join(self.dupe_store[treeiter][2],
self.dupe_store[treeiter][1])
if self.dupe_store.iter_has_child(treeiter):
child = self.dupe_store.iter_children(treeiter)
isValid = self.childToParent(treeiter, child)
else:
isValid = self.dupe_store.remove(treeiter)
os.remove(path)
deleted += 1
self.dupe_store[treeiter][3] = self.dupe_store.iter_n_children(treeiter)
else:
treeiter = self.dupe_store.iter_next(treeiter)
return deleted
def on_button_clicked_exit(self, widget):
'''Exits the program'''
dialog = Gtk.MessageDialog(self, 0, Gtk.MessageType.WARNING,
Gtk.ButtonsType.OK_CANCEL,
"Are you sure you want to exit?")
response = dialog.run()
if response == Gtk.ResponseType.OK:
Gtk.main_quit()
elif response == Gtk.ResponseType.CANCEL:
dialog.destroy()
def childToParent(self, parent_iter, child_iter):
'''
Replaces parent_iter with child_iter, effectively moving child to
parent's position.
Returns the next iter after parent, or invalidates it if there
isn't one.
'''
for col in xrange(self.dupe_store.get_n_columns()):
childval = self.dupe_store.get_value(child_iter, col)
self.dupe_store.set_value(parent_iter, col, childval)
self.dupe_store.remove(child_iter)
return self.dupe_store.iter_next(parent_iter)
def on_row_changed(self, widget):
(model, pathlist) = widget.get_selection().get_selected_rows()
tree_iter = model.get_iter(pathlist[0])
img_name = model.get_value(tree_iter, 1)
img_root = model.get_value(tree_iter, 2)
img_loc = os.path.join(img_root, img_name)
child = self.img_frame.get_child()
if child:
self.img_frame.remove(child)
alloc = self.img_frame.get_allocation()
pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_scale(img_loc,
alloc.width - 20,
alloc.height - 20,
True)
image = Gtk.Image.new_from_pixbuf(pixbuf)
image.show()
self.img_frame.add(image)
def on_toggled(self, widget, path):
'''
Adds or removes the row's treeiter to a list that designates it
for removal.
'''
self.dupe_store[path][0] = not self.dupe_store[path][0]
win = mainWindow()
win.connect("delete-event", Gtk.main_quit)
win.show_all()
Gtk.main()
|
[
"os.remove",
"os.walk",
"gi.repository.Gtk.Button",
"collections.defaultdict",
"gi.repository.Gtk.TreeViewColumn",
"gi.repository.Gtk.main_quit",
"gi.repository.Gtk.ToolButton.new_from_stock",
"gi.repository.Gtk.CellRendererText",
"gi.repository.Gtk.Image.new_from_pixbuf",
"gi.repository.Gtk.Grid",
"os.path.join",
"gi.repository.GLib.idle_add",
"gi.repository.Gtk.Frame",
"gi.repository.Gtk.TreeStore",
"Queue.Queue",
"gi.repository.Gtk.CellRendererToggle",
"gi.repository.Gtk.ScrolledWindow",
"gi.repository.Gtk.Window.__init__",
"threading.Thread",
"gi.repository.Gtk.main",
"gi.repository.GdkPixbuf.Pixbuf.new_from_file_at_scale",
"gi.repository.Gtk.Toolbar",
"gi.repository.Gtk.MessageDialog",
"gi.repository.Gtk.FileChooserDialog",
"gi.repository.GLib.timeout_add",
"dhash.hash",
"gi.repository.Gtk.TreeView.new_with_model",
"gi.repository.Gtk.Label"
] |
[((13782, 13792), 'gi.repository.Gtk.main', 'Gtk.main', ([], {}), '()\n', (13790, 13792), False, 'from gi.repository import Gtk, GdkPixbuf, GLib, Pango\n'), ((191, 236), 'gi.repository.Gtk.Window.__init__', 'Gtk.Window.__init__', (['self'], {'title': '"""DupDeleter"""'}), "(self, title='DupDeleter')\n", (210, 236), False, 'from gi.repository import Gtk, GdkPixbuf, GLib, Pango\n'), ((292, 302), 'gi.repository.Gtk.Grid', 'Gtk.Grid', ([], {}), '()\n', (300, 302), False, 'from gi.repository import Gtk, GdkPixbuf, GLib, Pango\n'), ((530, 564), 'gi.repository.Gtk.TreeStore', 'Gtk.TreeStore', (['bool', 'str', 'str', 'int'], {}), '(bool, str, str, int)\n', (543, 564), False, 'from gi.repository import Gtk, GdkPixbuf, GLib, Pango\n'), ((778, 822), 'gi.repository.Gtk.TreeView.new_with_model', 'Gtk.TreeView.new_with_model', (['self.ext_filter'], {}), '(self.ext_filter)\n', (805, 822), False, 'from gi.repository import Gtk, GdkPixbuf, GLib, Pango\n'), ((848, 872), 'gi.repository.Gtk.CellRendererToggle', 'Gtk.CellRendererToggle', ([], {}), '()\n', (870, 872), False, 'from gi.repository import Gtk, GdkPixbuf, GLib, Pango\n'), ((1035, 1083), 'gi.repository.Gtk.TreeViewColumn', 'Gtk.TreeViewColumn', (['""""""', 'check_renderer'], {'active': '(0)'}), "('', check_renderer, active=0)\n", (1053, 1083), False, 'from gi.repository import Gtk, GdkPixbuf, GLib, Pango\n'), ((1598, 1618), 'gi.repository.Gtk.ScrolledWindow', 'Gtk.ScrolledWindow', ([], {}), '()\n', (1616, 1618), False, 'from gi.repository import Gtk, GdkPixbuf, GLib, Pango\n'), ((2130, 2162), 'gi.repository.Gtk.Label', 'Gtk.Label', (['"""No Scan in Progress"""'], {}), "('No Scan in Progress')\n", (2139, 2162), False, 'from gi.repository import Gtk, GdkPixbuf, GLib, Pango\n'), ((2254, 2269), 'gi.repository.Gtk.Label', 'Gtk.Label', (['None'], {}), '(None)\n', (2263, 2269), False, 'from gi.repository import Gtk, GdkPixbuf, GLib, Pango\n'), ((2518, 2551), 'gi.repository.Gtk.Frame', 'Gtk.Frame', ([], {'label': '"""Selected Image"""'}), "(label='Selected Image')\n", (2527, 2551), False, 'from gi.repository import Gtk, GdkPixbuf, GLib, Pango\n'), ((3703, 3716), 'Queue.Queue', 'Queue.Queue', ([], {}), '()\n', (3714, 3716), False, 'import Queue\n'), ((4058, 4087), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (4081, 4087), False, 'import collections\n'), ((4152, 4213), 'gi.repository.GLib.idle_add', 'GLib.idle_add', (['self.scan_status_label.set_text', '"""Scanning..."""'], {}), "(self.scan_status_label.set_text, 'Scanning...')\n", (4165, 4213), False, 'from gi.repository import Gtk, GdkPixbuf, GLib, Pango\n'), ((4248, 4261), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (4255, 4261), False, 'import os\n'), ((4773, 4827), 'gi.repository.GLib.idle_add', 'GLib.idle_add', (['self.scan_status_label.set_text', '"""Done"""'], {}), "(self.scan_status_label.set_text, 'Done')\n", (4786, 4827), False, 'from gi.repository import Gtk, GdkPixbuf, GLib, Pango\n'), ((4947, 4984), 'gi.repository.GLib.idle_add', 'GLib.idle_add', (['self.generateModelData'], {}), '(self.generateModelData)\n', (4960, 4984), False, 'from gi.repository import Gtk, GdkPixbuf, GLib, Pango\n'), ((5622, 5797), 'gi.repository.Gtk.FileChooserDialog', 'Gtk.FileChooserDialog', (['"""Select the root folder"""', 'self', 'Gtk.FileChooserAction.SELECT_FOLDER', "(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL, 'Select', Gtk.ResponseType.OK)"], {}), "('Select the root folder', self, Gtk.FileChooserAction\n .SELECT_FOLDER, (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL, 'Select',\n Gtk.ResponseType.OK))\n", (5643, 5797), False, 'from gi.repository import Gtk, GdkPixbuf, GLib, Pango\n'), ((6844, 6857), 'gi.repository.Gtk.Toolbar', 'Gtk.Toolbar', ([], {}), '()\n', (6855, 6857), False, 'from gi.repository import Gtk, GdkPixbuf, GLib, Pango\n'), ((6927, 6972), 'gi.repository.Gtk.ToolButton.new_from_stock', 'Gtk.ToolButton.new_from_stock', (['Gtk.STOCK_OPEN'], {}), '(Gtk.STOCK_OPEN)\n', (6956, 6972), False, 'from gi.repository import Gtk, GdkPixbuf, GLib, Pango\n'), ((7092, 7139), 'gi.repository.Gtk.ToolButton.new_from_stock', 'Gtk.ToolButton.new_from_stock', (['Gtk.STOCK_DELETE'], {}), '(Gtk.STOCK_DELETE)\n', (7121, 7139), False, 'from gi.repository import Gtk, GdkPixbuf, GLib, Pango\n'), ((7275, 7318), 'gi.repository.Gtk.ToolButton.new_from_stock', 'Gtk.ToolButton.new_from_stock', (['Gtk.STOCK_NO'], {}), '(Gtk.STOCK_NO)\n', (7304, 7318), False, 'from gi.repository import Gtk, GdkPixbuf, GLib, Pango\n'), ((7444, 7489), 'gi.repository.Gtk.ToolButton.new_from_stock', 'Gtk.ToolButton.new_from_stock', (['Gtk.STOCK_QUIT'], {}), '(Gtk.STOCK_QUIT)\n', (7473, 7489), False, 'from gi.repository import Gtk, GdkPixbuf, GLib, Pango\n'), ((8207, 8321), 'gi.repository.Gtk.MessageDialog', 'Gtk.MessageDialog', (['self', '(0)', 'Gtk.MessageType.WARNING', 'Gtk.ButtonsType.OK_CANCEL', '"""Delete all selected images?"""'], {}), "(self, 0, Gtk.MessageType.WARNING, Gtk.ButtonsType.\n OK_CANCEL, 'Delete all selected images?')\n", (8224, 8321), False, 'from gi.repository import Gtk, GdkPixbuf, GLib, Pango\n'), ((10017, 10131), 'gi.repository.Gtk.MessageDialog', 'Gtk.MessageDialog', (['self', '(0)', 'Gtk.MessageType.WARNING', 'Gtk.ButtonsType.OK_CANCEL', '"""Delete all selected images?"""'], {}), "(self, 0, Gtk.MessageType.WARNING, Gtk.ButtonsType.\n OK_CANCEL, 'Delete all selected images?')\n", (10034, 10131), False, 'from gi.repository import Gtk, GdkPixbuf, GLib, Pango\n'), ((11710, 11827), 'gi.repository.Gtk.MessageDialog', 'Gtk.MessageDialog', (['self', '(0)', 'Gtk.MessageType.WARNING', 'Gtk.ButtonsType.OK_CANCEL', '"""Are you sure you want to exit?"""'], {}), "(self, 0, Gtk.MessageType.WARNING, Gtk.ButtonsType.\n OK_CANCEL, 'Are you sure you want to exit?')\n", (11727, 11827), False, 'from gi.repository import Gtk, GdkPixbuf, GLib, Pango\n'), ((12910, 12942), 'os.path.join', 'os.path.join', (['img_root', 'img_name'], {}), '(img_root, img_name)\n', (12922, 12942), False, 'import os\n'), ((13110, 13206), 'gi.repository.GdkPixbuf.Pixbuf.new_from_file_at_scale', 'GdkPixbuf.Pixbuf.new_from_file_at_scale', (['img_loc', '(alloc.width - 20)', '(alloc.height - 20)', '(True)'], {}), '(img_loc, alloc.width - 20, alloc.\n height - 20, True)\n', (13149, 13206), False, 'from gi.repository import Gtk, GdkPixbuf, GLib, Pango\n'), ((13392, 13425), 'gi.repository.Gtk.Image.new_from_pixbuf', 'Gtk.Image.new_from_pixbuf', (['pixbuf'], {}), '(pixbuf)\n', (13417, 13425), False, 'from gi.repository import Gtk, GdkPixbuf, GLib, Pango\n'), ((1228, 1250), 'gi.repository.Gtk.CellRendererText', 'Gtk.CellRendererText', ([], {}), '()\n', (1248, 1250), False, 'from gi.repository import Gtk, GdkPixbuf, GLib, Pango\n'), ((1272, 1326), 'gi.repository.Gtk.TreeViewColumn', 'Gtk.TreeViewColumn', (['column_title', 'renderer'], {'text': '(i + 1)'}), '(column_title, renderer, text=i + 1)\n', (1290, 1326), False, 'from gi.repository import Gtk, GdkPixbuf, GLib, Pango\n'), ((1887, 1902), 'gi.repository.Gtk.Button', 'Gtk.Button', (['ext'], {}), '(ext)\n', (1897, 1902), False, 'from gi.repository import Gtk, GdkPixbuf, GLib, Pango\n'), ((6154, 6216), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.getDups', 'args': '(root, self.queue)'}), '(target=self.getDups, args=(root, self.queue))\n', (6170, 6216), False, 'import threading\n'), ((6327, 6372), 'gi.repository.GLib.timeout_add', 'GLib.timeout_add', (['(200)', 'self.generateModelData'], {}), '(200, self.generateModelData)\n', (6343, 6372), False, 'from gi.repository import Gtk, GdkPixbuf, GLib, Pango\n'), ((11981, 11996), 'gi.repository.Gtk.main_quit', 'Gtk.main_quit', ([], {}), '()\n', (11994, 11996), False, 'from gi.repository import Gtk, GdkPixbuf, GLib, Pango\n'), ((4310, 4360), 'gi.repository.GLib.idle_add', 'GLib.idle_add', (['self.file_scan_label.set_text', 'root'], {}), '(self.file_scan_label.set_text, root)\n', (4323, 4360), False, 'from gi.repository import Gtk, GdkPixbuf, GLib, Pango\n'), ((9368, 9440), 'os.path.join', 'os.path.join', (['self.dupe_store[treeiter][2]', 'self.dupe_store[treeiter][1]'], {}), '(self.dupe_store[treeiter][2], self.dupe_store[treeiter][1])\n', (9380, 9440), False, 'import os\n'), ((9493, 9508), 'os.remove', 'os.remove', (['path'], {}), '(path)\n', (9502, 9508), False, 'import os\n'), ((10971, 11043), 'os.path.join', 'os.path.join', (['self.dupe_store[treeiter][2]', 'self.dupe_store[treeiter][1]'], {}), '(self.dupe_store[treeiter][2], self.dupe_store[treeiter][1])\n', (10983, 11043), False, 'import os\n'), ((11376, 11391), 'os.remove', 'os.remove', (['path'], {}), '(path)\n', (11385, 11391), False, 'import os\n'), ((4465, 4489), 'os.path.join', 'os.path.join', (['root', 'name'], {}), '(root, name)\n', (4477, 4489), False, 'import os\n'), ((4521, 4540), 'dhash.hash', 'dhash.hash', (['img_loc'], {}), '(img_loc)\n', (4531, 4540), False, 'import dhash\n')]
|
import json
from application.handlers.base import BaseHandler
class APIKMLDownloader(BaseHandler):
def get(self):
if self.request.get('project_code'):
project_code = self.request.get('project_code')
self.response.headers['Content-Type'] = 'application/json'
self.response.write(json.dumps({'project_code': project_code}))
|
[
"json.dumps"
] |
[((327, 369), 'json.dumps', 'json.dumps', (["{'project_code': project_code}"], {}), "({'project_code': project_code})\n", (337, 369), False, 'import json\n')]
|
#!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2017 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# <NAME> - http://herve.niderb.fr
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
from pyannote.database import Database
from pyannote.database.protocol import SpeakerDiarizationProtocol
from pyannote.parser import MDTMParser
import os.path as op
from pyannote.core import Segment, Annotation
class GameOfThronesSpeakerDiarizationProtocol(SpeakerDiarizationProtocol):
"""Base speaker diarization protocol for GameOfThrones database
This class should be inherited from, not used directly.
Parameters
----------
preprocessors : dict or (key, preprocessor) iterable
When provided, each protocol item (dictionary) are preprocessed, such
that item[key] = preprocessor(**item). In case 'preprocessor' is not
callable, it should be a string containing placeholder for item keys
(e.g. {'wav': '/path/to/{uri}.wav'})
"""
def __init__(self, preprocessors={}, **kwargs):
super(GameOfThronesSpeakerDiarizationProtocol, self).__init__(
preprocessors=preprocessors, **kwargs)
self.mdtm_parser_ = MDTMParser()
@staticmethod
def get_audio_path(uri):
return op.join(
op.dirname(op.realpath(__file__)),
'data', 'audio', '{uri}.txt'.format(uri=uri))
def load_speaker(self, uri):
speaker = Annotation(uri=uri)
path = self.get_audio_path(uri)
with open(path, 'r') as fp:
for line in fp:
start, duration, name, _, _ = line.strip().split()
start = float(start)
end = start + float(duration)
speaker[Segment(start, end)] = name
return speaker.smooth()
def _subset(self, protocol, subset):
data_dir = op.join(op.dirname(op.realpath(__file__)), 'data')
# load annotations
path = op.join(
data_dir,
'{protocol}.{subset}.lst'.format(subset=subset, protocol=protocol))
with open(path, 'r') as fp:
for line in fp:
uri = line.strip()
annotation = self.load_speaker(uri)
item = {
'database': 'GameOfThrones',
'uri': uri,
'annotation': annotation}
yield item
class Season1(GameOfThronesSpeakerDiarizationProtocol):
"""Season 1
* Training set: episode #1, #2, #3, #4, #5
* Development set: episode #6
* Test set: episode #7, #8, #9, #10
"""
def trn_iter(self):
return self._subset('Season1', 'trn')
def dev_iter(self):
return self._subset('Season1', 'dev')
def tst_iter(self):
return self._subset('Season1', 'tst')
class Season1Test(GameOfThronesSpeakerDiarizationProtocol):
"""Season 1
* Training set: -- not available --
* Development set: -- not available --
* Test set: episode #1, #2, #3, #4, #5, #6, #7, #8, #9, #10
"""
def tst_iter(self):
return self._subset('Season1Test', 'tst')
class GameOfThrones(Database):
"""GameOfThrones corpus
Parameters
----------
preprocessors : dict or (key, preprocessor) iterable
When provided, each protocol item (dictionary) are preprocessed, such
that item[key] = preprocessor(**item). In case 'preprocessor' is not
callable, it should be a string containing placeholder for item keys
(e.g. {'wav': '/path/to/{uri}.wav'})
Reference
---------
Citation
--------
Website
-------
"""
def __init__(self, preprocessors={}, **kwargs):
super(GameOfThrones, self).__init__(preprocessors=preprocessors, **kwargs)
self.register_protocol(
'SpeakerDiarization', 'Season1', Season1)
self.register_protocol(
'SpeakerDiarization', 'Season1Test', Season1Test)
|
[
"pyannote.core.Annotation",
"os.path.realpath",
"pyannote.core.Segment",
"pyannote.parser.MDTMParser"
] |
[((2280, 2292), 'pyannote.parser.MDTMParser', 'MDTMParser', ([], {}), '()\n', (2290, 2292), False, 'from pyannote.parser import MDTMParser\n'), ((2522, 2541), 'pyannote.core.Annotation', 'Annotation', ([], {'uri': 'uri'}), '(uri=uri)\n', (2532, 2541), False, 'from pyannote.core import Segment, Annotation\n'), ((2388, 2409), 'os.path.realpath', 'op.realpath', (['__file__'], {}), '(__file__)\n', (2399, 2409), True, 'import os.path as op\n'), ((2961, 2982), 'os.path.realpath', 'op.realpath', (['__file__'], {}), '(__file__)\n', (2972, 2982), True, 'import os.path as op\n'), ((2820, 2839), 'pyannote.core.Segment', 'Segment', (['start', 'end'], {}), '(start, end)\n', (2827, 2839), False, 'from pyannote.core import Segment, Annotation\n')]
|
import toolkit.autodiff.math.scalar as am
import toolkit.autodiff.math as m
from ..math import IdentityOp
from ..CalcFlow import CalcFlow
class FloatScalar(CalcFlow):
def __init__(self, value):
super().__init__(value)
def _calc_unary(self, func):
calc_val = FloatScalar(func.calculate())
super(FloatScalar, self).__calc_unary__(calc_val, func)
return calc_val
def _calc_binary(self, other, func):
calc_val = FloatScalar(func.calculate())
super(FloatScalar, self).__calc_binary__(calc_val, other, func)
return calc_val
@classmethod
def create(cls, value):
v = FloatScalar(value)
math_func = IdentityOp(v)
calc_val = v._calc_unary(math_func)
calc_val.identity = math_func
return calc_val
def __mul__(self, other):
if not CalcFlow.is_calc_flow(other):
raise ValueError("Not CalcFlow")
math_func = m.MultiplyOp(self, other)
return self._calc_binary(other, math_func)
def __add__(self, other):
if not CalcFlow.is_calc_flow(other):
raise ValueError("Not CalcFlow")
math_func = m.AdditionOp(self, other)
return self._calc_binary(other, math_func)
def __sub__(self, other):
if not CalcFlow.is_calc_flow(other):
raise ValueError("Not CalcFlow")
math_func = m.SubtractionOp(self, other)
return self._calc_binary(other, math_func)
def __pow__(self, other):
if not CalcFlow.is_calc_flow(other):
raise ValueError("Not CalcFlow")
math_func = m.ExponentOp(self, other)
return self._calc_binary(other, math_func)
def __div__(self, other):
if not CalcFlow.is_calc_flow(other):
raise ValueError("Not CalcFlow")
math_func = m.DivideOp(self, other)
return self._calc_binary(other, math_func)
def sin(self):
math_func = am.SinOp(self)
return self._calc_unary(math_func)
def exp(self):
math_func = am.ExpOp(self)
return self._calc_unary(math_func)
def ln(self):
math_func = am.LnOp(self)
return self._calc_unary(math_func)
def __truediv__(self, other):
return self.__div__(other)
def __str__(self):
return "[%s] %s" % (self.__id__, self.value)
|
[
"toolkit.autodiff.math.DivideOp",
"toolkit.autodiff.math.AdditionOp",
"toolkit.autodiff.math.scalar.LnOp",
"toolkit.autodiff.math.ExponentOp",
"toolkit.autodiff.math.scalar.SinOp",
"toolkit.autodiff.math.scalar.ExpOp",
"toolkit.autodiff.math.MultiplyOp",
"toolkit.autodiff.math.SubtractionOp"
] |
[((964, 989), 'toolkit.autodiff.math.MultiplyOp', 'm.MultiplyOp', (['self', 'other'], {}), '(self, other)\n', (976, 989), True, 'import toolkit.autodiff.math as m\n'), ((1185, 1210), 'toolkit.autodiff.math.AdditionOp', 'm.AdditionOp', (['self', 'other'], {}), '(self, other)\n', (1197, 1210), True, 'import toolkit.autodiff.math as m\n'), ((1406, 1434), 'toolkit.autodiff.math.SubtractionOp', 'm.SubtractionOp', (['self', 'other'], {}), '(self, other)\n', (1421, 1434), True, 'import toolkit.autodiff.math as m\n'), ((1630, 1655), 'toolkit.autodiff.math.ExponentOp', 'm.ExponentOp', (['self', 'other'], {}), '(self, other)\n', (1642, 1655), True, 'import toolkit.autodiff.math as m\n'), ((1851, 1874), 'toolkit.autodiff.math.DivideOp', 'm.DivideOp', (['self', 'other'], {}), '(self, other)\n', (1861, 1874), True, 'import toolkit.autodiff.math as m\n'), ((1968, 1982), 'toolkit.autodiff.math.scalar.SinOp', 'am.SinOp', (['self'], {}), '(self)\n', (1976, 1982), True, 'import toolkit.autodiff.math.scalar as am\n'), ((2068, 2082), 'toolkit.autodiff.math.scalar.ExpOp', 'am.ExpOp', (['self'], {}), '(self)\n', (2076, 2082), True, 'import toolkit.autodiff.math.scalar as am\n'), ((2167, 2180), 'toolkit.autodiff.math.scalar.LnOp', 'am.LnOp', (['self'], {}), '(self)\n', (2174, 2180), True, 'import toolkit.autodiff.math.scalar as am\n')]
|
from selenium import webdriver
from selenium.webdriver.common.by import By
from datetime import datetime
class Scraping(object):
driver = webdriver.Chrome()
hours: str
edt = {}
def __init__(self, username, password):
# Ouvre la page dans Chrome
self.driver.delete_all_cookies()
self.driver.get('https://ent.iut.univ-paris8.fr/edt/presentations.php')
# Se connecter
self.driver.find_element(By.ID, "username").send_keys(username)
self.driver.find_element(By.ID, "password").send_keys(password)
self.driver.find_element(By.NAME, "submit").click()
self.getEDT()
def getWhatDayWeek(self) -> int:
""" Récupère de 0..7, le jours de la semaine. """
return datetime.today().weekday()
def writeCours(self, element, elementHeure):
""" Écrit l'heure en index et crée une liste contenant les infos concernant le cours """
hoursIndex = elementHeure.find_element(By.CLASS_NAME,"fright").text.split("h")
hoursIndex = float(f"{hoursIndex[0]}.{hoursIndex[1]}") if len(hoursIndex) == 2 else int(hoursIndex[0])
self.edt[hoursIndex] = []
for n,el in enumerate(element.text.split("\n")):
self.edt[hoursIndex].append(el)
def getEDT(self, todayDate=int(datetime.today().weekday())):
""" Update l'emploie du temps """
# Click sur l'emploie du temps qui correspond au jour de la semaine
for element in self.driver.find_elements(By.CLASS_NAME, f"jours.jour{todayDate+1}.plageDIVn"):
if len(element.text) > 0:
allElementsCanFind = self.driver.find_elements(By.CLASS_NAME,"ligne.petit") + self.driver.find_elements(By.CLASS_NAME,"lignegrey") + self.driver.find_elements(By.CLASS_NAME,"plage.petit") # ((todayDate if todayDate not in [5,6] else 0)*20)+6.6667 => only use for B group
print(element.get_attribute("style"))
print(element.text)
if element.text.count("Amphi") >= 1 or element.text.count("Amphi2") >= 1:
for elementHeure in allElementsCanFind:
if abs(int(elementHeure.get_attribute("style").split(";")[0].replace("top: ", "").replace("px", ""))-int(element.get_attribute("style").split(';')[0].replace("top: ", "").replace("px", ""))) in [30, 10]:
print(elementHeure.text)
self.writeCours(element, elementHeure)
break
else:
for style in element.get_attribute("style").split(";"):
print(style)
print(style.count(f" margin-left: {((todayDate+1 if todayDate not in [5,6] else 0)*10)+3.3333}"))
if style.count(f" margin-left: {((todayDate+1 if todayDate not in [5,6] else 0)*10)+3.3333}") >= 1:
for elementHeure in allElementsCanFind:
print(abs(int(elementHeure.get_attribute("style").split(";")[0].replace("top: ","").replace("px","")) - int(element.get_attribute("style").split(';')[0].replace("top: ","").replace("px",""))))
if abs(int(elementHeure.get_attribute("style").split(";")[0].replace("top: ","").replace("px","")) - int(element.get_attribute("style").split(';')[0].replace("top: ","").replace("px",""))) in [30,10]:
print(elementHeure.text)
self.writeCours(element, elementHeure)
break
print(self.edt)
|
[
"datetime.datetime.today",
"selenium.webdriver.Chrome"
] |
[((144, 162), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {}), '()\n', (160, 162), False, 'from selenium import webdriver\n'), ((753, 769), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (767, 769), False, 'from datetime import datetime\n'), ((1296, 1312), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (1310, 1312), False, 'from datetime import datetime\n')]
|
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import C;
LINK_CLSID = '{00021401-0000-0000-C000-000000000046}';
VALID_SHOW_COMMAND_VALUES = {
1: 'SW_SHOWNORMAL',
3: 'SW_SHOWMAXIMIZED',
7: 'SW_SHOWMINNOACTIVE',
};
VALID_HOTKEY_LOWBYTE_VALUES = {
0x90: 'NumLock',
0x91: 'ScrollLock',
};
for i in range(0x30, 0x3A):
VALID_HOTKEY_LOWBYTE_VALUES[i] = chr(i); # 0 - 9
for i in range(0x41, 0x5B):
VALID_HOTKEY_LOWBYTE_VALUES[i] = chr(i); # A - Z
for i in range(1, 25):
VALID_HOTKEY_LOWBYTE_VALUES[i + 0x6F] = 'F%d' % i; # F1 - F24 (!)
# bitflags_LinkFlags
def bitflags_LinkFlags(stream, offset, max_size, parent, name):
result = C.BITFIELD(stream, offset, max_size, parent, name, \
('Reserved', 5),
('KeepLocalIDListForUNCTarget', 1),
('PreferEnvironmentPath', 1),
('UnaliasOnSave', 1),
('AllowLinkToLink', 1),
('DisableKnownFolderAlias', 1),
('DisableKnownFolderTracking', 1),
('DisableLinkPathTracking', 1),
('EnableTargetMetadata', 1),
('ForceNoLinkTrack', 1),
('RunWithShimLayer', 1),
('Unused2', 1),
('NoPidlAlis', 1),
('HasExpIcon', 1),
('RunAsUser', 1),
('HasDarwinID', 1),
('Unused1', 1),
('RunInSeperateProcess', 1),
('HasExpString', 1),
('ForceNoLinkInfo', 1),
('IsUnicode', 1),
('HasIconLocation', 1),
('HasArguments', 1),
('HasWorkingDir', 1),
('HasRelativePath', 1),
('HasName', 1),
('HasLinkInfo', 1),
('HasLinkTargetIDList', 1)
);
result.dump_simplified = True;
if result._Unused1.value != 0:
result._Unused1.warnings.append('Expected value to be 0');
if result._Unused2.value != 0:
result._Unused2.warnings.append('Expected value to be 0');
if result._Reserved.value != 0:
result._Reserved.warnings.append('Expected value to be 0');
return result;
# bitflags_FileAttributes
def bitflags_FileAttributes(stream, offset, max_size, parent, name):
result = C.BITFIELD(stream, offset, max_size, parent, name, \
('Unused', 17),
('FILE_ATTRIBUTE_ENCRYPTED', 1),
('FILE_ATTRIBUTE_NOT_CONTENT_INDEXED', 1),
('FILE_ATTRIBUTE_OFFLINE', 1),
('FILE_ATTRIBUTE_COMPRESSED', 1),
('FILE_ATTRIBUTE_REPARSE_POINT', 1),
('FILE_ATTRIBUTE_SPARSE_FILE', 1),
('FILE_ATTRIBUTE_TEMPORARY', 1),
('FILE_ATTRIBUTE_NORMAL', 1),
('Reserved2', 1),
('FILE_ATTRIBUTE_ARCHIVE', 1),
('FILE_ATTRIBUTE_DIRECTORY', 1),
('Reserved1', 1),
('FILE_ATTRIBUTE_SYSTEM', 1),
('FILE_ATTRIBUTE_HIDDEN', 1),
('FILE_ATTRIBUTE_READONLY', 1)
);
result.dump_simplified = True;
if result._Reserved1.value != 0:
result._Reserved1.warnings.append('Expected value to be 0');
if result._Reserved2.value != 0:
result._Reserved2.warnings.append('Expected value to be 0');
return result;
# struct_HotKeyFlags
def struct_HotKeyFlags(stream, offset, max_size, parent, name):
result = C.STRUCT(stream, offset, max_size, parent, name, \
'HotKeyFlags', \
('LowByte', C.BYTE),
('HighByte', {C.BITFIELD: (
('Reserved', 5),
('HOTKEYF_ALT', 1),
('HOTKEYF_CONTROL', 1),
('HOTKEYF_SHIFT', 1),
)})
);
if results._LowByte.value in VALID_HOTKEY_LOWBYTE_VALUES:
result._LowByte.notes.append( \
VALID_HOTKEY_LOWBYTE_VALUES[result._LowByte.value]);
else:
result._LowByte.warnings.append('Unrecognized value');
if results._HighByte._Reserved.value > 0:
results._HighByte._Reserved.warnings.append('Expected value to be 0');
return result;
# http://download.microsoft.com/download/B/0/B/B0B199DB-41E6-400F-90CD-C350D0C14A53/%5BMS-SHLLINK%5D.pdf
def struct_SHELL_LINK_HEADER(stream, offset, max_size, parent, name):
import C;
from struct_GUID import struct_GUID;
result = C.STRUCT(stream, offset, max_size, parent, name, \
'LNK_HEADER', \
('HeaderSize', C.DWORD),
('LinkCLSID', struct_GUID),
('LinkFlags', bitflags_LinkFlags),
('FileAttributes', bitflags_FileAttributes),
('CreationTime', C.QWORD),
('AccessTime', C.QWORD),
('WriteTime', C.QWORD),
('FileSize', C.UINT),
('IconIndex', C.INT),
('ShowCommand', C.UINT),
('HotKey', C.WORD),
('Reserved1', C.WORD),
('Reserved2', C.DWORD),
('Reserved3', C.DWORD)
);
if result._HeaderSize.value != 0x4C:
result._HeaderSize.warnings.append(
'expected value to be 0x4C');
if result._LinkCLSID.string_value != LINK_CLSID:
result._LinkCLSID.warnings.append('expected value to be "%s"' % LINK_CLSID);
if result._ShowCommand.value in VALID_SHOW_COMMAND_VALUES:
result._ShowCommand.notes.append( \
VALID_SHOW_COMMAND_VALUES[result._ShowCommand.value]);
else:
valid_values = VALID_SHOW_COMMAND_VALUES.keys()
valid_values = '%s or %s' % \
(', '.join(valid_values[:-1]), valid_values[-1]);
result._ShowCommand.warnings.append( \
'Expected value to be %s' % valid_values);
if result._Reserved1.value != 0:
result._Reserved1.warnings.append('Expected value to be 0');
if result._Reserved2.value != 0:
result._Reserved2.warnings.append('Expected value to be 0');
return result;
|
[
"C.BITFIELD",
"C.STRUCT"
] |
[((1212, 1971), 'C.BITFIELD', 'C.BITFIELD', (['stream', 'offset', 'max_size', 'parent', 'name', "('Reserved', 5)", "('KeepLocalIDListForUNCTarget', 1)", "('PreferEnvironmentPath', 1)", "('UnaliasOnSave', 1)", "('AllowLinkToLink', 1)", "('DisableKnownFolderAlias', 1)", "('DisableKnownFolderTracking', 1)", "('DisableLinkPathTracking', 1)", "('EnableTargetMetadata', 1)", "('ForceNoLinkTrack', 1)", "('RunWithShimLayer', 1)", "('Unused2', 1)", "('NoPidlAlis', 1)", "('HasExpIcon', 1)", "('RunAsUser', 1)", "('HasDarwinID', 1)", "('Unused1', 1)", "('RunInSeperateProcess', 1)", "('HasExpString', 1)", "('ForceNoLinkInfo', 1)", "('IsUnicode', 1)", "('HasIconLocation', 1)", "('HasArguments', 1)", "('HasWorkingDir', 1)", "('HasRelativePath', 1)", "('HasName', 1)", "('HasLinkInfo', 1)", "('HasLinkTargetIDList', 1)"], {}), "(stream, offset, max_size, parent, name, ('Reserved', 5), (\n 'KeepLocalIDListForUNCTarget', 1), ('PreferEnvironmentPath', 1), (\n 'UnaliasOnSave', 1), ('AllowLinkToLink', 1), ('DisableKnownFolderAlias',\n 1), ('DisableKnownFolderTracking', 1), ('DisableLinkPathTracking', 1),\n ('EnableTargetMetadata', 1), ('ForceNoLinkTrack', 1), (\n 'RunWithShimLayer', 1), ('Unused2', 1), ('NoPidlAlis', 1), (\n 'HasExpIcon', 1), ('RunAsUser', 1), ('HasDarwinID', 1), ('Unused1', 1),\n ('RunInSeperateProcess', 1), ('HasExpString', 1), ('ForceNoLinkInfo', 1\n ), ('IsUnicode', 1), ('HasIconLocation', 1), ('HasArguments', 1), (\n 'HasWorkingDir', 1), ('HasRelativePath', 1), ('HasName', 1), (\n 'HasLinkInfo', 1), ('HasLinkTargetIDList', 1))\n", (1222, 1971), False, 'import C\n'), ((3269, 3842), 'C.BITFIELD', 'C.BITFIELD', (['stream', 'offset', 'max_size', 'parent', 'name', "('Unused', 17)", "('FILE_ATTRIBUTE_ENCRYPTED', 1)", "('FILE_ATTRIBUTE_NOT_CONTENT_INDEXED', 1)", "('FILE_ATTRIBUTE_OFFLINE', 1)", "('FILE_ATTRIBUTE_COMPRESSED', 1)", "('FILE_ATTRIBUTE_REPARSE_POINT', 1)", "('FILE_ATTRIBUTE_SPARSE_FILE', 1)", "('FILE_ATTRIBUTE_TEMPORARY', 1)", "('FILE_ATTRIBUTE_NORMAL', 1)", "('Reserved2', 1)", "('FILE_ATTRIBUTE_ARCHIVE', 1)", "('FILE_ATTRIBUTE_DIRECTORY', 1)", "('Reserved1', 1)", "('FILE_ATTRIBUTE_SYSTEM', 1)", "('FILE_ATTRIBUTE_HIDDEN', 1)", "('FILE_ATTRIBUTE_READONLY', 1)"], {}), "(stream, offset, max_size, parent, name, ('Unused', 17), (\n 'FILE_ATTRIBUTE_ENCRYPTED', 1), ('FILE_ATTRIBUTE_NOT_CONTENT_INDEXED', \n 1), ('FILE_ATTRIBUTE_OFFLINE', 1), ('FILE_ATTRIBUTE_COMPRESSED', 1), (\n 'FILE_ATTRIBUTE_REPARSE_POINT', 1), ('FILE_ATTRIBUTE_SPARSE_FILE', 1),\n ('FILE_ATTRIBUTE_TEMPORARY', 1), ('FILE_ATTRIBUTE_NORMAL', 1), (\n 'Reserved2', 1), ('FILE_ATTRIBUTE_ARCHIVE', 1), (\n 'FILE_ATTRIBUTE_DIRECTORY', 1), ('Reserved1', 1), (\n 'FILE_ATTRIBUTE_SYSTEM', 1), ('FILE_ATTRIBUTE_HIDDEN', 1), (\n 'FILE_ATTRIBUTE_READONLY', 1))\n", (3279, 3842), False, 'import C\n'), ((4565, 4770), 'C.STRUCT', 'C.STRUCT', (['stream', 'offset', 'max_size', 'parent', 'name', '"""HotKeyFlags"""', "('LowByte', C.BYTE)", "('HighByte', {C.BITFIELD: (('Reserved', 5), ('HOTKEYF_ALT', 1), (\n 'HOTKEYF_CONTROL', 1), ('HOTKEYF_SHIFT', 1))})"], {}), "(stream, offset, max_size, parent, name, 'HotKeyFlags', ('LowByte',\n C.BYTE), ('HighByte', {C.BITFIELD: (('Reserved', 5), ('HOTKEYF_ALT', 1),\n ('HOTKEYF_CONTROL', 1), ('HOTKEYF_SHIFT', 1))}))\n", (4573, 4770), False, 'import C\n'), ((5509, 5969), 'C.STRUCT', 'C.STRUCT', (['stream', 'offset', 'max_size', 'parent', 'name', '"""LNK_HEADER"""', "('HeaderSize', C.DWORD)", "('LinkCLSID', struct_GUID)", "('LinkFlags', bitflags_LinkFlags)", "('FileAttributes', bitflags_FileAttributes)", "('CreationTime', C.QWORD)", "('AccessTime', C.QWORD)", "('WriteTime', C.QWORD)", "('FileSize', C.UINT)", "('IconIndex', C.INT)", "('ShowCommand', C.UINT)", "('HotKey', C.WORD)", "('Reserved1', C.WORD)", "('Reserved2', C.DWORD)", "('Reserved3', C.DWORD)"], {}), "(stream, offset, max_size, parent, name, 'LNK_HEADER', (\n 'HeaderSize', C.DWORD), ('LinkCLSID', struct_GUID), ('LinkFlags',\n bitflags_LinkFlags), ('FileAttributes', bitflags_FileAttributes), (\n 'CreationTime', C.QWORD), ('AccessTime', C.QWORD), ('WriteTime', C.\n QWORD), ('FileSize', C.UINT), ('IconIndex', C.INT), ('ShowCommand', C.\n UINT), ('HotKey', C.WORD), ('Reserved1', C.WORD), ('Reserved2', C.DWORD\n ), ('Reserved3', C.DWORD))\n", (5517, 5969), False, 'import C\n')]
|
import alepy
import numpy as np
import os.path as osp
from control3 import CTRL_ROOT
# import cv2
world = alepy.AtariWorld(osp.join(CTRL_ROOT,"domain_data/atari_roms/space_invaders.bin"))
for j in xrange(5):
x0 = world.GetInitialState(np.random.randint(0,50))
u0 = np.array([0],'uint8')
y,r,o,d = world.Step(x0,u0)
for i in xrange(3):
y1,r1,o1,d1 = world.Step(x0,u0)
assert (y==y1).all() and (r==r1) and (np.array(o)==np.array(o1)).all()
nsteps = np.random.randint(10)
x = x0
for t in xrange(nsteps):
u = np.array([np.random.randint(0,10)],dtype='uint8')
x,_,_,_ = world.Step(x,u)
|
[
"numpy.array",
"numpy.random.randint",
"os.path.join"
] |
[((124, 188), 'os.path.join', 'osp.join', (['CTRL_ROOT', '"""domain_data/atari_roms/space_invaders.bin"""'], {}), "(CTRL_ROOT, 'domain_data/atari_roms/space_invaders.bin')\n", (132, 188), True, 'import os.path as osp\n'), ((276, 298), 'numpy.array', 'np.array', (['[0]', '"""uint8"""'], {}), "([0], 'uint8')\n", (284, 298), True, 'import numpy as np\n'), ((242, 266), 'numpy.random.randint', 'np.random.randint', (['(0)', '(50)'], {}), '(0, 50)\n', (259, 266), True, 'import numpy as np\n'), ((493, 514), 'numpy.random.randint', 'np.random.randint', (['(10)'], {}), '(10)\n', (510, 514), True, 'import numpy as np\n'), ((589, 613), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)'], {}), '(0, 10)\n', (606, 613), True, 'import numpy as np\n'), ((441, 452), 'numpy.array', 'np.array', (['o'], {}), '(o)\n', (449, 452), True, 'import numpy as np\n'), ((454, 466), 'numpy.array', 'np.array', (['o1'], {}), '(o1)\n', (462, 466), True, 'import numpy as np\n')]
|
import pandas as pd
import argparse
import mlflow
from sklearn.utils import resample
from sklearn.model_selection import train_test_split
import xgboost as xgb
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from urllib.parse import urlparse
if __name__ == "__main__":
df_train = pd.read_csv('../../../data/processed/processed_application_train.csv')
df_test = pd.read_csv('../../../data/processed/processed_application_test.csv')
# get argument for the model
def parse_args():
parser = argparse.ArgumentParser(description="XGBoost example")
parser.add_argument(
"--learning-rate",
type=float,
default=0.3,
help="learning rate to update step size at each boosting step (default: 0.3)",
)
parser.add_argument(
"--n-estimators",
type=int,
default=10,
help="Number of boosting rounds. (default: 10)",
)
return parser.parse_args()
args = parse_args()
# Separate majority and minority classes
df_majority = df_train[df_train["TARGET"] == 0]
df_minority = df_train[df_train["TARGET"] == 1]
# Downsample majority class
df_majority_downsampled = resample(df_majority,
replace=False, # sample without replacement
n_samples=50000, # to match minority class
random_state=123) # reproducible results
# Combine minority class with downsampled majority class
df_downsampled = pd.concat([df_majority_downsampled, df_minority])
X = df_downsampled.drop(columns="TARGET")
y = df_downsampled['TARGET']
# Split the data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
dtrain = xgb.DMatrix(X_train, label=y_train)
dtest = xgb.DMatrix(X_test, label=y_test)
#Run mlflow
with mlflow.start_run():
xgb_model = xgb.XGBClassifier(n_estimators=args.n_estimators,learning_rate=args.learning_rate,random_state=42)
xgb_model.fit(X_train, y_train)
xgb_pred = xgb_model.predict(X_test)
print("This is the accuracy score for XGBClassifier : ")
acc = accuracy_score(y_test, xgb_pred)
print(acc)
print("This is the confusion matrix score for XGBClassifier : ")
cm = confusion_matrix(y_test, xgb_pred)
print(confusion_matrix(y_test, xgb_pred))
#log metric confusion metrix
t_n, f_p, f_n, t_p = cm.ravel()
mlflow.log_metric("true_negative", t_n)
mlflow.log_metric("false_positive", f_p)
mlflow.log_metric("false_negative", f_n)
mlflow.log_metric("true_positive", t_p)
mlflow.log_metrics({"accuracy": acc})
mlflow.log_param("learning_rate",args.learning_rate)
mlflow.log_param("estimators",args.n_estimators)
tracking_url_type_store = urlparse(mlflow.get_tracking_uri()).scheme
# Model registry does not work with file store
if tracking_url_type_store != "file":
# Register the model
mlflow.sklearn.log_model(xgb_model, "model", registered_model_name="XGboost")
else:
mlflow.sklearn.log_model(xgb_model, "model")
|
[
"mlflow.start_run",
"mlflow.log_param",
"argparse.ArgumentParser",
"mlflow.sklearn.log_model",
"mlflow.get_tracking_uri",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.accuracy_score",
"mlflow.log_metrics",
"sklearn.utils.resample",
"xgboost.XGBClassifier",
"sklearn.metrics.confusion_matrix",
"pandas.concat",
"mlflow.log_metric",
"xgboost.DMatrix"
] |
[((327, 397), 'pandas.read_csv', 'pd.read_csv', (['"""../../../data/processed/processed_application_train.csv"""'], {}), "('../../../data/processed/processed_application_train.csv')\n", (338, 397), True, 'import pandas as pd\n'), ((412, 481), 'pandas.read_csv', 'pd.read_csv', (['"""../../../data/processed/processed_application_test.csv"""'], {}), "('../../../data/processed/processed_application_test.csv')\n", (423, 481), True, 'import pandas as pd\n'), ((1306, 1377), 'sklearn.utils.resample', 'resample', (['df_majority'], {'replace': '(False)', 'n_samples': '(50000)', 'random_state': '(123)'}), '(df_majority, replace=False, n_samples=50000, random_state=123)\n', (1314, 1377), False, 'from sklearn.utils import resample\n'), ((1659, 1708), 'pandas.concat', 'pd.concat', (['[df_majority_downsampled, df_minority]'], {}), '([df_majority_downsampled, df_minority])\n', (1668, 1708), True, 'import pandas as pd\n'), ((1850, 1904), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'random_state': '(42)'}), '(X, y, test_size=0.2, random_state=42)\n', (1866, 1904), False, 'from sklearn.model_selection import train_test_split\n'), ((1919, 1954), 'xgboost.DMatrix', 'xgb.DMatrix', (['X_train'], {'label': 'y_train'}), '(X_train, label=y_train)\n', (1930, 1954), True, 'import xgboost as xgb\n'), ((1967, 2000), 'xgboost.DMatrix', 'xgb.DMatrix', (['X_test'], {'label': 'y_test'}), '(X_test, label=y_test)\n', (1978, 2000), True, 'import xgboost as xgb\n'), ((555, 609), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""XGBoost example"""'}), "(description='XGBoost example')\n", (578, 609), False, 'import argparse\n'), ((2028, 2046), 'mlflow.start_run', 'mlflow.start_run', ([], {}), '()\n', (2044, 2046), False, 'import mlflow\n'), ((2069, 2174), 'xgboost.XGBClassifier', 'xgb.XGBClassifier', ([], {'n_estimators': 'args.n_estimators', 'learning_rate': 'args.learning_rate', 'random_state': '(42)'}), '(n_estimators=args.n_estimators, learning_rate=args.\n learning_rate, random_state=42)\n', (2086, 2174), True, 'import xgboost as xgb\n'), ((2333, 2365), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'xgb_pred'], {}), '(y_test, xgb_pred)\n', (2347, 2365), False, 'from sklearn.metrics import accuracy_score\n'), ((2471, 2505), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'xgb_pred'], {}), '(y_test, xgb_pred)\n', (2487, 2505), False, 'from sklearn.metrics import confusion_matrix\n'), ((2650, 2689), 'mlflow.log_metric', 'mlflow.log_metric', (['"""true_negative"""', 't_n'], {}), "('true_negative', t_n)\n", (2667, 2689), False, 'import mlflow\n'), ((2698, 2738), 'mlflow.log_metric', 'mlflow.log_metric', (['"""false_positive"""', 'f_p'], {}), "('false_positive', f_p)\n", (2715, 2738), False, 'import mlflow\n'), ((2747, 2787), 'mlflow.log_metric', 'mlflow.log_metric', (['"""false_negative"""', 'f_n'], {}), "('false_negative', f_n)\n", (2764, 2787), False, 'import mlflow\n'), ((2796, 2835), 'mlflow.log_metric', 'mlflow.log_metric', (['"""true_positive"""', 't_p'], {}), "('true_positive', t_p)\n", (2813, 2835), False, 'import mlflow\n'), ((2849, 2886), 'mlflow.log_metrics', 'mlflow.log_metrics', (["{'accuracy': acc}"], {}), "({'accuracy': acc})\n", (2867, 2886), False, 'import mlflow\n'), ((2895, 2948), 'mlflow.log_param', 'mlflow.log_param', (['"""learning_rate"""', 'args.learning_rate'], {}), "('learning_rate', args.learning_rate)\n", (2911, 2948), False, 'import mlflow\n'), ((2956, 3005), 'mlflow.log_param', 'mlflow.log_param', (['"""estimators"""', 'args.n_estimators'], {}), "('estimators', args.n_estimators)\n", (2972, 3005), False, 'import mlflow\n'), ((2520, 2554), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'xgb_pred'], {}), '(y_test, xgb_pred)\n', (2536, 2554), False, 'from sklearn.metrics import confusion_matrix\n'), ((3235, 3312), 'mlflow.sklearn.log_model', 'mlflow.sklearn.log_model', (['xgb_model', '"""model"""'], {'registered_model_name': '"""XGboost"""'}), "(xgb_model, 'model', registered_model_name='XGboost')\n", (3259, 3312), False, 'import mlflow\n'), ((3339, 3383), 'mlflow.sklearn.log_model', 'mlflow.sklearn.log_model', (['xgb_model', '"""model"""'], {}), "(xgb_model, 'model')\n", (3363, 3383), False, 'import mlflow\n'), ((3053, 3078), 'mlflow.get_tracking_uri', 'mlflow.get_tracking_uri', ([], {}), '()\n', (3076, 3078), False, 'import mlflow\n')]
|
"""
Customizing measures arguments
==============================
In this example we will show you how to custorize the measures.
"""
# Load a dataset
from sklearn.datasets import load_iris
from pymfe.mfe import MFE
data = load_iris()
y = data.target
X = data.data
###############################################################################
# Custom Arguments
# ----------------
#
# It is possible to pass custom arguments to every meta-feature using PyMFE
# extract method kwargs. The keywords must be the target meta-feature name, and
# the value must be a dictionary in the format {argument: value}, i.e., each
# key in the dictionary is a target argument with its respective value. In the
# example below, the extraction of metafeatures ``min`` and ``max`` happens as
# usual, but the meta-features ``sd``, ``nr_norm`` and ``nr_cor_attr`` will
# receive user custom argument values, which will interfere in each metafeature
# result.
# Extract measures with custom user arguments
mfe = MFE(features=["sd", "nr_norm", "nr_cor_attr", "min", "max"])
mfe.fit(X, y)
ft = mfe.extract(
sd={"ddof": 0},
nr_norm={"method": "all", "failure": "hard", "threshold": 0.025},
nr_cor_attr={"threshold": 0.6},
)
print("\n".join("{:50} {:30}".format(x, y) for x, y in zip(ft[0], ft[1])))
|
[
"sklearn.datasets.load_iris",
"pymfe.mfe.MFE"
] |
[((227, 238), 'sklearn.datasets.load_iris', 'load_iris', ([], {}), '()\n', (236, 238), False, 'from sklearn.datasets import load_iris\n'), ((1001, 1061), 'pymfe.mfe.MFE', 'MFE', ([], {'features': "['sd', 'nr_norm', 'nr_cor_attr', 'min', 'max']"}), "(features=['sd', 'nr_norm', 'nr_cor_attr', 'min', 'max'])\n", (1004, 1061), False, 'from pymfe.mfe import MFE\n')]
|
#!/usr/bin/env python3
# MIT License
# Copyright (c) 2020 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import ipaddress
from subprocess import DEVNULL, Popen
class Color:
PURPLE = "\033[95m"
CYAN = "\033[96m"
DARKCYAN = "\033[36m"
BLUE = "\033[94m"
GREEN = "\033[92m"
YELLOW = "\033[93m"
RED = "\033[91m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
END = "\033[0m"
def cred():
print(
Color.DARKCYAN
+ "\n"
+ "*********************************\n"
+ "* Pingsweep script *\n"
+ "* *\n"
+ "* Written and maintained by: *\n"
+ "* <NAME> *\n"
+ "* <EMAIL> *\n"
+ "* https://github.com/strohmy86 *\n"
+ "* *\n"
+ "*********************************\n"
+ "\n"
+ Color.END
)
def main(net_addr, file):
net = net_addr.replace(".", "_")
net = net.replace("/", "-")
# Create the network
ip_net = ipaddress.ip_network(net_addr)
# Get all hosts on that network
all_hosts = list(ip_net.hosts())
p = {} # ip -> process
for n in range(len(all_hosts)): # start ping process
ip = str(all_hosts[n])
p[ip] = Popen(
["ping", "-n", "-c", "1", "-w", "2", ip],
stdout=DEVNULL,
stderr=DEVNULL,
)
t = [] # List for active IP addresses
if file is True:
f = open("/home/lstrohm/ActiveIps-" + net + ".txt", "w")
f.close()
while p:
for ip, proc in p.items():
if proc.poll() is not None: # ping finished
del p[ip] # remove from the process list
if proc.returncode == 0 and file is False:
print("%s active" % ip)
t.append(ip)
elif proc.returncode == 0 and file is True:
f = open("/home/lstrohm/ActiveIps-" + net + ".txt", "a")
f.write("%s\n" % ip)
# else:
# print('%s error' % ip)
break
# Count total number of active IP addresses
if file is True:
fr = open("/home/lstrohm/ActiveIps-" + net + ".txt", "r")
total = len(fr.readlines())
fr.close()
fw = open("/home/lstrohm/ActiveIps-" + net + ".txt", "a")
fw.write("Total Active Devices: %s" % total)
fw.close()
print(
Color.CYAN
+ "Saved list to ~/ActiveIps-"
+ net
+ ".txt"
+ Color.END
)
elif file is False:
print(Color.YELLOW + "Total Active Devices: %s" % len(t) + Color.END)
# Starts the script.
parser = argparse.ArgumentParser(description="Script ping sweep a subnet")
parser.add_argument(
"-f",
"--file",
default=False,
action="store_const",
const=True,
help="Write results to a text file.",
)
parser.add_argument(
"net",
metavar="Network Subnet",
default="",
type=str,
help="network address in CIDR format (ex.192.168.1.0/24)",
)
args = parser.parse_args()
net_addr = args.net
file = args.file
cred()
main(net_addr, file)
|
[
"subprocess.Popen",
"ipaddress.ip_network",
"argparse.ArgumentParser"
] |
[((3784, 3849), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Script ping sweep a subnet"""'}), "(description='Script ping sweep a subnet')\n", (3807, 3849), False, 'import argparse\n'), ((2090, 2120), 'ipaddress.ip_network', 'ipaddress.ip_network', (['net_addr'], {}), '(net_addr)\n', (2110, 2120), False, 'import ipaddress\n'), ((2327, 2406), 'subprocess.Popen', 'Popen', (["['ping', '-n', '-c', '1', '-w', '2', ip]"], {'stdout': 'DEVNULL', 'stderr': 'DEVNULL'}), "(['ping', '-n', '-c', '1', '-w', '2', ip], stdout=DEVNULL, stderr=DEVNULL)\n", (2332, 2406), False, 'from subprocess import DEVNULL, Popen\n')]
|
import unittest
import requests
endpoint = "https://app.zipcodebase.com/api/v1"
apikey = ""
# https://app.zipcodebase.com/documentation
class UnitTestsZipcodebaseWithTorNetwork(unittest.TestCase):
def test_Authentification_Remaining_Credits(self):
print("test_Authentification_Remaining_Credits")
headers = {
'apikey': apikey
}
url = endpoint + "/status"
r = requests.get(url, headers=headers)
print(r.text)
def test_Postal_code_to_location_information(self):
print("test_Postal_code_to_location_information")
headers = {
'apikey': apikey
}
params = (
("codes", "10005,51503"),
)
url = endpoint + "/search"
r = requests.get(url, headers=headers, params=params)
print(r.text)
def test_Distance_calculation_between_postal_codes(self):
print("test_Distance_calculation_between_postal_codes")
headers = {
'apikey': apikey
}
params = (
("code", "10005"),
("compare", "10006,10007"),
("country", "us"),
)
url = endpoint + "/distance"
r = requests.get(url, headers=headers, params=params)
print(r.text)
def test_Postal_codes_within_a_radius(self):
print("test_Postal_codes_within_a_radius")
headers = {
'apikey': apikey
}
params = (
("code", "10005"),
("radius", "100"),
("country", "us"),
)
url = endpoint + "/radius"
r = requests.get(url, headers=headers, params=params)
print(r.text)
def test_Postal_codes_within_a_certain_distance(self):
print("test_Postal_codes_within_a_certain_distance")
headers = {
'apikey': apikey
}
params = (
("codes", "10005,10006,10009,90001"),
("distance", "100"),
("country", "us"),
)
url = endpoint + "/match"
r = requests.get(url, headers=headers, params=params)
print(r.text)
def test_Postal_codes_by_city(self):
print("test_Postal_codes_by_city")
headers = {
'apikey': apikey
}
params = (
("city", "Amsterdam"),
("state_name", "Noord-Holland"),
("country", "nl"),
)
url = endpoint + "/code/city"
r = requests.get(url, headers=headers, params=params)
print(r.text)
def test_Postal_codes_by_state(self):
print("test_Postal_codes_by_state")
headers = {
'apikey': apikey
}
params = (
("state_name", "Noord-Holland"),
("country", "nl"),
)
url = endpoint + "/code/state"
r = requests.get(url, headers=headers, params=params)
print(r.text)
def test_Provinces_states_of_a_country(self):
print("test_Provinces_states_of_a_country")
headers = {
'apikey': apikey
}
params = (
("country", "de"),
)
url = endpoint + "/country/province"
r = requests.get(url, headers=headers, params=params)
print(r.text)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"requests.get"
] |
[((3318, 3333), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3331, 3333), False, 'import unittest\n'), ((421, 455), 'requests.get', 'requests.get', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (433, 455), False, 'import requests\n'), ((771, 820), 'requests.get', 'requests.get', (['url'], {'headers': 'headers', 'params': 'params'}), '(url, headers=headers, params=params)\n', (783, 820), False, 'import requests\n'), ((1214, 1263), 'requests.get', 'requests.get', (['url'], {'headers': 'headers', 'params': 'params'}), '(url, headers=headers, params=params)\n', (1226, 1263), False, 'import requests\n'), ((1620, 1669), 'requests.get', 'requests.get', (['url'], {'headers': 'headers', 'params': 'params'}), '(url, headers=headers, params=params)\n', (1632, 1669), False, 'import requests\n'), ((2066, 2115), 'requests.get', 'requests.get', (['url'], {'headers': 'headers', 'params': 'params'}), '(url, headers=headers, params=params)\n', (2078, 2115), False, 'import requests\n'), ((2477, 2526), 'requests.get', 'requests.get', (['url'], {'headers': 'headers', 'params': 'params'}), '(url, headers=headers, params=params)\n', (2489, 2526), False, 'import requests\n'), ((2856, 2905), 'requests.get', 'requests.get', (['url'], {'headers': 'headers', 'params': 'params'}), '(url, headers=headers, params=params)\n', (2868, 2905), False, 'import requests\n'), ((3212, 3261), 'requests.get', 'requests.get', (['url'], {'headers': 'headers', 'params': 'params'}), '(url, headers=headers, params=params)\n', (3224, 3261), False, 'import requests\n')]
|
#!/usr/bin/env python3
import json
import sys
import numpy as np
import cv2
import math
import time
from collections import namedtuple
from cscore import CameraServer
from networktables import NetworkTables
# Magic Numbers
lowerGreen = (50, 120, 130) # Our Robot's Camera
higherGreen = (100, 220, 220)
minContourArea = 10
angleOffset = 10
rightAngleSize = -14
leftAngleSize = -75.5
screenX = 320
screenY = 240
screenSize = (screenX, screenY)
distance_away = 110
realTapeDistance = 0.2 # metres between closest tape points
focal_length = 325
# Initialisation
configFile = "/boot/frc.json"
CameraConfig = namedtuple("CameraConfig", ["name", "path", "config"])
def readCameraConfig(config):
"""Read single camera configuration."""
return CameraConfig(config["name"], config["path"], config)
def readConfig():
"""Read configuration file."""
# parse file
with open(configFile) as f:
j = json.load(f)
# cameras
cameras = j["cameras"]
cameras = [readCameraConfig(camera) for camera in cameras]
return cameras
# Our code begins here
def startCamera(config):
"""Start running the camera."""
cs = CameraServer.getInstance()
camera = cs.startAutomaticCapture(name=config.name, path=config.path)
camera.setConfigJson(json.dumps(config.config))
return cs, camera
# Process Functions
def getDistance(boxes):
if boxes is None:
return math.nan, math.nan
Lpoint = max(boxes[0], key=lambda x: x[0])
Rpoint = min(boxes[1], key=lambda x: x[0])
width = abs(Lpoint[0] - Rpoint[0])
mid = (Rpoint[0] + Lpoint[0]) / 2
distance_from_center = mid - screenX / 2
offset = getOffset(width, distance_from_center)
if width > 0:
dist = (realTapeDistance * focal_length) / width
return dist, offset
else:
return math.nan, offset
def getOffset(width, x):
# if width = 20cm then what is x in cm
offset = x / (width / (realTapeDistance))
return -offset
def createAnnotatedDisplay(
frame: np.array, pairs: list, closestToMiddle: tuple, circle: tuple
) -> np.array:
frame = cv2.line(frame, (160, 0), (160, 240), (255, 0, 0), thickness=1)
for pair in pairs:
if (pair[0][1][0] == closestToMiddle[0][0]).all():
colour = (0, 255, 0) #Green
frame = cv2.circle(
frame, (int(circle[0][0]), int(circle[0][1])), int(circle[1]), colour
)
else:
colour = (0, 0, 255) #Red
for tape in pair:
frame = cv2.drawContours(
frame, [np.int0(tape[1])], 0, colour, thickness=2
)
return frame
def getRetroPos(frame: np.array, annotated: bool, hsv: np.array, mask: np.array) -> (np.array, float, float):
"""Function for finding retro-reflective tape"""
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV, dst=hsv)
# Convert to HSV to make the mask easier
mask = cv2.inRange(hsv, lowerGreen, higherGreen, dst=mask)
# Create a mask of everything in between the greens
_, contours, _ = cv2.findContours(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
# Find the contours
if len(contours) <= 1:
# Get contours with area above magic number 10 and append its smallest rectangle
return frame, math.nan, math.nan
rects = []
for cnt in contours:
if cv2.contourArea(cnt) > minContourArea:
rects.append(cv2.minAreaRect(cnt))
boxed_and_angles = []
for rect in rects:
if math.isclose(rect[2], leftAngleSize, abs_tol=angleOffset):
boxed_and_angles.append([False, np.array(cv2.boxPoints(rect)), cv2.contourArea(cv2.boxPoints(rect))])
elif math.isclose(rect[2], rightAngleSize, abs_tol=angleOffset):
boxed_and_angles.append([True, np.array(cv2.boxPoints(rect)), cv2.contourArea(cv2.boxPoints(rect))])
pairs = []
leftRect = None
for rect in sorted(
boxed_and_angles, key=lambda x: max(x[1][:, 0]) if x[0] else min(x[1][:, 0])
): # Get rectangle pairs
if not rect[0]:
leftRect = rect
elif leftRect and math.isclose(leftRect[2], rect[2], abs_tol=0.3*leftRect[2]):
pairs.append((leftRect, rect))
leftRect = None
if len(pairs) < 1:
return frame, math.nan, math.nan
closestToMiddle = list(min(
pairs, key=lambda x: abs(np.mean([x[0][1][:,0] + x[1][1][:,0]]) - screenSize[0])
))
closestToMiddle = [closestToMiddle[0][1], closestToMiddle[1][1]]
(x, y), radius = cv2.minEnclosingCircle(np.array(closestToMiddle).reshape(-1, 2))
if annotated:
frame = createAnnotatedDisplay(frame, pairs, closestToMiddle, ((x, y), radius))
dist, offset = getDistance(closestToMiddle)
return (
frame,
dist,
offset,
)
if __name__ == "__main__":
if len(sys.argv) >= 2:
configFile = sys.argv[1]
# read configuration
cameraConfigs = readConfig()
# start NetworkTables
NetworkTables.initialize(server="10.47.74.2")
NetworkTables.setUpdateRate(1)
nt = NetworkTables.getTable("/vision")
ping = nt.getEntry("ping")
raspi_pong = nt.getEntry("raspi_pong")
rio_pong = nt.getEntry("rio_pong")
entry_game_piece = nt.getEntry("game_piece")
entry_dist = nt.getEntry("fiducial_x")
entry_offset = nt.getEntry("fiducial_y")
entry_fiducial_time = nt.getEntry("fiducial_time")
entry_camera = nt.getEntry("using_cargo_camera")
# start cameras
cameras = []
for cameraConfig in cameraConfigs:
cameras.append(startCamera(cameraConfig))
cargo_rocket_sink = cameras[0][0].getVideo(camera=cameras[0][1])
hatch_sink = cameras[1][0].getVideo(camera=cameras[1][1])
source = cameras[0][0].putVideo("Driver_Stream", screenX, screenY)
frame = np.zeros(shape=(screenSize[1], screenSize[0], 3), dtype=np.uint8)
image = np.zeros(shape=(screenSize[1], screenSize[0], 3), dtype=np.uint8)
hsv = np.zeros(shape=(screenSize[1], screenSize[0], 3), dtype=np.uint8)
mask = np.zeros(shape=(screenSize[1], screenSize[0]), dtype=np.uint8)
img = np.zeros(shape=(screenSize[1], screenSize[0], 3), dtype=np.uint8)
old_ping_time = 0
while True:
ping_time = ping.getNumber(0)
if abs(ping_time - old_ping_time) > 0.00000001:
raspi_pong.setNumber(time.monotonic())
rio_pong.setNumber(ping_time)
old_ping_time = ping_time
game_piece = entry_game_piece.getBoolean(0)
fiducial_time = time.monotonic()
sink = hatch_sink if game_piece == 0 else cargo_rocket_sink
entry_camera.setBoolean(False if not game_piece else True)
frame_time, frame = sink.grabFrameNoTimeout(image=frame)
if frame_time == 0:
print(sink.getError(), file=sys.stderr)
source.notifyError(sink.getError())
outtake = False
percent = math.nan
else:
image, dist, offset = getRetroPos(frame, True, hsv, mask)
source.putFrame(image)
if not math.isnan(dist):
if game_piece == 1:
dist *= -1
offset *= -1
entry_dist.setNumber(dist)
entry_offset.setNumber(offset)
entry_fiducial_time.setNumber(fiducial_time)
NetworkTables.flush()
|
[
"networktables.NetworkTables.getTable",
"networktables.NetworkTables.flush",
"json.dumps",
"cv2.boxPoints",
"numpy.mean",
"cv2.minAreaRect",
"cv2.inRange",
"cv2.line",
"cv2.contourArea",
"cv2.cvtColor",
"networktables.NetworkTables.setUpdateRate",
"math.isnan",
"numpy.int0",
"networktables.NetworkTables.initialize",
"json.load",
"cscore.CameraServer.getInstance",
"numpy.zeros",
"time.monotonic",
"numpy.array",
"collections.namedtuple",
"math.isclose",
"cv2.findContours"
] |
[((609, 663), 'collections.namedtuple', 'namedtuple', (['"""CameraConfig"""', "['name', 'path', 'config']"], {}), "('CameraConfig', ['name', 'path', 'config'])\n", (619, 663), False, 'from collections import namedtuple\n'), ((1154, 1180), 'cscore.CameraServer.getInstance', 'CameraServer.getInstance', ([], {}), '()\n', (1178, 1180), False, 'from cscore import CameraServer\n'), ((2108, 2171), 'cv2.line', 'cv2.line', (['frame', '(160, 0)', '(160, 240)', '(255, 0, 0)'], {'thickness': '(1)'}), '(frame, (160, 0), (160, 240), (255, 0, 0), thickness=1)\n', (2116, 2171), False, 'import cv2\n'), ((2815, 2862), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2HSV'], {'dst': 'hsv'}), '(frame, cv2.COLOR_BGR2HSV, dst=hsv)\n', (2827, 2862), False, 'import cv2\n'), ((2919, 2970), 'cv2.inRange', 'cv2.inRange', (['hsv', 'lowerGreen', 'higherGreen'], {'dst': 'mask'}), '(hsv, lowerGreen, higherGreen, dst=mask)\n', (2930, 2970), False, 'import cv2\n'), ((3049, 3111), 'cv2.findContours', 'cv2.findContours', (['mask', 'cv2.RETR_LIST', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n', (3065, 3111), False, 'import cv2\n'), ((4985, 5030), 'networktables.NetworkTables.initialize', 'NetworkTables.initialize', ([], {'server': '"""10.47.74.2"""'}), "(server='10.47.74.2')\n", (5009, 5030), False, 'from networktables import NetworkTables\n'), ((5036, 5066), 'networktables.NetworkTables.setUpdateRate', 'NetworkTables.setUpdateRate', (['(1)'], {}), '(1)\n', (5063, 5066), False, 'from networktables import NetworkTables\n'), ((5076, 5109), 'networktables.NetworkTables.getTable', 'NetworkTables.getTable', (['"""/vision"""'], {}), "('/vision')\n", (5098, 5109), False, 'from networktables import NetworkTables\n'), ((5812, 5877), 'numpy.zeros', 'np.zeros', ([], {'shape': '(screenSize[1], screenSize[0], 3)', 'dtype': 'np.uint8'}), '(shape=(screenSize[1], screenSize[0], 3), dtype=np.uint8)\n', (5820, 5877), True, 'import numpy as np\n'), ((5890, 5955), 'numpy.zeros', 'np.zeros', ([], {'shape': '(screenSize[1], screenSize[0], 3)', 'dtype': 'np.uint8'}), '(shape=(screenSize[1], screenSize[0], 3), dtype=np.uint8)\n', (5898, 5955), True, 'import numpy as np\n'), ((5966, 6031), 'numpy.zeros', 'np.zeros', ([], {'shape': '(screenSize[1], screenSize[0], 3)', 'dtype': 'np.uint8'}), '(shape=(screenSize[1], screenSize[0], 3), dtype=np.uint8)\n', (5974, 6031), True, 'import numpy as np\n'), ((6043, 6105), 'numpy.zeros', 'np.zeros', ([], {'shape': '(screenSize[1], screenSize[0])', 'dtype': 'np.uint8'}), '(shape=(screenSize[1], screenSize[0]), dtype=np.uint8)\n', (6051, 6105), True, 'import numpy as np\n'), ((6116, 6181), 'numpy.zeros', 'np.zeros', ([], {'shape': '(screenSize[1], screenSize[0], 3)', 'dtype': 'np.uint8'}), '(shape=(screenSize[1], screenSize[0], 3), dtype=np.uint8)\n', (6124, 6181), True, 'import numpy as np\n'), ((921, 933), 'json.load', 'json.load', (['f'], {}), '(f)\n', (930, 933), False, 'import json\n'), ((1280, 1305), 'json.dumps', 'json.dumps', (['config.config'], {}), '(config.config)\n', (1290, 1305), False, 'import json\n'), ((3492, 3549), 'math.isclose', 'math.isclose', (['rect[2]', 'leftAngleSize'], {'abs_tol': 'angleOffset'}), '(rect[2], leftAngleSize, abs_tol=angleOffset)\n', (3504, 3549), False, 'import math\n'), ((6522, 6538), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (6536, 6538), False, 'import time\n'), ((7309, 7330), 'networktables.NetworkTables.flush', 'NetworkTables.flush', ([], {}), '()\n', (7328, 7330), False, 'from networktables import NetworkTables\n'), ((3346, 3366), 'cv2.contourArea', 'cv2.contourArea', (['cnt'], {}), '(cnt)\n', (3361, 3366), False, 'import cv2\n'), ((3678, 3736), 'math.isclose', 'math.isclose', (['rect[2]', 'rightAngleSize'], {'abs_tol': 'angleOffset'}), '(rect[2], rightAngleSize, abs_tol=angleOffset)\n', (3690, 3736), False, 'import math\n'), ((7056, 7072), 'math.isnan', 'math.isnan', (['dist'], {}), '(dist)\n', (7066, 7072), False, 'import math\n'), ((3410, 3430), 'cv2.minAreaRect', 'cv2.minAreaRect', (['cnt'], {}), '(cnt)\n', (3425, 3430), False, 'import cv2\n'), ((4104, 4165), 'math.isclose', 'math.isclose', (['leftRect[2]', 'rect[2]'], {'abs_tol': '(0.3 * leftRect[2])'}), '(leftRect[2], rect[2], abs_tol=0.3 * leftRect[2])\n', (4116, 4165), False, 'import math\n'), ((4544, 4569), 'numpy.array', 'np.array', (['closestToMiddle'], {}), '(closestToMiddle)\n', (4552, 4569), True, 'import numpy as np\n'), ((6348, 6364), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (6362, 6364), False, 'import time\n'), ((2566, 2582), 'numpy.int0', 'np.int0', (['tape[1]'], {}), '(tape[1])\n', (2573, 2582), True, 'import numpy as np\n'), ((3604, 3623), 'cv2.boxPoints', 'cv2.boxPoints', (['rect'], {}), '(rect)\n', (3617, 3623), False, 'import cv2\n'), ((3642, 3661), 'cv2.boxPoints', 'cv2.boxPoints', (['rect'], {}), '(rect)\n', (3655, 3661), False, 'import cv2\n'), ((3790, 3809), 'cv2.boxPoints', 'cv2.boxPoints', (['rect'], {}), '(rect)\n', (3803, 3809), False, 'import cv2\n'), ((3828, 3847), 'cv2.boxPoints', 'cv2.boxPoints', (['rect'], {}), '(rect)\n', (3841, 3847), False, 'import cv2\n'), ((4367, 4407), 'numpy.mean', 'np.mean', (['[x[0][1][:, 0] + x[1][1][:, 0]]'], {}), '([x[0][1][:, 0] + x[1][1][:, 0]])\n', (4374, 4407), True, 'import numpy as np\n')]
|
#!/usr/bin/python
import os.path
import os
import glob
import subprocess
import numpy as np
import numpy.lib.recfunctions as rfn
from astropy.io import fits
from astropy.stats import bayesian_blocks
import argparse
from collections import defaultdict
def checkDatFile(datFileName):
if not os.path.isfile(datFileName):
print(datFileName + ' is not a file!\n')
return False
return True
def extractErrors(errorStr):
errors = errorStr.replace('(', '').replace(')', '').split(' - ')
return float(errors[0]), float(errors[1])
def calibrate_ncp_prior(flux=None, fluxerr=None, time=None, timebin=None,
p_0=[0.05], n_sims=1000, min_prior=0.2, max_prior=4,
n_steps=20, outPrefix=None):
# path='./gammas/', exp='VERITAS', source=''):
# Calibration of ncp_prior:
# input:
# flux, fluxerr, time, timebin : Lightcurve in format numpy.ndarray or pandas.Series
# p_0 : FPR input array
# n_sims : float
# min_prior : float/int
# max_prior : float/int
# n_stepts : number of steps in [min_prior, max_prior]
sourceNow = outPrefix.split('/')[0]
falsecount = np.zeros(n_steps)
ncp_priors = np.linspace(min_prior, max_prior, n_steps)
result = {}
best = {}
# distance between points not relevant but should be ordered
x = np.arange(len(flux))
average = np.average(flux, weights=fluxerr)
# simulating lightcurves for n_sims times and applying algorithem
# in n_steps steps between min_prior and max_prior. Afterwards
# false positive rate is calculated if a block was detected.
for k in range(n_sims):
if k % 10 == 0:
print(sourceNow, 'current simulation: {}'.format(k))
# simulate the flux values
datapoints = np.random.normal(average, fluxerr, len(fluxerr))
# aply bayesian block and count fpr
for l, ncp_prior in enumerate(ncp_priors):
gamma = 10**(-ncp_prior)
bb = bayesian_blocks(x, datapoints, fluxerr, fitness='measures', gamma=gamma)
if len(bb) > 2:
falsecount[l] += 1
fp_rate = falsecount/n_sims
# Final result of FPR in dependency of ncp_prior
result = np.core.records.fromarrays([ncp_priors, fp_rate], names='ncp, fp')
# Calculation of best results for the values in p_0
for p0 in p_0:
best[str(p0)] = result[(np.abs(result.fp - p0)).argmin()]
# Saving result and best to txt file
with open(outPrefix + '_result.txt', 'wb') as fOut:
np.savetxt(fOut, result)
# with open(outPrefix + '_results_best.txt', 'wb') as fOut:
# np.savetxt(fOut, [best])
return(result, best)
def readSwiftLC(swiftFileName, rebin, veritasObs):
swiftFile = open(swiftFileName, 'r')
date, dateErrUp, dateErrDn = list(), list(), list()
rate, rateErrUp, rateErrDn = list(), list(), list()
mode = list()
for line in swiftFile:
if '!' in line:
if 'WT data' in line:
modeNow = 'WT'
continue
if 'PC data' in line:
modeNow = 'PC'
continue
if 'Upper limit' in line:
break
if '!' not in line and len(line) > 1 and 'NO' not in line and 'READ' not in line:
date.append(float(line.split()[0].strip()))
dateErrUp.append(abs(float(line.split()[1].strip())))
dateErrDn.append(abs(float(line.split()[2].strip())))
rate.append(float(line.split()[3].strip()))
rateErrUp.append(abs(float(line.split()[4].strip())))
rateErrDn.append(abs(float(line.split()[5].strip())))
mode.append(modeNow)
swiftData = np.c_[date, dateErrDn, dateErrUp,
rate, rateErrDn, rateErrUp,
mode]
headersType = {'names': ('Date', 'Date error down', 'Date error up',
'Rate', 'Rate error down', 'Rate error up',
'mode'),
'formats': ('f8', 'f8', 'f8',
'f8', 'f8', 'f8',
'U40')}
swiftData = np.core.records.fromarrays(swiftData.transpose(), dtype=headersType)
if rebin == 'monthly' or rebin == 'weekly' or rebin == 'yearly':
if rebin == 'yearly':
# Take only contemporaneous observations
swiftMask = list()
for swiftObsNow in swiftData['Date']:
keepSwift = False
for veritasObsNow in veritasObs:
if abs(swiftObsNow - veritasObsNow) < 1:
keepSwift = True
swiftMask.append(keepSwift)
swiftData = swiftData[swiftMask]
nDays = 28
if rebin == 'yearly':
nDays = 365
if rebin == 'weekly':
nDays = 7
mjd_min = 53423 # This is exactly 147 weeks before the start day of Fermi
mjd_max = 58465 # This is ~today
nBins = int((mjd_max - mjd_min)/nDays)
timeBins = np.linspace(mjd_min, mjd_max, nBins, False)
date, dateErrDn, dateErrUp = list(), list(), list()
rate, rateErrDn, rateErrUp = list(), list(), list()
mode = list()
for i_bin, edgeDn in enumerate(timeBins):
edgeUp = 1e6
if i_bin < len(timeBins) - 1:
edgeUp = timeBins[i_bin+1]
# TODO - should we divide into the different modes?
tempSwiftData = swiftData[(edgeDn <= swiftData['Date']) & (swiftData['Date'] < edgeUp)]
if len(tempSwiftData) > 0:
date.append(np.average(tempSwiftData['Date']))
dateErrDn.append(date[-1] - np.min(tempSwiftData['Date']))
dateErrUp.append(np.max(tempSwiftData['Date'] - date[-1]))
totalError = tempSwiftData['Rate error down'] + tempSwiftData['Rate error up']
rate.append(np.average(tempSwiftData['Rate'], weights=1./totalError))
rateErrDn.append(np.sqrt(np.sum(np.power(tempSwiftData['Rate error down'], 2))))
rateErrUp.append(np.sqrt(np.sum(np.power(tempSwiftData['Rate error up'], 2))))
mode.append('Combined')
swiftData = np.c_[date, dateErrDn, dateErrUp,
rate, rateErrDn, rateErrUp,
mode]
swiftData = np.core.records.fromarrays(swiftData.transpose(), dtype=headersType)
return swiftData
def rebinFermi(fermiLC, veritasObs):
# First convert to numpy array to make it easier
fermiLC = np.c_[fermiLC['tmax_mjd'], fermiLC['tmin_mjd'], fermiLC['flux'], fermiLC['flux_err']]
headersType = {'names': ('tmax_mjd', 'tmin_mjd', 'flux', 'flux_err'),
'formats': ('f8', 'f8', 'f8', 'f8')}
fermiLC = np.core.records.fromarrays(fermiLC.transpose(), dtype=headersType)
# Take only contemporaneous observations (in this case, within a month)
# fermiBlocks = bayesian_blocks(fermiLC['tmax_mjd'], fermiLC['flux'], fermiLC['flux_err']
fermiMask = list()
for fermiDataPoint in fermiLC['tmax_mjd']:
keepFermi = False
for veritasObsNow in veritasObs:
if abs(fermiDataPoint - veritasObsNow) < 28:
keepFermi = True
fermiMask.append(keepFermi)
fermiLC = fermiLC[fermiMask]
nDays = 365
mjd_min = 53423 # This is exactly 147 weeks before the start day of Fermi
mjd_max = 58465 # This is ~today
nBins = int((mjd_max - mjd_min)/nDays)
timeBins = np.linspace(mjd_min, mjd_max, nBins, False)
rebinnedFermi = defaultdict(list)
for i_bin, edgeDn in enumerate(timeBins):
edgeUp = 1e6
if i_bin < len(timeBins) - 1:
edgeUp = timeBins[i_bin+1]
tempFermiData = fermiLC[(edgeDn <= fermiLC['tmax_mjd']) & (fermiLC['tmax_mjd'] < edgeUp)]
if len(tempFermiData) > 0:
rebinnedFermi['tmax_mjd'].append(np.average(tempFermiData['tmax_mjd']))
rebinnedFermi['tmin_mjd'].append(np.average(tempFermiData['tmin_mjd']))
rebinnedFermi['flux'].append(np.average(tempFermiData['flux'],
weights=1./tempFermiData['flux_err']))
rebinnedFermi['flux_err'].append(np.sqrt(np.sum(np.power(tempFermiData['flux_err'],
2))))
fermiLC = np.c_[rebinnedFermi['tmax_mjd'], rebinnedFermi['tmin_mjd'],
rebinnedFermi['flux'], rebinnedFermi['flux_err']]
fermiLC = np.core.records.fromarrays(fermiLC.transpose(), dtype=headersType)
return fermiLC
def readCorrTable(corrTableFile):
headersType = {'names': ('Left edges', 'Right edges',
'Correction factor', 'CorrFactorError',
'CorrFactorErrorCons'),
'formats': ('f8', 'f8', 'f8', 'f8', 'f8')}
return np.loadtxt(corrTableFile, dtype=headersType)
def correctFluxesFromCrabLC(origLC, corrTable):
corrLC = np.copy(origLC)
for i_point, dateNow in enumerate(corrLC['DateMJD']):
corrBin = np.argmax(dateNow < corrTable['Right edges'])
if corrTable['Correction factor'][corrBin] != 1:
corrLC['Flux'][i_point] = (corrLC['Flux'][i_point] /
corrTable['Correction factor'][corrBin])
corrLC['Flux Error'][i_point] = np.sqrt(np.power(corrLC['Flux Error'][i_point], 2) +
np.power(corrTable['CorrFactorError'][corrBin] *
corrLC['Flux'][i_point], 2))
return corrLC
def correctFluxes(origLC, corrTable):
corrLC = correctFluxesFromCrabLC(origLC, corrTable)
# We increased the threshold, so no need to add a systematic uncertainty anymore
return corrLC
def determinePriors(veritasDatFileName, fermiFile, swiftFullFileName, corrTable,
veritasObsFile, sourceNow, binning):
for fileNow in [veritasDatFileName, fermiFile, swiftFullFileName]:
if not checkDatFile(fileNow):
return
veritasDatFile = open(veritasDatFileName, 'r')
headersType = {'names': ('DateMJD', 'Date Error',
'Flux', 'Flux Error'),
'formats': ('f8', 'f8',
'f8', 'f8')}
veritasData = np.loadtxt(veritasDatFile, dtype=headersType)
veritasFluxes = veritasData[veritasData['Flux Error'] > 0]
veritasFluxes = correctFluxes(veritasFluxes, corrTable)
nsims = 15000
n_steps = 40
experiment = 'veritas'
outPrefix = '{}/{}_{}_{}'.format(sourceNow,
experiment,
binning,
str(nsims))
result, best = calibrate_ncp_prior(flux=veritasFluxes['Flux'],
fluxerr=veritasFluxes['Flux Error'],
time=veritasFluxes['DateMJD'],
timebin=veritasFluxes['Date Error'],
p_0=[0.01, 0.05], n_sims=nsims,
min_prior=0.2, max_prior=4,
n_steps=n_steps, outPrefix=outPrefix)
gamma = 10**(- best[str(0.01)].ncp)
print(sourceNow, 'VERITAS', 'gamma - ', gamma)
if binning == 'yearly' or binning == 'monthly':
fermiDatFile = open(fermiFile, 'rb')
fermiLC = np.load(fermiDatFile, encoding='latin1').flat[0]
if binning == 'yearly':
headersType = {'names': ('run', 'date', 'flux', 'fluxError',
'significance', 'ze'),
'formats': ('f8', 'f8', 'f8', 'f8',
'f8', 'f8')}
veritasObs = np.loadtxt(veritasObsFile, dtype=headersType)
fermiLC = rebinFermi(fermiLC, veritasObs['date'])
experiment = 'fermi'
outPrefix = '{}/{}_{}_{}'.format(sourceNow,
experiment,
binning,
str(nsims))
result, best = calibrate_ncp_prior(flux=fermiLC['flux'],
fluxerr=fermiLC['flux_err'],
time=fermiLC['tmax_mjd'],
timebin=fermiLC['tmax_mjd'] - fermiLC['tmin_mjd'],
p_0=[0.01, 0.05], n_sims=nsims,
min_prior=0.2, max_prior=4,
n_steps=n_steps, outPrefix=outPrefix)
gamma = 10**(- best[str(0.01)].ncp)
print(sourceNow, 'Fermi', 'gamma - ', gamma)
swiftBinnings = [binning]
if binning == 'yearly': # run also the daily for Swift in this case
swiftBinnings = ['daily', 'yearly']
for swiftBinNow in swiftBinnings:
if swiftBinNow == 'yearly':
veritasObsDates = veritasObs['date']
else:
veritasObsDates = list()
swiftData = readSwiftLC(swiftFile, swiftBinNow, veritasObsDates)
experiment = 'swift'
outPrefix = '{}/{}_{}_{}'.format(sourceNow,
experiment,
swiftBinNow,
str(nsims))
swiftRateErrorAverage = (swiftData['Rate error down'] + swiftData['Rate error up'])/2.
result, best = calibrate_ncp_prior(flux=swiftData['Rate'],
fluxerr=swiftRateErrorAverage,
time=swiftData['Date'],
timebin=(swiftData['Date error down'] +
swiftData['Date error up']),
p_0=[0.01, 0.05], n_sims=nsims,
min_prior=0.2, max_prior=4,
n_steps=n_steps, outPrefix=outPrefix)
gamma = 10**(- best[str(0.01)].ncp)
print(sourceNow, 'Swift', swiftBinNow, 'gamma - ', gamma)
return
if __name__ == '__main__':
np.random.seed(1234)
parser = argparse.ArgumentParser(description=('Calculate optimal '
'priors for Bayesian blocks.'))
parser.add_argument('source')
parser.add_argument('binning')
args = parser.parse_args()
sources = {'1ES0033': '1ES 0033+595',
'1ES0502': '1ES 0502+675',
'1ES1011': '1ES 1011+496',
'1ES1218': '1ES 1218+304',
'1ES0229': '1ES 0229+200',
'RGBJ0710': 'RGB J0710+591',
'PG1553': 'PG 1553+113',
'PKS1424': 'PKS 1424+240'
}
if args.source not in sources:
print('Source', args.source, 'not known')
hdulist = fits.open(('/afs/ifh.de/group/cta/scratch/ogueta/sw/anaconda/envs/fermi/'
'lib/python2.7/site-packages/fermipy/data/catalogs/gll_psc_8year_v5.fit'))
sourceCatalog = hdulist[1].data
workDir = os.getcwd() + '/'
fermiPrefix = '/lustre/fs19/group/cta/users/ogueta/fermi/variabilityStudy/'
veritasPrefix = '/afs/ifh.de/group/cta/scratch/ogueta/vts/variabilityStudy/makeLC/'
swiftPrefix = '/afs/ifh.de/group/cta/scratch/ogueta/vts/variabilityStudy/swift/onlineTool/'
corrTableFile = ('/afs/ifh.de/group/cta/scratch/ogueta/vts/variabilityStudy/'
'crabStability/plotLC/correctionFactors.txt')
veritasObsPrefix = '/afs/ifh.de/group/cta/scratch/ogueta/vts/variabilityStudy/spectra/'
for i_src, sourceTeV in enumerate(sourceCatalog['ASSOC_TEV']):
if sources[args.source] in sourceTeV:
fermiLC = sourceCatalog['Source_Name'][i_src].replace(' ', '_').lower()
fermiLC += '_lightcurve.npy'
fermiBinning = args.binning
if fermiBinning != 'monthly':
fermiBinning = 'monthly'
fermiFile = os.path.join(fermiPrefix, '{}LightCurves'.format(fermiBinning),
args.source, args.source, fermiLC)
veritasDirectory = os.path.join(veritasPrefix, args.source)
veritasLC = glob.glob(os.path.join(veritasDirectory,
'{}*fullEnergyRange*.txt'.format(args.binning)))[0]
veritasFile = os.path.join(veritasDirectory, veritasLC)
corrTable = readCorrTable(corrTableFile)
veritasObsFile = os.path.join(os.path.join(veritasObsPrefix, args.source), 'fluxPerRun.txt')
swiftFile = os.path.join(swiftPrefix, args.source,
'dailyBins', '{}_lightcurve.qdp'.format(args.source))
try:
subprocess.check_call(['mkdir', '-p', args.source])
except subprocess.CalledProcessError as e:
print('Could not create output directory')
sys.exit(1)
determinePriors(veritasFile, fermiFile, swiftFile, corrTable,
veritasObsFile, args.source, args.binning)
|
[
"numpy.load",
"numpy.random.seed",
"argparse.ArgumentParser",
"astropy.stats.bayesian_blocks",
"numpy.argmax",
"numpy.abs",
"numpy.core.records.fromarrays",
"collections.defaultdict",
"os.path.isfile",
"os.path.join",
"subprocess.check_call",
"numpy.copy",
"numpy.power",
"numpy.savetxt",
"numpy.max",
"numpy.loadtxt",
"numpy.linspace",
"numpy.average",
"numpy.min",
"astropy.io.fits.open",
"os.getcwd",
"numpy.zeros"
] |
[((1234, 1251), 'numpy.zeros', 'np.zeros', (['n_steps'], {}), '(n_steps)\n', (1242, 1251), True, 'import numpy as np\n'), ((1269, 1311), 'numpy.linspace', 'np.linspace', (['min_prior', 'max_prior', 'n_steps'], {}), '(min_prior, max_prior, n_steps)\n', (1280, 1311), True, 'import numpy as np\n'), ((1451, 1484), 'numpy.average', 'np.average', (['flux'], {'weights': 'fluxerr'}), '(flux, weights=fluxerr)\n', (1461, 1484), True, 'import numpy as np\n'), ((2297, 2363), 'numpy.core.records.fromarrays', 'np.core.records.fromarrays', (['[ncp_priors, fp_rate]'], {'names': '"""ncp, fp"""'}), "([ncp_priors, fp_rate], names='ncp, fp')\n", (2323, 2363), True, 'import numpy as np\n'), ((7634, 7677), 'numpy.linspace', 'np.linspace', (['mjd_min', 'mjd_max', 'nBins', '(False)'], {}), '(mjd_min, mjd_max, nBins, False)\n', (7645, 7677), True, 'import numpy as np\n'), ((7699, 7716), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (7710, 7716), False, 'from collections import defaultdict\n'), ((9038, 9082), 'numpy.loadtxt', 'np.loadtxt', (['corrTableFile'], {'dtype': 'headersType'}), '(corrTableFile, dtype=headersType)\n', (9048, 9082), True, 'import numpy as np\n'), ((9147, 9162), 'numpy.copy', 'np.copy', (['origLC'], {}), '(origLC)\n', (9154, 9162), True, 'import numpy as np\n'), ((10529, 10574), 'numpy.loadtxt', 'np.loadtxt', (['veritasDatFile'], {'dtype': 'headersType'}), '(veritasDatFile, dtype=headersType)\n', (10539, 10574), True, 'import numpy as np\n'), ((14482, 14502), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (14496, 14502), True, 'import numpy as np\n'), ((14517, 14606), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Calculate optimal priors for Bayesian blocks."""'}), "(description=\n 'Calculate optimal priors for Bayesian blocks.')\n", (14540, 14606), False, 'import argparse\n'), ((15213, 15366), 'astropy.io.fits.open', 'fits.open', (['"""/afs/ifh.de/group/cta/scratch/ogueta/sw/anaconda/envs/fermi/lib/python2.7/site-packages/fermipy/data/catalogs/gll_psc_8year_v5.fit"""'], {}), "(\n '/afs/ifh.de/group/cta/scratch/ogueta/sw/anaconda/envs/fermi/lib/python2.7/site-packages/fermipy/data/catalogs/gll_psc_8year_v5.fit'\n )\n", (15222, 15366), False, 'from astropy.io import fits\n'), ((16462, 16502), 'os.path.join', 'os.path.join', (['veritasPrefix', 'args.source'], {}), '(veritasPrefix, args.source)\n', (16474, 16502), False, 'import os\n'), ((16656, 16697), 'os.path.join', 'os.path.join', (['veritasDirectory', 'veritasLC'], {}), '(veritasDirectory, veritasLC)\n', (16668, 16697), False, 'import os\n'), ((296, 323), 'os.path.isfile', 'os.path.isfile', (['datFileName'], {}), '(datFileName)\n', (310, 323), False, 'import os\n'), ((2612, 2636), 'numpy.savetxt', 'np.savetxt', (['fOut', 'result'], {}), '(fOut, result)\n', (2622, 2636), True, 'import numpy as np\n'), ((5141, 5184), 'numpy.linspace', 'np.linspace', (['mjd_min', 'mjd_max', 'nBins', '(False)'], {}), '(mjd_min, mjd_max, nBins, False)\n', (5152, 5184), True, 'import numpy as np\n'), ((9240, 9285), 'numpy.argmax', 'np.argmax', (["(dateNow < corrTable['Right edges'])"], {}), "(dateNow < corrTable['Right edges'])\n", (9249, 9285), True, 'import numpy as np\n'), ((15438, 15449), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (15447, 15449), False, 'import os\n'), ((16777, 16820), 'os.path.join', 'os.path.join', (['veritasObsPrefix', 'args.source'], {}), '(veritasObsPrefix, args.source)\n', (16789, 16820), False, 'import os\n'), ((16997, 17048), 'subprocess.check_call', 'subprocess.check_call', (["['mkdir', '-p', args.source]"], {}), "(['mkdir', '-p', args.source])\n", (17018, 17048), False, 'import subprocess\n'), ((2061, 2133), 'astropy.stats.bayesian_blocks', 'bayesian_blocks', (['x', 'datapoints', 'fluxerr'], {'fitness': '"""measures"""', 'gamma': 'gamma'}), "(x, datapoints, fluxerr, fitness='measures', gamma=gamma)\n", (2076, 2133), False, 'from astropy.stats import bayesian_blocks\n'), ((12022, 12067), 'numpy.loadtxt', 'np.loadtxt', (['veritasObsFile'], {'dtype': 'headersType'}), '(veritasObsFile, dtype=headersType)\n', (12032, 12067), True, 'import numpy as np\n'), ((8041, 8078), 'numpy.average', 'np.average', (["tempFermiData['tmax_mjd']"], {}), "(tempFermiData['tmax_mjd'])\n", (8051, 8078), True, 'import numpy as np\n'), ((8125, 8162), 'numpy.average', 'np.average', (["tempFermiData['tmin_mjd']"], {}), "(tempFermiData['tmin_mjd'])\n", (8135, 8162), True, 'import numpy as np\n'), ((8205, 8279), 'numpy.average', 'np.average', (["tempFermiData['flux']"], {'weights': "(1.0 / tempFermiData['flux_err'])"}), "(tempFermiData['flux'], weights=1.0 / tempFermiData['flux_err'])\n", (8215, 8279), True, 'import numpy as np\n'), ((11667, 11707), 'numpy.load', 'np.load', (['fermiDatFile'], {'encoding': '"""latin1"""'}), "(fermiDatFile, encoding='latin1')\n", (11674, 11707), True, 'import numpy as np\n'), ((2472, 2494), 'numpy.abs', 'np.abs', (['(result.fp - p0)'], {}), '(result.fp - p0)\n', (2478, 2494), True, 'import numpy as np\n'), ((5721, 5754), 'numpy.average', 'np.average', (["tempSwiftData['Date']"], {}), "(tempSwiftData['Date'])\n", (5731, 5754), True, 'import numpy as np\n'), ((5864, 5904), 'numpy.max', 'np.max', (["(tempSwiftData['Date'] - date[-1])"], {}), "(tempSwiftData['Date'] - date[-1])\n", (5870, 5904), True, 'import numpy as np\n'), ((6029, 6088), 'numpy.average', 'np.average', (["tempSwiftData['Rate']"], {'weights': '(1.0 / totalError)'}), "(tempSwiftData['Rate'], weights=1.0 / totalError)\n", (6039, 6088), True, 'import numpy as np\n'), ((9540, 9582), 'numpy.power', 'np.power', (["corrLC['Flux Error'][i_point]", '(2)'], {}), "(corrLC['Flux Error'][i_point], 2)\n", (9548, 9582), True, 'import numpy as np\n'), ((9637, 9713), 'numpy.power', 'np.power', (["(corrTable['CorrFactorError'][corrBin] * corrLC['Flux'][i_point])", '(2)'], {}), "(corrTable['CorrFactorError'][corrBin] * corrLC['Flux'][i_point], 2)\n", (9645, 9713), True, 'import numpy as np\n'), ((5800, 5829), 'numpy.min', 'np.min', (["tempSwiftData['Date']"], {}), "(tempSwiftData['Date'])\n", (5806, 5829), True, 'import numpy as np\n'), ((8390, 8428), 'numpy.power', 'np.power', (["tempFermiData['flux_err']", '(2)'], {}), "(tempFermiData['flux_err'], 2)\n", (8398, 8428), True, 'import numpy as np\n'), ((6135, 6180), 'numpy.power', 'np.power', (["tempSwiftData['Rate error down']", '(2)'], {}), "(tempSwiftData['Rate error down'], 2)\n", (6143, 6180), True, 'import numpy as np\n'), ((6232, 6275), 'numpy.power', 'np.power', (["tempSwiftData['Rate error up']", '(2)'], {}), "(tempSwiftData['Rate error up'], 2)\n", (6240, 6275), True, 'import numpy as np\n')]
|
import tensorflow as tf
def upsample_layer(name, inputs):
"""
Takes the outputs of the previous convolutional layer and upsamples them by a factor of two
using the 'nearest neighbor' method.
Parameters
----------
name : string
The name of the tensor to be used in TensorBoard.
inputs : tensor
The output of the previous convolutional layer.
This tensor will have the shape of:
[batch_size, h, w, c]
Returns
-------
inputs : tensor
A tensor of shape:
[batch_size, 2 * h, 2 * w, c]
"""
with tf.variable_scope(name):
inputs = tf.image.resize_nearest_neighbor(inputs, (inputs.shape[1]*2, inputs.shape[2]*2))
return inputs
|
[
"tensorflow.variable_scope",
"tensorflow.image.resize_nearest_neighbor"
] |
[((590, 613), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (607, 613), True, 'import tensorflow as tf\n'), ((633, 722), 'tensorflow.image.resize_nearest_neighbor', 'tf.image.resize_nearest_neighbor', (['inputs', '(inputs.shape[1] * 2, inputs.shape[2] * 2)'], {}), '(inputs, (inputs.shape[1] * 2, inputs.shape\n [2] * 2))\n', (665, 722), True, 'import tensorflow as tf\n')]
|
#!/usr/bin/env python
'''
Core_Tests.py
Defines unit tests for core.
'''
#############
# IMPORTS #
#############
# standard python packages
import inspect, os, sqlite3, sys, unittest
from StringIO import StringIO
# ------------------------------------------------------ #
# import sibling packages HERE!!!
sys.path.append( os.path.abspath( __file__ + "/../../../../src" ) )
from dedt import dedt, dedalusParser, clockRelation, dedalusRewriter
from utils import tools
# ------------------------------------------------------ #
testPath = os.path.abspath(__file__+"/../../../../qa")
################
# CORE TESTS #
################
class Core_Tests( unittest.TestCase ) :
##########################
# INSTANTIATE LDFICORE #
##########################
#
#############
# ATTRIBS #
#############
def test_LDFICoreAttribs_dedt( self ) :
return None
#########################
# THREAD OF EXECUTION #
#########################
# use this main if running this script exclusively.
if __name__ == "__main__" :
unittest.main( verbosity=2 )
#########
# EOF #
#########
|
[
"unittest.main",
"os.path.abspath"
] |
[((549, 594), 'os.path.abspath', 'os.path.abspath', (["(__file__ + '/../../../../qa')"], {}), "(__file__ + '/../../../../qa')\n", (564, 594), False, 'import inspect, os, sqlite3, sys, unittest\n'), ((330, 376), 'os.path.abspath', 'os.path.abspath', (["(__file__ + '/../../../../src')"], {}), "(__file__ + '/../../../../src')\n", (345, 376), False, 'import inspect, os, sqlite3, sys, unittest\n'), ((1049, 1075), 'unittest.main', 'unittest.main', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (1062, 1075), False, 'import inspect, os, sqlite3, sys, unittest\n')]
|
from flask import Flask, abort, render_template, redirect, url_for, request, session, send_from_directory, flash, jsonify
from markupsafe import escape
import random, os, database, json, requests
from werkzeug.utils import secure_filename
# create the application object
app = Flask(__name__)
app.config['DataFolder'] = "/".join(os.path.abspath(__file__).split("/")[:-1]) + "/" + "data"
app.secret_key = os.urandom(24)
def programExists(name):
"""Check whether `name` is on PATH and marked as executable."""
# from whichcraft import which
from shutil import which
return which(name) is not None
def eflash(error, back, title="Error!", backt=None, extra=None):
extra = extra if extra else ""
backt = backt if backt else ""
return render_template("error.html", e=error, url=back, error=title, urlt=backt, extra=extra)
def CoSo(version):
version = str(version)
return render_template("comingSoon.html", ver=version, background=database.Database("config.json").get('background'))
@app.route("/", methods=['GET'])
def homePage():
try:
popup = request.args['popup']
except Exception as e:
popup = None
return render_template('homePage.html', version=database.Database("config.json").get("version"), popup = popup, background=database.Database("config.json").get('background'))
@app.route('/data/<path:filename>/')
def returnData(filename):
return send_from_directory(app.config['DataFolder'],
filename)
@app.route('/videos/')
def videosList():
links=['<!--This Page Was Auto Generated-->\n<div align=\"center\">\n<br>\n<a href=/ ><img src=/data/home.png height=17px /></a> <input type="text" id="mySearch" onkeyup="myFunction()" placeholder="Search.." title="Type in a category">\n<br><br><ul id="myMenu">']
f = []
for (dirpath, dirnames, filenames) in os.walk(database.Database("config.json").get("videofolder")):
f.extend(dirnames)
break
for thing in f:
links.append("\n <li><a align='center' href='{}'><img src='{}' height=12% width=15% /><br><b>{}</b></a><br></li>".format('/videos/'+thing.replace("'", "%27"), '/videos/'+thing.replace("'", "%27")+'/thumb',thing))
links.append('</ul></div>')
return render_template('videos.html', links=''.join(links), background=database.Database("config.json").get('background'))
@app.route('/videos/<video>')
def videoPage(video):
for root, dirs, files in os.walk(database.Database("config.json").get("videofolder") + '/' + video):
for file in files:
if file.endswith('.description'):
with open(database.Database("config.json").get("videofolder") + '/' + video + '/' + file, 'r') as de:
desc = de.read()
try:
desc
except:
desc=''
break
for root, dirs, files in os.walk(database.Database("config.json").get("videofolder") + '/' + video):
for file in files:
if file.endswith('.mp4') or file.endswith('.webm'):
return render_template("video.html", path='/vidfile/' + video.replace("'", "%27") + "/" + file, description=desc.replace("\n", "\n<br>"), title=video, background=database.Database("config.json").get('background'))
break
@app.route('/videos/<video>/thumb')
def videoPageThumb(video):
for root, dirs, files in os.walk(database.Database("config.json").get("videofolder") + '/' + video):
print(files)
for file in files:
if file.endswith('.png') or file.endswith('.jpg') or file.endswith('.webp') or file.endswith('.jpeg'):
return send_from_directory(database.Database("config.json").get("videofolder") + "/" + video + "/",
file)
break
return send_from_directory("data", "eye.png")
@app.route("/vidfile/<folder>/<file>")
def videourlpagething(folder, file):
return send_from_directory(database.Database("config.json").get("videofolder") + "/" + folder + "/",
file)
@app.route('/credits/')
def creditsPage():
return render_template('credits.html', background=database.Database("config.json").get('background'))
@app.route('/add/')
def addVideoPage():
return render_template('addVideo.html', background=database.Database("config.json").get('background'))
@app.route('/add/yt/', methods=['GET', 'POST'])
def downloadYtVideo():
if not programExists("youtube-dl"):
return eflash('youtube-dl is not installed or is not on your PATH.', request.url)
if request.method == 'POST':
url = request.form['url']
if url != '':
os.system("python3 -m youtube_dl -f best -o \"" + database.Database("config.json").get("videofolder") + "/%(title)s/%(title)s.%(ext)s\"" + " --write-thumbnail --write-description " + url)
return redirect('/')
else:
return render_template('download.html', error='You must specify a URL!', background=database.Database("config.json").get('background'))
else:
return render_template("download.html", background=database.Database("config.json").get('background'))
@app.route('/add/mp4/', methods=['GET', 'POST'])
def downloadYtMP4():
if not programExists("youtube-dl"):
return eflash('youtube-dl is not installed or is not on your PATH.', request.url)
if request.method == 'POST':
url = request.form['url']
if url != '':
if os.path.exists("download.mp4"):
os.rm("download.mp4")
os.system("python3 -m youtube_dl -f best -o " + "download0.mp4 " + url)
return send_from_directory(".",
"download0.mp4", as_attachment=True)
else:
return render_template('download.html', error='You must specify a URL!', background=database.Database("config.json").get('background'))
else:
return render_template("download.html", background=database.Database("config.json").get('background'))
@app.route('/add/upload/', methods=['GET', 'POST'])
def uploadLocalVideo():
if request.method == 'POST':
if 'file' not in request.files:
return eflash('No selected file', request.url)
file = request.files['file']
if file.filename == '':
return eflash('No selected file', request.url)
elif request.form['title'] == '':
return eflash('Title is required', request.url)
else:
filename = secure_filename(file.filename)
os.mkdir(database.Database("config.json").get("videofolder") + "/" + request.form['title'])
file.save(os.path.join(database.Database("config.json").get("videofolder") + "/"+request.form['title'], filename))
with open(database.Database("config.json").get("videofolder") + "/"+request.form['title'] + '/' + request.form['title'] + ".description", 'w') as file1:
file1.write(request.form['nm'])
return redirect('/videos/{}'.format(request.form['title']))
return "Nothing happened???"
else:
return render_template("upload.html", background=database.Database("config.json").get('background'))
@app.route('/settings/', methods=['GET', 'POST'])
def settingsPage():
if request.method == 'POST':
config = database.Database("config.json")
for field in request.form:
config.set(field, request.form[field])
return redirect("/?popup=Settings%20Successfully%20Saved")
else:
config = database.Database("config.json")
return render_template("settings.html", config=config, background=database.Database("config.json").get('background'))
@app.errorhandler(404)
def page_not_found(e):
return eflash(e, url_for("homePage"), "404: Not Found", "Go Home", "Feature you want added? Submit a request at <a href=https://github.com/r2boyo25/yt-pi/issues/new/choose>my GitHub page. </a>")
#return render_template('404.html', error=e)
@app.errorhandler(400)
def bad_requesthandler(e):
return eflash(e, url_for("homePage"), "404: Not Found", "Go Home", "Submit a bug report at <a href=https://github.com/r2boyo25/yt-pi/issues/new/choose>my GitHub page. </a>")
#return render_template('400.html', error=e)
if __name__ == "__main__":
currentConfig = json.loads(requests.get("https://raw.githubusercontent.com/R2Boyo25/yt-pi/master/config.json").text)
if float(currentConfig["version"]) > float(database.Database("config.json").get('version')):
if not ("/" + ( '/'.join(os.path.abspath(database.Database("config.json").get("videofolder")).split("/")) ) in os.path.abspath("yt-pi.py")):
os.chdir("./..")
os.system("rm -rf yt-pi")
os.system("git clone https://github.com/r2boyo25/yt-pi")
os.chdir("yt-pi")
app.run(debug=True, host='0.0.0.0', port=database.Database("config.json").get("port"))
|
[
"os.path.abspath",
"flask.redirect",
"flask.Flask",
"os.path.exists",
"shutil.which",
"os.system",
"werkzeug.utils.secure_filename",
"flask.url_for",
"database.Database",
"flask.render_template",
"requests.get",
"os.chdir",
"flask.send_from_directory",
"os.rm",
"os.urandom"
] |
[((278, 293), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (283, 293), False, 'from flask import Flask, abort, render_template, redirect, url_for, request, session, send_from_directory, flash, jsonify\n'), ((407, 421), 'os.urandom', 'os.urandom', (['(24)'], {}), '(24)\n', (417, 421), False, 'import random, os, database, json, requests\n'), ((767, 857), 'flask.render_template', 'render_template', (['"""error.html"""'], {'e': 'error', 'url': 'back', 'error': 'title', 'urlt': 'backt', 'extra': 'extra'}), "('error.html', e=error, url=back, error=title, urlt=backt,\n extra=extra)\n", (782, 857), False, 'from flask import Flask, abort, render_template, redirect, url_for, request, session, send_from_directory, flash, jsonify\n'), ((1432, 1487), 'flask.send_from_directory', 'send_from_directory', (["app.config['DataFolder']", 'filename'], {}), "(app.config['DataFolder'], filename)\n", (1451, 1487), False, 'from flask import Flask, abort, render_template, redirect, url_for, request, session, send_from_directory, flash, jsonify\n'), ((4080, 4118), 'flask.send_from_directory', 'send_from_directory', (['"""data"""', '"""eye.png"""'], {}), "('data', 'eye.png')\n", (4099, 4118), False, 'from flask import Flask, abort, render_template, redirect, url_for, request, session, send_from_directory, flash, jsonify\n'), ((593, 604), 'shutil.which', 'which', (['name'], {}), '(name)\n', (598, 604), False, 'from shutil import which\n'), ((7832, 7864), 'database.Database', 'database.Database', (['"""config.json"""'], {}), "('config.json')\n", (7849, 7864), False, 'import random, os, database, json, requests\n'), ((7969, 8020), 'flask.redirect', 'redirect', (['"""/?popup=Settings%20Successfully%20Saved"""'], {}), "('/?popup=Settings%20Successfully%20Saved')\n", (7977, 8020), False, 'from flask import Flask, abort, render_template, redirect, url_for, request, session, send_from_directory, flash, jsonify\n'), ((8050, 8082), 'database.Database', 'database.Database', (['"""config.json"""'], {}), "('config.json')\n", (8067, 8082), False, 'import random, os, database, json, requests\n'), ((8291, 8310), 'flask.url_for', 'url_for', (['"""homePage"""'], {}), "('homePage')\n", (8298, 8310), False, 'from flask import Flask, abort, render_template, redirect, url_for, request, session, send_from_directory, flash, jsonify\n'), ((8592, 8611), 'flask.url_for', 'url_for', (['"""homePage"""'], {}), "('homePage')\n", (8599, 8611), False, 'from flask import Flask, abort, render_template, redirect, url_for, request, session, send_from_directory, flash, jsonify\n'), ((5196, 5209), 'flask.redirect', 'redirect', (['"""/"""'], {}), "('/')\n", (5204, 5209), False, 'from flask import Flask, abort, render_template, redirect, url_for, request, session, send_from_directory, flash, jsonify\n'), ((5876, 5906), 'os.path.exists', 'os.path.exists', (['"""download.mp4"""'], {}), "('download.mp4')\n", (5890, 5906), False, 'import random, os, database, json, requests\n'), ((5960, 6031), 'os.system', 'os.system', (["('python3 -m youtube_dl -f best -o ' + 'download0.mp4 ' + url)"], {}), "('python3 -m youtube_dl -f best -o ' + 'download0.mp4 ' + url)\n", (5969, 6031), False, 'import random, os, database, json, requests\n'), ((6064, 6125), 'flask.send_from_directory', 'send_from_directory', (['"""."""', '"""download0.mp4"""'], {'as_attachment': '(True)'}), "('.', 'download0.mp4', as_attachment=True)\n", (6083, 6125), False, 'from flask import Flask, abort, render_template, redirect, url_for, request, session, send_from_directory, flash, jsonify\n'), ((8859, 8947), 'requests.get', 'requests.get', (['"""https://raw.githubusercontent.com/R2Boyo25/yt-pi/master/config.json"""'], {}), "(\n 'https://raw.githubusercontent.com/R2Boyo25/yt-pi/master/config.json')\n", (8871, 8947), False, 'import random, os, database, json, requests\n'), ((9210, 9226), 'os.chdir', 'os.chdir', (['"""./.."""'], {}), "('./..')\n", (9218, 9226), False, 'import random, os, database, json, requests\n'), ((9240, 9265), 'os.system', 'os.system', (['"""rm -rf yt-pi"""'], {}), "('rm -rf yt-pi')\n", (9249, 9265), False, 'import random, os, database, json, requests\n'), ((9279, 9335), 'os.system', 'os.system', (['"""git clone https://github.com/r2boyo25/yt-pi"""'], {}), "('git clone https://github.com/r2boyo25/yt-pi')\n", (9288, 9335), False, 'import random, os, database, json, requests\n'), ((9361, 9378), 'os.chdir', 'os.chdir', (['"""yt-pi"""'], {}), "('yt-pi')\n", (9369, 9378), False, 'import random, os, database, json, requests\n'), ((1898, 1930), 'database.Database', 'database.Database', (['"""config.json"""'], {}), "('config.json')\n", (1915, 1930), False, 'import random, os, database, json, requests\n'), ((5925, 5946), 'os.rm', 'os.rm', (['"""download.mp4"""'], {}), "('download.mp4')\n", (5930, 5946), False, 'import random, os, database, json, requests\n'), ((6974, 7004), 'werkzeug.utils.secure_filename', 'secure_filename', (['file.filename'], {}), '(file.filename)\n', (6989, 7004), False, 'from werkzeug.utils import secure_filename\n'), ((9167, 9194), 'os.path.abspath', 'os.path.abspath', (['"""yt-pi.py"""'], {}), "('yt-pi.py')\n", (9182, 9194), False, 'import random, os, database, json, requests\n'), ((976, 1008), 'database.Database', 'database.Database', (['"""config.json"""'], {}), "('config.json')\n", (993, 1008), False, 'import random, os, database, json, requests\n'), ((1230, 1262), 'database.Database', 'database.Database', (['"""config.json"""'], {}), "('config.json')\n", (1247, 1262), False, 'import random, os, database, json, requests\n'), ((1305, 1337), 'database.Database', 'database.Database', (['"""config.json"""'], {}), "('config.json')\n", (1322, 1337), False, 'import random, os, database, json, requests\n'), ((2365, 2397), 'database.Database', 'database.Database', (['"""config.json"""'], {}), "('config.json')\n", (2382, 2397), False, 'import random, os, database, json, requests\n'), ((4438, 4470), 'database.Database', 'database.Database', (['"""config.json"""'], {}), "('config.json')\n", (4455, 4470), False, 'import random, os, database, json, requests\n'), ((4586, 4618), 'database.Database', 'database.Database', (['"""config.json"""'], {}), "('config.json')\n", (4603, 4618), False, 'import random, os, database, json, requests\n'), ((8997, 9029), 'database.Database', 'database.Database', (['"""config.json"""'], {}), "('config.json')\n", (9014, 9029), False, 'import random, os, database, json, requests\n'), ((9425, 9457), 'database.Database', 'database.Database', (['"""config.json"""'], {}), "('config.json')\n", (9442, 9457), False, 'import random, os, database, json, requests\n'), ((331, 356), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (346, 356), False, 'import random, os, database, json, requests\n'), ((2512, 2544), 'database.Database', 'database.Database', (['"""config.json"""'], {}), "('config.json')\n", (2529, 2544), False, 'import random, os, database, json, requests\n'), ((3039, 3071), 'database.Database', 'database.Database', (['"""config.json"""'], {}), "('config.json')\n", (3056, 3071), False, 'import random, os, database, json, requests\n'), ((3596, 3628), 'database.Database', 'database.Database', (['"""config.json"""'], {}), "('config.json')\n", (3613, 3628), False, 'import random, os, database, json, requests\n'), ((5485, 5517), 'database.Database', 'database.Database', (['"""config.json"""'], {}), "('config.json')\n", (5502, 5517), False, 'import random, os, database, json, requests\n'), ((6429, 6461), 'database.Database', 'database.Database', (['"""config.json"""'], {}), "('config.json')\n", (6446, 6461), False, 'import random, os, database, json, requests\n'), ((7657, 7689), 'database.Database', 'database.Database', (['"""config.json"""'], {}), "('config.json')\n", (7674, 7689), False, 'import random, os, database, json, requests\n'), ((8166, 8198), 'database.Database', 'database.Database', (['"""config.json"""'], {}), "('config.json')\n", (8183, 8198), False, 'import random, os, database, json, requests\n'), ((4232, 4264), 'database.Database', 'database.Database', (['"""config.json"""'], {}), "('config.json')\n", (4249, 4264), False, 'import random, os, database, json, requests\n'), ((5346, 5378), 'database.Database', 'database.Database', (['"""config.json"""'], {}), "('config.json')\n", (5363, 5378), False, 'import random, os, database, json, requests\n'), ((6290, 6322), 'database.Database', 'database.Database', (['"""config.json"""'], {}), "('config.json')\n", (6307, 6322), False, 'import random, os, database, json, requests\n'), ((3415, 3447), 'database.Database', 'database.Database', (['"""config.json"""'], {}), "('config.json')\n", (3432, 3447), False, 'import random, os, database, json, requests\n'), ((7027, 7059), 'database.Database', 'database.Database', (['"""config.json"""'], {}), "('config.json')\n", (7044, 7059), False, 'import random, os, database, json, requests\n'), ((3918, 3950), 'database.Database', 'database.Database', (['"""config.json"""'], {}), "('config.json')\n", (3935, 3950), False, 'import random, os, database, json, requests\n'), ((5026, 5058), 'database.Database', 'database.Database', (['"""config.json"""'], {}), "('config.json')\n", (5043, 5058), False, 'import random, os, database, json, requests\n'), ((7146, 7178), 'database.Database', 'database.Database', (['"""config.json"""'], {}), "('config.json')\n", (7163, 7178), False, 'import random, os, database, json, requests\n'), ((9097, 9129), 'database.Database', 'database.Database', (['"""config.json"""'], {}), "('config.json')\n", (9114, 9129), False, 'import random, os, database, json, requests\n'), ((2718, 2750), 'database.Database', 'database.Database', (['"""config.json"""'], {}), "('config.json')\n", (2735, 2750), False, 'import random, os, database, json, requests\n'), ((7261, 7293), 'database.Database', 'database.Database', (['"""config.json"""'], {}), "('config.json')\n", (7278, 7293), False, 'import random, os, database, json, requests\n')]
|
# -*- coding: utf-8 -*-
"""
Universidade Federal de Minas Gerais
Departamento de Ciência da Computação
Programa de Pós-Graduação em Ciência da Computação
Computação Natural
Trabalho Prático 1
Feito por <NAME>, 2016672212.
"""
#--------------------------------------------------------------#
#------------------------ CONSTANTES --------------------------#
#--------------------------------------------------------------#
# G0, G1.1, G1.2, G2.1, G2.2, G3
PHASE_START = [0, 1, 1, 2, 2, 4]
PHASE_END = [7, 13, 13, 15, 15, 17]
MOVES_SET = [ #-#-#-#-
['L', 'R', 'F', 'B', 'U', 'D' ], # G0 #
['L2', 'R2', 'F', 'B', 'U', 'D' ], # G1.1 #
['L2', 'R2', 'F', 'B', 'U', 'D' ], # G1.2 #
['L2', 'R2', 'F2', 'B2', 'U', 'D' ], # G2.1 #
['L2', 'R2', 'F2', 'B2', 'U', 'D' ], # G2.2 #
['F2', 'R2', 'F2', 'B2', 'U2', 'D2'] # G3 #
] #-#-#-#-
OP = {'O':'R','R':'O','W':'Y','Y':'W','G':'B','B':'G'}
INVERSE = {'F': 'Fi','L': 'Li','R': 'Ri','B': 'Bi',
'U': 'Ui','D': 'Di','Fi': 'F','Li': 'L','Ri': 'R',
'Bi': 'B','Ui': 'U','Di': 'D','F2': 'F2','L2': 'L2',
'R2': 'R2','B2': 'B2','U2': 'U2','D2': 'D2'}
SIMPLE_180 = {'F F2': 'Fi','L L2': 'Li','R R2': 'Ri',
'B B2': 'Bi','U U2': 'Ui','D D2': 'Di','F2 F': 'Fi',
'L2 L': 'Li','R2 R': 'Ri','B2 B': 'Bi','U2 U': 'Ui',
'D2 D': 'Di','Fi F2': 'F','Li L2': 'L','Ri R2': 'R',
'Bi B2': 'B','Ui U2': 'U','Di D2': 'D','F2 Fi': 'F',
'L2 Li': 'L','R2 Ri': 'R','B2 Bi': 'B','U2 Ui': 'U',
'D2 Di': 'D'}
#--------------------------------------------------------------#
import copy, random
from cube import Cube
class Color():
'''
Importante!
Esta classe se comporta como um 'Enum' à fim
de facilitar a troca de cores de referência
caso o arquivo de entrada esteja invertido
de alguma forma.
'''
TOP = 'Y'
BOTTOM = 'W'
FRONT = 'O'
BACK = 'R'
LEFT = 'G'
RIGHT = 'B'
class Individual(object):
def __init__(self, ind):
'''
Construtor do Indivíduo
Se o parâmetro passado for um indivíduo, é
feita então uma cópia.
'''
if isinstance(ind, Individual):
self.cube = copy.deepcopy(ind.cube)
self.genes = list(ind.genes)
self.fitness = ind.fitness
self.phase = ind.phase
self.size = ind.size
else:
self.cube = copy.deepcopy(ind)
self.genes = list()
self.fitness = -1
self.phase = 0
self.size = 0
def __repr__(self):
return self.__str__()
def __str__(self):
'''
Representação gráfica do indivíduo.
'''
self.cube.colored_printf()
return "{}[PH{}][L{}][F{}]".format(self.phase, self.genes, self.size, self.fitness)
def apply(self, new_moves):
'''
Este método aplica os novos movimentos gerados
pela mutação para o cubo que pertence à este
indivíduo.
'''
for gene in new_moves:
mode = 0 # Movimento horário
if len(gene) == 2:
mode = 1 if gene[1] == 'i' else 2 # Movimento anti-horário ou 180
if gene[0] == 'F':
self.cube.move_f(mode)
elif gene[0] == 'R':
self.cube.move_r(mode)
elif gene[0] == 'U':
self.cube.move_u(mode)
elif gene[0] == 'B':
self.cube.move_b(mode)
elif gene[0] == 'L':
self.cube.move_l(mode)
elif gene[0] == 'D':
self.cube.move_d(mode)
def mutation(self, phase):
'''
Este método cria uma nova jogada à ser aplicada no cubo
mágico, de acordo com a fase atual do algoritmo, temos
movimentos específicos e quantidade limitada, pelas listas
PHASE_START, PHASE_END e MOVES_SET. Após a criação a os
movimentos são "limpos" e acrescentados ao indivíduo.
'''
# Atualiza a fase e reseta o fitness
self.phase = phase
self.fitness = -1
# Geração aleatória de uma jogada
new_genes = list()
new_size = random.randint(PHASE_START[self.phase], PHASE_END[self.phase])
for i in range(new_size):
new_genes.append(random.choice(MOVES_SET[self.phase]))
# A jogada é aplicada ao cubo
self.apply(new_genes)
# Limpeza de movimentos
self.genes += new_genes
self.size += new_size
self.clean()
def clean(self):
'''
Este método recebe os novos movimentos
obtidos pela mutação e realiza uma limpeza
em busca de movimentos complementares ou
movimentos que não geram efeito final no cubo.
'''
i = 0
removed = 0
new_list = list(self.genes)
while i < self.size - removed - 1:
x = new_list[i]
y = new_list[i+1]
#-#-# Genes inversos seguidos são removidos #-#-#
if x == INVERSE[y]:
del new_list[i]
del new_list[i]
removed += 2
if i > 0:
i -= 1
#-#-# Genes iguais seguidos são convertidos para um gene 180 #-#-#
elif x == y:
del new_list[i]
new_list[i] = str(new_list[i][0]+'2')
removed += 1
if i > 0:
i -= 1
#-# Simplificação de um 90 e 180 para um 90 invertido #-#
elif str(x+' '+y) in SIMPLE_180:
del new_list[i]
new_list[i] = SIMPLE_180[str(x+' '+y)]
removed += 1
if i > 0:
i -= 1
else:
i += 1
#-#-#
self.genes = new_list
self.size -= removed
def get_fitness(self, phase):
'''
Cálculo da fitness
Recebe por parâmetro a fase atual do
algoritmo para realizar o cálculo corretamente.
'''
from main import CONST_PHASES
self.phase = phase
c = self.size
if self.fitness == -1:
result = 0
if self.phase == 0:
'''
Cálculo da fitness G0 -> G1
Os meios precisam ser orientados da maneira correta,
ou seja, é possível colocá-los em seus lugares sem uso
dos movimentos L e R.
'''
w = 0
#-#-# Mapeamento de todos os meios do cubo #-#-#
# Deu trabalho :(
edge_pieces = [
(self.cube.matrix[0][1][0], self.cube.matrix[1][1][2]), # O->G
(self.cube.matrix[0][1][2], self.cube.matrix[2][1][0]), # O->B
(self.cube.matrix[0][0][1], self.cube.matrix[4][2][1]), # O->Y
(self.cube.matrix[0][2][1], self.cube.matrix[5][0][1]), # O->W
(self.cube.matrix[3][1][0], self.cube.matrix[2][1][2]), # R->B
(self.cube.matrix[3][1][2], self.cube.matrix[1][1][0]), # R->G
(self.cube.matrix[3][0][1], self.cube.matrix[4][0][1]), # R->Y
(self.cube.matrix[3][2][1], self.cube.matrix[5][2][1]), # R->W
(self.cube.matrix[1][0][1], self.cube.matrix[4][1][0]), # G->Y
(self.cube.matrix[1][2][1], self.cube.matrix[5][1][0]), # G->W
(self.cube.matrix[2][0][1], self.cube.matrix[4][1][2]), # B->Y
(self.cube.matrix[2][2][1], self.cube.matrix[5][1][2]) # B->W
]
# A cada meio não-orientado, 1 ponto de punição.
for piece in edge_pieces:
if piece[0] in [Color.TOP, Color.BOTTOM]:
w += 1
elif piece[0] in [Color.LEFT, Color.RIGHT] and \
piece[1] in [Color.FRONT, Color.BACK]:
w += 1
# Parâmetros de multiplicação
# Aumenta ou diminui a pressão seletiva da fase.
result = (CONST_PHASES[0] * w) + c
elif self.phase == 1:
'''
Cálculo da fitness G1 -> G2 (Parte 1)
Nesta parte 1, é colocado os meios na camada do meio
apenas. Este processo facilita a convergência para o
real cálculo da fitness G1->G2 na parte 2.
'''
w = 0
#-#-# Punição por meios fora da camada do meio. #-#-#
f = self.cube.matrix[0][1] # Face da frente, camada do meio.
w += 0 if f[0] == Color.FRONT or f[0] == Color.BACK else 1
w += 0 if f[2] == Color.FRONT or f[2] == Color.BACK else 1
f = self.cube.matrix[3][1] # Face de trás, camada do meio.
w += 0 if f[0] == Color.FRONT or f[0] == Color.BACK else 1
w += 0 if f[2] == Color.FRONT or f[2] == Color.BACK else 1
f = self.cube.matrix[1][1] # Face da esquerda, camada do meio.
w += 0 if f[0] == Color.LEFT or f[0] == Color.RIGHT else 1
w += 0 if f[2] == Color.LEFT or f[2] == Color.RIGHT else 1
f = self.cube.matrix[2][1] # Face da direita, camada do meio.
w += 0 if f[0] == Color.LEFT or f[0] == Color.RIGHT else 1
w += 0 if f[2] == Color.LEFT or f[2] == Color.RIGHT else 1
# Parâmetros de multiplicação
# Aumenta ou diminui a pressão seletiva da fase.
result = (CONST_PHASES[1] * w) + c
elif self.phase == 2:
'''
Cálculo da fitness G1 -> G2 (Parte 2)
Todos as cores FRONT e BACK precisam estar
nas faces FRONT e BACK.
'''
# Mesmo código da fase 1 #
w = 0
#-#-# Punição por meios fora da camada do meio. #-#-#
f = self.cube.matrix[0][1] # Face da frente, camada do meio.
w += 0 if f[0] == Color.FRONT or f[0] == Color.BACK else 1
w += 0 if f[2] == Color.FRONT or f[2] == Color.BACK else 1
f = self.cube.matrix[3][1] # Face de trás, camada do meio.
w += 0 if f[0] == Color.FRONT or f[0] == Color.BACK else 1
w += 0 if f[2] == Color.FRONT or f[2] == Color.BACK else 1
f = self.cube.matrix[1][1] # Face da esquerda, camada do meio.
w += 0 if f[0] == Color.LEFT or f[0] == Color.RIGHT else 1
w += 0 if f[2] == Color.LEFT or f[2] == Color.RIGHT else 1
f = self.cube.matrix[2][1] # Face da direita, camada do meio.
w += 0 if f[0] == Color.LEFT or f[0] == Color.RIGHT else 1
w += 0 if f[2] == Color.LEFT or f[2] == Color.RIGHT else 1
result = (CONST_PHASES[1] * w) + c
# Fim do mesmo código da fase 1 #
v = 0
#-#-# Punição para cada canto não orientado. #-#-#
f = self.cube.matrix[4] # Face de cima
v += 0 if f[0][0] == Color.TOP or f[0][0] == Color.BOTTOM else 1
v += 0 if f[0][2] == Color.TOP or f[0][2] == Color.BOTTOM else 1
v += 0 if f[2][0] == Color.TOP or f[2][0] == Color.BOTTOM else 1
v += 0 if f[2][2] == Color.TOP or f[2][2] == Color.BOTTOM else 1
f = self.cube.matrix[5] # Face de baixo
v += 0 if f[0][0] == Color.TOP or f[0][0] == Color.BOTTOM else 1
v += 0 if f[0][2] == Color.TOP or f[0][2] == Color.BOTTOM else 1
v += 0 if f[2][0] == Color.TOP or f[2][0] == Color.BOTTOM else 1
v += 0 if f[2][2] == Color.TOP or f[2][2] == Color.BOTTOM else 1
# Parâmetros de multiplicação
# Aumenta ou diminui a pressão seletiva da fase.
result = (CONST_PHASES[2] * v) + result
elif self.phase == 3:
'''
Cálculo da fitness G2 -> G3 (Parte 1)
Todos as faces precisam ter sua cor original ou sua cor oposta,
além dos cantos vizinhos precisam compartilhar a mesma cor "lateral",
não importando o topo/baixo.
'''
y = 0
#-#-# Mapeamento de todos os cantos do cubo #-#-#
# Também deu trabalho :(
all_corners = [
(self.cube.matrix[0][0][0], self.cube.matrix[1][0][2]), #Y-O-G
(self.cube.matrix[0][2][0], self.cube.matrix[1][2][2]), #W-O-G
(self.cube.matrix[0][0][2], self.cube.matrix[2][0][0]), #Y-O-B
(self.cube.matrix[0][2][2], self.cube.matrix[2][2][0]), #W-O-B
(self.cube.matrix[3][0][0], self.cube.matrix[2][0][2]), #Y-R-B
(self.cube.matrix[3][2][0], self.cube.matrix[2][2][2]), #W-R-B
(self.cube.matrix[3][0][2], self.cube.matrix[1][0][0]), #Y-R-G
(self.cube.matrix[3][2][2], self.cube.matrix[1][2][0]), #W-R-G
]
#-#-# Punição para cada canto da camada superior que não combina
# sua cor com o canto da camada inferior (formando uma "coluna"). #-#-#
for i in range(0, 8, 2):
if all_corners[i][0] != all_corners[i+1][0] or \
all_corners[i][1] != all_corners[i+1][1]:
y += 1
# Parâmetros de multiplicação
# Aumenta ou diminui a pressão seletiva da fase.
result = (CONST_PHASES[3] * y) + c
elif self.phase == 4:
x, y = 0, 0
# Mesmo código da fase 3 #
#-#-# Mapeamento de todos os cantos do cubo #-#-#
# Também deu trabalho :(
all_corners = [
(self.cube.matrix[0][0][0], self.cube.matrix[1][0][2]), #Y-O-G
(self.cube.matrix[0][2][0], self.cube.matrix[1][2][2]), #W-O-G
(self.cube.matrix[0][0][2], self.cube.matrix[2][0][0]), #Y-O-B
(self.cube.matrix[0][2][2], self.cube.matrix[2][2][0]), #W-O-B
(self.cube.matrix[3][0][0], self.cube.matrix[2][0][2]), #Y-R-B
(self.cube.matrix[3][2][0], self.cube.matrix[2][2][2]), #W-R-B
(self.cube.matrix[3][0][2], self.cube.matrix[1][0][0]), #Y-R-G
(self.cube.matrix[3][2][2], self.cube.matrix[1][2][0]), #W-R-G
]
#-#-# Punição para cada canto da camada superior que não combina
# sua cor com o canto da camada inferior (formando uma "coluna"). #-#-#
for i in range(0, 8, 2):
if all_corners[i][0] != all_corners[i+1][0] or \
all_corners[i][1] != all_corners[i+1][1]:
y += 1
result = (CONST_PHASES[3] * y) + c
# Fim do mesmo código da fase 3 #
#-#-# Recebe uma punição cada cor de cubo que não é a
# cor da correta ou não é a cor oposta da face. #-#-#
for face in self.cube.matrix:
center = face[1][1]
for i in range(3):
for j in range(3):
if face[i][j] != center and face[i][j] != OP[center]:
x += 1
# Parâmetros de multiplicação
# Aumenta ou diminui a pressão seletiva da fase.
result = (CONST_PHASES[4] * x) + result
elif self.phase == 5:
'''
Cálculo da fitness G3 -> G4 (Resolvido)
Agora apenas movimentos de 180 graus são permitidos, a função
de fitness simplesmente olha a cor de cada cubo e verifica com
o centro.
'''
z = 0
#-#-# Fase final, recebe uma punição por cada cor
# que não é a cor da face atual. #-#-#
for face in self.cube.matrix:
center = face[1][1]
for i in range(3):
for j in range(3):
if face[i][j] != center:
z += 1
# Parâmetros de multiplicação
# Aumenta ou diminui a pressão seletiva da fase.
result = (CONST_PHASES[5] * z) + c
self.fitness = result
return self.fitness
|
[
"copy.deepcopy",
"random.choice",
"random.randint"
] |
[((4218, 4280), 'random.randint', 'random.randint', (['PHASE_START[self.phase]', 'PHASE_END[self.phase]'], {}), '(PHASE_START[self.phase], PHASE_END[self.phase])\n', (4232, 4280), False, 'import copy, random\n'), ((2233, 2256), 'copy.deepcopy', 'copy.deepcopy', (['ind.cube'], {}), '(ind.cube)\n', (2246, 2256), False, 'import copy, random\n'), ((2444, 2462), 'copy.deepcopy', 'copy.deepcopy', (['ind'], {}), '(ind)\n', (2457, 2462), False, 'import copy, random\n'), ((4344, 4380), 'random.choice', 'random.choice', (['MOVES_SET[self.phase]'], {}), '(MOVES_SET[self.phase])\n', (4357, 4380), False, 'import copy, random\n')]
|
import pytest
import pandas as pd
from src.clients.s3_client import S3Client
from src.sources.data_loader import DataLoader
def test_init():
dl = DataLoader("test_source", "test_client")
assert dl.data_source == "test_source"
assert dl.client == "test_client"
# def test_load_when_database_client():
# db_client = DatabaseClient(temp=True)
# db_client.connect()
# db_client.execute("CREATE TABLE test_table (name TEXT)")
# db_client.execute("INSERT INTO test_table(name) VALUES ('test_name')")
# dl = DataLoader("test_table", client=db_client)
# data, structured, name = dl.load()
# assert isinstance(data, pd.DataFrame)
# assert data.columns == "name"
# assert data.values == ["test_name"]
# assert structured
# assert name == "test_table"
#
#
# def test_load_when_s3_file_specified():
# with pytest.raises(NotImplementedError):
# dl = DataLoader("test", S3Client())
# dl.load()
def test_load_when_csv_file_specified():
dl = DataLoader("src/tests/test_data/sample/names.csv", client=None)
data, structured, name = dl.load()
assert isinstance(data, pd.DataFrame)
assert data.columns == ["name"]
assert len(data.values) == 250
assert structured
assert name == "src/tests/test_data/sample/names.csv"
def test_load_when_dataframe_specified():
test_df = pd.read_csv("src/tests/test_data/sample/names.csv")
dl = DataLoader(test_df, client=None)
data, structured, name = dl.load()
assert isinstance(data, pd.DataFrame)
assert data.columns == ["name"]
assert len(data.values) == 250
assert structured
assert name == "pandas DataFrame (hash 5214317343533855748)"
def test_load_when_txt_file_specified():
dl = DataLoader("src/tests/test_data/sample/email.txt", client=None)
data, structured, name = dl.load()
assert isinstance(data, str)
assert data.startswith("Dear Mr. Connell")
assert not structured
assert name == "src/tests/test_data/sample/email.txt"
def test_load_when_pdf_file_specified():
dl = DataLoader("src/tests/test_data/sample/academic_paper.pdf", client=None)
data, structured, name = dl.load()
assert isinstance(data, str)
assert data.startswith("Enriching Word Vectors")
assert not structured
assert name == "src/tests/test_data/sample/academic_paper.pdf"
def test_load_when_xml_file_specified():
dl = DataLoader("src/tests/test_data/sample/employees.xml", client=None)
data, structured, name = dl.load()
assert isinstance(data, pd.DataFrame)
assert len(data) == 6
assert len(data.columns) == 9
assert structured
assert name == "src/tests/test_data/sample/employees.xml"
def test_load_when_xls_file_specified():
dl = DataLoader("src/tests/test_data/sample/dummy.xls", client=None)
data, structured, name = dl.load()
assert isinstance(data, pd.DataFrame)
assert len(data) == 4
assert len(data.columns) == 3
assert structured
assert name == "src/tests/test_data/sample/dummy.xls"
def test_load_when_xlsx_file_specified():
dl = DataLoader("src/tests/test_data/sample/dummy.xlsx", client=None)
data, structured, name = dl.load()
assert isinstance(data, pd.DataFrame)
assert len(data) == 4
assert len(data.columns) == 3
assert structured
assert name == "src/tests/test_data/sample/dummy.xlsx"
def test_load_when_multi_sheet_xlsx():
dl = DataLoader("src/tests/test_data/sample/dummy_two_sheets.xlsx", client=None)
with pytest.raises(NotImplementedError):
dl.load()
|
[
"pandas.read_csv",
"pytest.raises",
"src.sources.data_loader.DataLoader"
] |
[((154, 194), 'src.sources.data_loader.DataLoader', 'DataLoader', (['"""test_source"""', '"""test_client"""'], {}), "('test_source', 'test_client')\n", (164, 194), False, 'from src.sources.data_loader import DataLoader\n'), ((1014, 1077), 'src.sources.data_loader.DataLoader', 'DataLoader', (['"""src/tests/test_data/sample/names.csv"""'], {'client': 'None'}), "('src/tests/test_data/sample/names.csv', client=None)\n", (1024, 1077), False, 'from src.sources.data_loader import DataLoader\n'), ((1368, 1419), 'pandas.read_csv', 'pd.read_csv', (['"""src/tests/test_data/sample/names.csv"""'], {}), "('src/tests/test_data/sample/names.csv')\n", (1379, 1419), True, 'import pandas as pd\n'), ((1429, 1461), 'src.sources.data_loader.DataLoader', 'DataLoader', (['test_df'], {'client': 'None'}), '(test_df, client=None)\n', (1439, 1461), False, 'from src.sources.data_loader import DataLoader\n'), ((1753, 1816), 'src.sources.data_loader.DataLoader', 'DataLoader', (['"""src/tests/test_data/sample/email.txt"""'], {'client': 'None'}), "('src/tests/test_data/sample/email.txt', client=None)\n", (1763, 1816), False, 'from src.sources.data_loader import DataLoader\n'), ((2072, 2144), 'src.sources.data_loader.DataLoader', 'DataLoader', (['"""src/tests/test_data/sample/academic_paper.pdf"""'], {'client': 'None'}), "('src/tests/test_data/sample/academic_paper.pdf', client=None)\n", (2082, 2144), False, 'from src.sources.data_loader import DataLoader\n'), ((2415, 2482), 'src.sources.data_loader.DataLoader', 'DataLoader', (['"""src/tests/test_data/sample/employees.xml"""'], {'client': 'None'}), "('src/tests/test_data/sample/employees.xml', client=None)\n", (2425, 2482), False, 'from src.sources.data_loader import DataLoader\n'), ((2760, 2823), 'src.sources.data_loader.DataLoader', 'DataLoader', (['"""src/tests/test_data/sample/dummy.xls"""'], {'client': 'None'}), "('src/tests/test_data/sample/dummy.xls', client=None)\n", (2770, 2823), False, 'from src.sources.data_loader import DataLoader\n'), ((3098, 3162), 'src.sources.data_loader.DataLoader', 'DataLoader', (['"""src/tests/test_data/sample/dummy.xlsx"""'], {'client': 'None'}), "('src/tests/test_data/sample/dummy.xlsx', client=None)\n", (3108, 3162), False, 'from src.sources.data_loader import DataLoader\n'), ((3435, 3510), 'src.sources.data_loader.DataLoader', 'DataLoader', (['"""src/tests/test_data/sample/dummy_two_sheets.xlsx"""'], {'client': 'None'}), "('src/tests/test_data/sample/dummy_two_sheets.xlsx', client=None)\n", (3445, 3510), False, 'from src.sources.data_loader import DataLoader\n'), ((3520, 3554), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (3533, 3554), False, 'import pytest\n')]
|
from fairseq.models.roberta import RobertaModel
label_fn = lambda label: roberta.task.label_dictionary.string(
[label + roberta.task.label_dictionary.nspecial]
)
roberta = RobertaModel.from_pretrained(
model_name_or_path='/path/to/checkpoints',
checkpoint_file='checkpoint_best.pt',
data_name_or_path='/path/to/data'
)
roberta.eval()
tokens = roberta.encode('china is a great country')
pred = roberta.predict_label("kaggle_ner", tokens, return_logits=True)
print(pred[1][0])
print([label_fn(int(p)) for p in pred[1][0].tolist()])
|
[
"fairseq.models.roberta.RobertaModel.from_pretrained"
] |
[((177, 325), 'fairseq.models.roberta.RobertaModel.from_pretrained', 'RobertaModel.from_pretrained', ([], {'model_name_or_path': '"""/path/to/checkpoints"""', 'checkpoint_file': '"""checkpoint_best.pt"""', 'data_name_or_path': '"""/path/to/data"""'}), "(model_name_or_path='/path/to/checkpoints',\n checkpoint_file='checkpoint_best.pt', data_name_or_path='/path/to/data')\n", (205, 325), False, 'from fairseq.models.roberta import RobertaModel\n')]
|
from evasdk import EvaAutoRenewError, EvaError
import time
import pytest
# TODO: this rely on having an actual robot, should be rewritten to be mockable
@pytest.mark.robot_required
class TestAuth:
def test_create_new_session(self, eva):
token = eva.auth_create_session()
assert(len(token) == 36)
assert(token == eva._Eva__http_client.session_token)
def test_invalidate_session(self, eva):
# start a new session, then invalidate it
token = eva.auth_create_session()
eva.auth_invalidate_session()
# this should automatically start a new, different session
eva.users_get()
assert(token != eva._Eva__http_client.session_token)
@pytest.mark.slow
def test_auto_renew_error(self, eva):
api_token = eva._Eva__http_client.api_token
eva._Eva__http_client.api_token = ''
# Ensure it will try to auto-renew
eva.auth_invalidate_session()
time.sleep(3 * 60)
got_auto_renew_error = False
try:
# Won't get a 401, as no session required for this endpoint
eva.api_call_with_auth('GET', '_/init')
except EvaAutoRenewError:
got_auto_renew_error = True
finally:
eva._Eva__http_client.api_token = api_token
assert(got_auto_renew_error)
def test_lock_with_no_existing_session(self, eva):
try:
eva.auth_invalidate_session()
except EvaError:
# could fail if session is already invalidated, so ignore!
pass
with eva.lock():
eva.gpio_set('d1', not eva.gpio_get('d1', 'output'))
@pytest.mark.slow
def test_auto_renew(self, locked_eva):
for _ in range(7):
locked_eva.gpio_set('d1', not locked_eva.gpio_get('d1', 'output'))
time.sleep(5 * 60)
|
[
"time.sleep"
] |
[((961, 979), 'time.sleep', 'time.sleep', (['(3 * 60)'], {}), '(3 * 60)\n', (971, 979), False, 'import time\n'), ((1844, 1862), 'time.sleep', 'time.sleep', (['(5 * 60)'], {}), '(5 * 60)\n', (1854, 1862), False, 'import time\n')]
|
#!/usr/bin/env python
from gevent import monkey # isort:skip
monkey.patch_all() # isort:skip
import argparse
import os
import time
from dataclasses import dataclass
from typing import Iterator, List
from raiden.utils.nursery import Janitor, Nursery
CWD = os.path.dirname(os.path.abspath(__file__))
GENERATE_MESSAGES_SCRIPT = os.path.join(CWD, "generate_messages.py")
@dataclass
class Config:
logdir: str
sender_matrix_server_url: str
receiver_matrix_server_url: str
target_qty_of_chat_rooms: int
qty_of_new_rooms_per_iteration: int
concurrent_messages_per_room: int
wait_before_next_iteration: float
def batch_size(target: int, step: int) -> Iterator[int]:
iterations = target // step
for _ in range(iterations):
yield step
rest = target % step
if rest:
yield rest
def run(config: Config, nursery: Nursery) -> None:
for i, qty_of_rooms in enumerate(
batch_size(config.target_qty_of_chat_rooms, config.qty_of_new_rooms_per_iteration)
):
log_file = os.path.join(config.logdir, str(i))
script_args: List[str] = [
GENERATE_MESSAGES_SCRIPT,
"--concurrent-messages",
str(config.concurrent_messages_per_room),
"--chat-rooms",
str(qty_of_rooms),
log_file,
config.sender_matrix_server_url,
config.receiver_matrix_server_url,
]
nursery.exec_under_watch(script_args)
time.sleep(config.wait_before_next_iteration)
def main() -> None:
parser = argparse.ArgumentParser()
parser.add_argument("--wait-before-next-iteration", type=int, default=60)
parser.add_argument("target_qty_of_chat_rooms", type=int, default=500)
parser.add_argument("qty_of_new_rooms_per_iteration", type=int, default=10)
parser.add_argument("concurrent_messages_per_room", type=int, default=50)
parser.add_argument("logdir", help="Directory used to save the script logs.")
parser.add_argument("server", help="Matrix server used by the sender user.")
parser.add_argument(
"server2",
help=(
"If provided, the server used by the receiever, otherwise the same "
"server as the sender is used."
),
default=None,
nargs="?",
)
args = parser.parse_args()
logdir = args.logdir
os.makedirs(logdir, exist_ok=True)
sender_matrix_server_url = args.server
receiver_matrix_server_url = args.server2 or args.server
config = Config(
logdir=logdir,
sender_matrix_server_url=sender_matrix_server_url,
receiver_matrix_server_url=receiver_matrix_server_url,
target_qty_of_chat_rooms=args.target_qty_of_chat_rooms,
qty_of_new_rooms_per_iteration=args.qty_of_new_rooms_per_iteration,
concurrent_messages_per_room=args.concurrent_messages_per_room,
wait_before_next_iteration=args.wait_before_next_iteration,
)
with Janitor() as nursery:
nursery.spawn_under_watch(run, config, nursery)
nursery.wait(timeout=None)
if __name__ == "__main__":
main()
|
[
"os.path.abspath",
"os.makedirs",
"argparse.ArgumentParser",
"raiden.utils.nursery.Janitor",
"gevent.monkey.patch_all",
"time.sleep",
"os.path.join"
] |
[((63, 81), 'gevent.monkey.patch_all', 'monkey.patch_all', ([], {}), '()\n', (79, 81), False, 'from gevent import monkey\n'), ((331, 372), 'os.path.join', 'os.path.join', (['CWD', '"""generate_messages.py"""'], {}), "(CWD, 'generate_messages.py')\n", (343, 372), False, 'import os\n'), ((277, 302), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (292, 302), False, 'import os\n'), ((1565, 1590), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1588, 1590), False, 'import argparse\n'), ((2368, 2402), 'os.makedirs', 'os.makedirs', (['logdir'], {'exist_ok': '(True)'}), '(logdir, exist_ok=True)\n', (2379, 2402), False, 'import os\n'), ((1484, 1529), 'time.sleep', 'time.sleep', (['config.wait_before_next_iteration'], {}), '(config.wait_before_next_iteration)\n', (1494, 1529), False, 'import time\n'), ((2971, 2980), 'raiden.utils.nursery.Janitor', 'Janitor', ([], {}), '()\n', (2978, 2980), False, 'from raiden.utils.nursery import Janitor, Nursery\n')]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.