| code (string, length 22 to 1.05M) | apis (list, length 1 to 3.31k) | extract_api (string, length 75 to 3.25M) |
|---|---|---|
import struct
import numpy as np
from .nbt import NBTFile
import io
class BufferDecoder(object):
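# Editorial note: sequential reader over an in-memory bytes buffer (position kept
# in self.curr), exposing varint/zigzag integers, fixed-width ints, floats,
# strings, UUIDs and NBT payloads.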
def __init__(self,bytes) -> None:
self.bytes=bytes
self.curr=0
def read_var_uint32(self):
# Do I really have to pinch bits this hard?? A varint uint32 is at most 5 bytes anyway??
i,v=0,0
while i<35:
b=self.read_byte()
v|=(b&0x7f)<<i
if b&0x80==0:
return v
i+=7
assert False,f'read_var_uint32 fail i:{i} v:{v} {self}'
def read_var_int32(self):
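# Editorial note: zigzag decoding -- an even varint v decodes to v // 2, an odd
# varint to -(v + 1) // 2, so values of small magnitude stay short on the wire.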
v_=self.read_var_uint32()
v= np.int32(v_>>1)
if (v_&1)!=0:
v=~v
return int(v)
def read_var_uint64(self):
# Same complaint as above, copy-pasted for the 64-bit variant (a varint uint64 is at most 10 bytes).
i,v=0,0
while i<70:
b=self.read_byte()
v|=(b&0x7f)<<i
if b&0x80==0:
return v
i+=7
assert False,f'read_var_uint64 fail i:{i} v:{v} {self}'
def read_var_int64(self):
v_=self.read_var_uint64()
v= np.int64(v_>>1)
if (v_&1)!=0:
v=~v
return int(v)
def read_vec3(self):
self.curr+=12
return struct.unpack('fff',self.bytes[self.curr-12:self.curr])
def read_float32(self):
self.curr+=4
return struct.unpack('f',self.bytes[self.curr-4:self.curr])[0]
def read_tail(self):
return self.bytes[self.curr:]
def read_byte(self):
self.curr+=1
return struct.unpack('B',self.bytes[self.curr-1:self.curr])[0]
def read_boolen(self):
return self.read_byte()==1
def read_str(self):
length=self.read_var_uint32()
self.curr+=length
return self.bytes[self.curr-length:self.curr].decode(encoding='utf-8')
@staticmethod
def reverseUUIDBytes(bytes):
return bytes[8:] + bytes[:8]
def read_UUID(self):
self.curr+=16
uuid_bytes=self.bytes[self.curr-16:self.curr]
return self.reverseUUIDBytes(uuid_bytes)
def read_uint8(self):
self.curr+=1
return struct.unpack('B',self.bytes[self.curr-1:self.curr])[0]
def read_int16(self):
self.curr+=2
return struct.unpack('h',self.bytes[self.curr-2:self.curr])[0]
def read_int32(self):
self.curr+=4
return struct.unpack('i',self.bytes[self.curr-4:self.curr])[0]
def read_uint32(self):
self.curr+=4
return struct.unpack('I',self.bytes[self.curr-4:self.curr])[0]
def read_bytes(self,_len):
self.curr+=_len
return self.bytes[self.curr-_len:self.curr]
def read(self,_len):
self.curr+=_len
return self.bytes[self.curr-_len:self.curr]
def read_nbt(self,_len=None):
if _len is None:
nbt=NBTFile(self)
return nbt.to_py()
else:
self.curr+=_len
bio=io.BytesIO(self.bytes[self.curr-_len:self.curr])
nbt=NBTFile(bio)
return nbt.to_py()
class BufferEncoder(object):
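# Editorial note: builds a bytes buffer incrementally; appended chunks are only
# joined when the .bytes property is read (see the caching logic below).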
def __init__(self) -> None:
self._bytes_elements=[]
self._bytes_elements_count=0
self._bytes=b''
@property
def bytes(self):
if len(self._bytes_elements)!=self._bytes_elements_count:
self._bytes+=b''.join(self._bytes_elements[self._bytes_elements_count:])
self._bytes_elements_count=len(self._bytes_elements)
return self._bytes
def append(self,bs:bytes):
self._bytes_elements.append(bs)
def write_float32(self,f):
self.append(struct.pack('f',f))
def write_byte(self,b):
self.append(struct.pack('B',b))
def write_boolen(self,b:bool):
self.append(struct.pack('B',b))
def write_uint32(self,i:int):
self.append(struct.pack('I',i))
def write_var_uint32(self,x):
while x>=0x80:
self.write_byte(int((x%128)+0x80))
x>>=7
self.write_byte(x)
def write_var_int32(self,x):
uv=np.uint32(np.uint32(x)<<1)
if x<0:
uv=~uv
self.write_var_uint32(uv)
def write_var_uint64(self,x):
while x>=0x80:
self.write_byte(int((x%128)+0x80))
x//=128
self.write_byte(int(x))
def write_var_int64(self,x):
uv=np.uint64(np.uint64(x)*2)
if x<0:
uv=~uv
self.write_var_uint64(uv)
def write_str(self,s:str):
es=s.encode(encoding='utf-8')
self.write_var_uint32(len(es))
self.append(es)
def write_UUID_bytes(self,uuid_bytes:bytes):
self.append(uuid_bytes)
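# Hedged usage sketch (editorial addition, not part of the original file):
# round-trips a zigzag varint and a UTF-8 string through the two classes above.
# `_roundtrip_example` is a hypothetical helper name; the module itself uses a
# relative import (`from .nbt import NBTFile`), so it must be imported as part
# of its package for this to run.
def _roundtrip_example():
    enc = BufferEncoder()
    enc.write_var_int32(-42)   # zigzag-encoded as the single byte 0x53
    enc.write_str('hello')     # length varint (5) followed by b'hello'
    dec = BufferDecoder(enc.bytes)
    assert dec.read_var_int32() == -42
    assert dec.read_str() == 'hello'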
|
[
"numpy.uint32",
"io.BytesIO",
"numpy.uint64",
"struct.unpack",
"struct.pack",
"numpy.int32",
"numpy.int64"
] |
[((574, 591), 'numpy.int32', 'np.int32', (['(v_ >> 1)'], {}), '(v_ >> 1)\n', (582, 591), True, 'import numpy as np\n'), ((1036, 1053), 'numpy.int64', 'np.int64', (['(v_ >> 1)'], {}), '(v_ >> 1)\n', (1044, 1053), True, 'import numpy as np\n'), ((1175, 1233), 'struct.unpack', 'struct.unpack', (['"""fff"""', 'self.bytes[self.curr - 12:self.curr]'], {}), "('fff', self.bytes[self.curr - 12:self.curr])\n", (1188, 1233), False, 'import struct\n'), ((1295, 1350), 'struct.unpack', 'struct.unpack', (['"""f"""', 'self.bytes[self.curr - 4:self.curr]'], {}), "('f', self.bytes[self.curr - 4:self.curr])\n", (1308, 1350), False, 'import struct\n'), ((1480, 1535), 'struct.unpack', 'struct.unpack', (['"""B"""', 'self.bytes[self.curr - 1:self.curr]'], {}), "('B', self.bytes[self.curr - 1:self.curr])\n", (1493, 1535), False, 'import struct\n'), ((2077, 2132), 'struct.unpack', 'struct.unpack', (['"""B"""', 'self.bytes[self.curr - 1:self.curr]'], {}), "('B', self.bytes[self.curr - 1:self.curr])\n", (2090, 2132), False, 'import struct\n'), ((2195, 2250), 'struct.unpack', 'struct.unpack', (['"""h"""', 'self.bytes[self.curr - 2:self.curr]'], {}), "('h', self.bytes[self.curr - 2:self.curr])\n", (2208, 2250), False, 'import struct\n'), ((2313, 2368), 'struct.unpack', 'struct.unpack', (['"""i"""', 'self.bytes[self.curr - 4:self.curr]'], {}), "('i', self.bytes[self.curr - 4:self.curr])\n", (2326, 2368), False, 'import struct\n'), ((2432, 2487), 'struct.unpack', 'struct.unpack', (['"""I"""', 'self.bytes[self.curr - 4:self.curr]'], {}), "('I', self.bytes[self.curr - 4:self.curr])\n", (2445, 2487), False, 'import struct\n'), ((2876, 2926), 'io.BytesIO', 'io.BytesIO', (['self.bytes[self.curr - _len:self.curr]'], {}), '(self.bytes[self.curr - _len:self.curr])\n', (2886, 2926), False, 'import io\n'), ((3559, 3578), 'struct.pack', 'struct.pack', (['"""f"""', 'f'], {}), "('f', f)\n", (3570, 3578), False, 'import struct\n'), ((3632, 3651), 'struct.pack', 'struct.pack', (['"""B"""', 'b'], {}), "('B', b)\n", (3643, 3651), False, 'import struct\n'), ((3712, 3731), 'struct.pack', 'struct.pack', (['"""B"""', 'b'], {}), "('B', b)\n", (3723, 3731), False, 'import struct\n'), ((3791, 3810), 'struct.pack', 'struct.pack', (['"""I"""', 'i'], {}), "('I', i)\n", (3802, 3810), False, 'import struct\n'), ((4028, 4040), 'numpy.uint32', 'np.uint32', (['x'], {}), '(x)\n', (4037, 4040), True, 'import numpy as np\n'), ((4342, 4354), 'numpy.uint64', 'np.uint64', (['x'], {}), '(x)\n', (4351, 4354), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) IBM Corporation 2018
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
grid_analyzer.py:
- This script post-processes the output of the ``GridTrainers`` and ``GridTesters``. \
It gathers the test results into one `.csv` file.
"""
__author__ = "<NAME> & <NAME>"
import os
import csv
import yaml
import torch
import logging
from datetime import datetime
from miprometheus.grid_workers.grid_worker import GridWorker
class GridAnalyzer(GridWorker):
"""
Implementation of the :py:class:`miprometheus.grid_workers.GridAnalyzer`.
Post-processes the test results of a grid of experiments and gathers them into a csv file.
This csv file will gather the training statistics (seeds, accuracies, terminal conditions,...), \
the validation statistics and the test statistics.
Inherits from :py:class:`miprometheus.grid_workers.GridWorker`.
"""
def __init__(self, name="GridAnalyzer"):
"""
Constructor for the :py:class:`miprometheus.grid_workers.GridAnalyzer`:
- Calls basic constructor of :py:class:`miprometheus.grid_workers.GridWorker`
:param name: Name of the worker (DEFAULT: "GridAnalyzer").
:type name: str
"""
# call base constructor
super(GridAnalyzer, self).__init__(name=name, use_gpu=False)
@staticmethod
def check_if_file_exists(dir_, filename_):
"""
Checks if ``filename_`` exists in ``dir_``.
:param dir_: Path to the directory containing the file.
:type dir_: str
:param filename_: Name of the file to be opened and analysed.
:type filename_: str
:return: True if the file exists in the directory, else False
"""
return os.path.isfile(os.path.join(dir_, filename_))
def check_file_content(self, dir_, filename_):
"""
Checks if the number of lines in the file is > 1.
:param dir_: Path to the directory containing the file.
:type dir_: str
:param filename_: Name of the file to be opened and analyzed.
:type filename_: str
:return: True if the number of lines in the file is strictly greater than one.
"""
return self.get_lines_number(os.path.join(dir_, filename_)) > 1
@staticmethod
def get_lines_number(filename_):
"""
Returns the number of lines in ``filename_``.
:param filename_: Filepath to be opened and line-read.
:type filename_: str
:return: Number of lines in the file.
"""
with open(filename_) as f:
return sum(1 for _ in f)
def get_experiment_tests(self, experiment_path_):
"""
Returns a list of folders containing valid test experiments data:
- A configuration (`testing_configuration.yaml`),
- A csv file containing a data point for the aggregated statistics (`testing_set_agg_statistics.csv`)
:param experiment_path_: Path to experiment (training) folder.
:type experiment_path_: str
:return: A list of valid test experiment folders.
"""
experiments_tests = []
for root, dirs, _ in os.walk(experiment_path_, topdown=True):
for name in dirs:
experiments_tests.append(os.path.join(root, name))
# Keep only the folders that contain a test configuration file and a csv statistics file.
experiments_tests = [elem for elem in experiments_tests if
self.check_if_file_exists(elem, 'testing_configuration.yaml') and
self.check_if_file_exists(elem, 'testing_set_agg_statistics.csv')]
# Check if the csv file contains at least one data point.
experiments_tests = [elem for elem in experiments_tests if
self.check_file_content(elem, 'testing_set_agg_statistics.csv')]
return experiments_tests
def setup_grid_experiment(self):
"""
Sets up the overall experiment:
- Parses arguments and sets logger level,
- Checks the presence of experiments folder,
- Recursively traverses the experiment folders, cherry-picking subfolders containing:
- (a) 'training_configuration.yaml' (training configuration file),
- (b) 'models/model_best.pt' (checkpoint of the best saved model).
"""
# Parse arguments.
self.flags, self.unparsed = self.parser.parse_known_args()
# Set logger depending on the settings.
self.logger.setLevel(getattr(logging, self.flags.log_level.upper(), None))
# Check if experiments directory was indicated.
if self.flags.expdir == '':
print('Please pass the experiments directory as --expdir')
exit(-1)
# Get experiment directory.
self.experiment_rootdir = self.flags.expdir
# Get all sub-directories paths in expdir.
self.experiments_list = []
for root, dirs, _ in os.walk(self.experiment_rootdir, topdown=True):
for name in dirs:
self.experiments_list.append(os.path.join(root, name))
# Keep only the folders that contain training_configuration.yaml and the best-model
# checkpoint (which holds the aggregated training and validation statistics).
self.experiments_list = [elem for elem in self.experiments_list if
self.check_if_file_exists(elem, 'training_configuration.yaml') and
self.check_if_file_exists(elem, 'models/model_best.pt')]
# Check if there are some valid folders.
if len(self.experiments_list) == 0:
self.logger.error("There are no valid experiment folders in {} directory!".format(self.experiment_rootdir))
exit(-2)
# List folders with "valid" experiment data.
exp_str = "Found the following valid experiments in directory: {} \n".format(self.experiment_rootdir)
exp_str += '='*80 + '\n'
for exp in self.experiments_list:
exp_str += " - {}\n".format(exp)
exp_str += '='*80 + '\n'
self.logger.info(exp_str)
# Ask for confirmation - optional.
if self.flags.user_confirm:
try:
input('Press <Enter> to confirm and start the grid analysis\n')
except KeyboardInterrupt:
exit(0)
def run_experiment(self, experiment_path: str):
"""
Collects the training / validation / test statistics for a given experiment path.
Analyzes whether the given training experiment folder contains subfolders with test experiment data:
- Loads and parses training configuration file,
- Loads checkpoint with model and training and validation statistics,
- Recursively traverses subdirectories looking for test experiments,
.. note::
We require that the test statistics csv files are valid, i.e. contain at least one line with \
collected statistics (excluding the header).
- Collects statistics from training, validation (from model checkpoint) and test experiments \
(from test csv files found in subdirectories).
:param experiment_path: Path to an experiment folder containing a training statistics.
:type experiment_path: str
:return: Four dictionaries containing:
- Status info (model, problem etc.),
- Training statistics,
- Validation statistics,
- Test statistics.
"""
self.logger.info('Analyzing experiments from: {}'.format(experiment_path))
# Create dictionaries.
status_dict = dict()
train_dict = dict()
valid_dict = dict()
# Load yaml file, to get model name, problem name and random seeds.
with open(os.path.join(experiment_path, 'training_configuration.yaml'), 'r') as yaml_file:
params = yaml.load(yaml_file)
# Get problem and model names - from config.
status_dict['problem'] = params['testing']['problem']['name']
status_dict['model'] = params['model']['name']
# Load checkpoint from model file.
chkpt = torch.load(os.path.join(experiment_path, 'models/model_best.pt'),
map_location=lambda storage, loc: storage)
status_dict['model_save_timestamp'] = '{0:%Y%m%d_%H%M%S}'.format(chkpt['model_timestamp'])
status_dict['training_terminal_status'] = chkpt['status']
status_dict['training_terminal_status_timestamp'] = '{0:%Y%m%d_%H%M%S}'.format(chkpt['status_timestamp'])
# Create "empty" equivalent.
status_dict_empty = dict.fromkeys(status_dict.keys(), ' ')
# Copy training status stats.
train_dict['training_configuration_filepath'] = os.path.join(experiment_path, 'training_configuration.yaml')
train_dict['training_start_timestamp'] = os.path.basename(os.path.normpath(experiment_path))
train_dict['training_seed_torch'] = params['training']['seed_torch']
train_dict['training_seed_numpy'] = params['training']['seed_numpy']
# Copy the training statistics from the checkpoint and add the 'train_' prefix.
for key, value in chkpt['training_stats'].items():
train_dict['training_{}'.format(key)] = value
# Create "empty" equivalent.
train_dict_empty = dict.fromkeys(train_dict.keys(), ' ')
# Copy the validation statistics from the checkpoint and add the 'valid_' prefix.
for key, value in chkpt['validation_stats'].items():
valid_dict['validation_{}'.format(key)] = value
# Create "empty" equivalent.
valid_dict_empty = dict.fromkeys(valid_dict.keys(), ' ')
# Get all tests for a given training experiment.
experiments_tests = self.get_experiment_tests(experiment_path)
list_test_dicts = []
if len(experiments_tests) > 0:
self.logger.info(' - Found {} test(s)'.format(len(experiments_tests)))
# "Expand" the status, train and valid dicts with empty ones, in proportion to the number of test folders.
list_status_dicts = [status_dict, *[status_dict_empty for _ in range(len(experiments_tests) - 1)]]
list_train_dicts = [train_dict, *[train_dict_empty for _ in range(len(experiments_tests) - 1)]]
list_valid_dicts = [valid_dict, *[valid_dict_empty for _ in range(len(experiments_tests) - 1)]]
# Get tests statistics.
for experiment_test_path in experiments_tests:
self.logger.info(' - Analyzing test from: {}'.format(experiment_test_path))
# Create test dict:
test_dict = dict()
test_dict['test_configuration_filepath'] = os.path.join(experiment_test_path, 'testing_set_agg_statistics.yaml')
test_dict['test_start_timestamp'] = os.path.basename(os.path.normpath(experiment_test_path))[5:]
# Load yaml file and get random seeds.
with open(os.path.join(experiment_test_path, 'testing_configuration.yaml'), 'r') as yaml_file:
test_params = yaml.load(yaml_file)
# Get seeds.
test_dict['test_seed_torch'] = test_params['testing']['seed_torch']
test_dict['test_seed_numpy'] = test_params['testing']['seed_numpy']
# Load csv file and copy test statistics
with open(os.path.join(experiment_test_path, 'testing_set_agg_statistics.csv'), mode='r') as f:
# Open file.
test_reader = csv.DictReader(f)
# Copy training statistics.
for row in test_reader:
for key, value in row.items():
test_dict['test_{}'.format(key)] = value
list_test_dicts.append(test_dict)
else:
self.logger.info(' - Could not find any valid tests')
list_status_dicts = [status_dict]
list_train_dicts = [train_dict]
list_valid_dicts = [valid_dict]
# Add "empty test entry"
list_test_dicts.append({})
# Return all dictionaries with lists
return list_status_dicts, list_train_dicts, list_valid_dicts, list_test_dicts
@staticmethod
def merge_list_dicts(list_dicts):
"""
Merges a list of dictionaries into one dict, filling the missing fields with spaces.
:param list_dicts: List of dictionaries, potentially containing different headers, which will be merged.
:type list_dicts: list
:return: dict resulting from the merge.
"""
# Create a "unified" header.
header = set(k for d in list_dicts for k in d)
# Create an "empty" dict from the unified header.
empty_dict = {k: ' ' for k in header}
# "Fill" all lists with empty gaps.
list_filled_dicts = []
for i, _ in enumerate(list_dicts):
list_filled_dicts.append({**empty_dict, **(list_dicts[i])})
# Zip lists of dicts.
final_dict = dict(zip(header, zip(*[d.values() for d in list_filled_dicts])))
# Return the result.
return final_dict
def run_grid_experiment(self):
"""
Collects four lists of dicts from each experiment path contained in ``self.experiments_list``.
Merges them all together and saves the result to a single csv file.
"""
try:
# Go through the experiments one by one and collect data.
list_statuses = []
list_trains = []
list_valids = []
list_tests = []
for exp in self.experiments_list:
statuses, trains, valids, tests = self.run_experiment(exp)
list_statuses.extend(statuses)
list_trains.extend(trains)
list_valids.extend(valids)
list_tests.extend(tests)
# Merge lists.
statuses = self.merge_list_dicts(list_statuses)
trains = self.merge_list_dicts(list_trains)
valids = self.merge_list_dicts(list_valids)
tests = self.merge_list_dicts(list_tests)
# Merge everything into one big dictionary.
exp_values = {**statuses, **trains, **valids, **tests}
# create results file
results_file = os.path.join(self.experiment_rootdir,
"{0:%Y%m%d_%H%M%S}_grid_analysis.csv".format(datetime.now()))
with open(results_file, "w") as outfile:
writer = csv.writer(outfile, delimiter=',')
writer.writerow(exp_values.keys())
writer.writerows(zip(*exp_values.values()))
self.logger.info('Analysis finished')
self.logger.info('Results stored in {}.'.format(results_file))
except KeyboardInterrupt:
self.logger.info('Grid analysis interrupted!')
def main():
"""
Entry point function for the :py:class:`miprometheus.grid_workers.GridAnalyzer`.
"""
grid_analyzer = GridAnalyzer()
# parse args, load configuration and create all required objects.
grid_analyzer.setup_grid_experiment()
# GO!
grid_analyzer.run_grid_experiment()
if __name__ == '__main__':
main()
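# Hedged usage note (editorial, not part of the original file): the analyzer is
# typically run from the command line on a directory of finished training runs,
# e.g. `python grid_analyzer.py --expdir ./experiments` (the path is only an
# example); it walks that directory, collects the training/validation/test
# statistics and writes a <timestamp>_grid_analysis.csv file back into it.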
|
[
"yaml.load",
"csv.writer",
"csv.DictReader",
"os.walk",
"datetime.datetime.now",
"os.path.normpath",
"os.path.join"
] |
[((3681, 3720), 'os.walk', 'os.walk', (['experiment_path_'], {'topdown': '(True)'}), '(experiment_path_, topdown=True)\n', (3688, 3720), False, 'import os\n'), ((5537, 5583), 'os.walk', 'os.walk', (['self.experiment_rootdir'], {'topdown': '(True)'}), '(self.experiment_rootdir, topdown=True)\n', (5544, 5583), False, 'import os\n'), ((9478, 9538), 'os.path.join', 'os.path.join', (['experiment_path', '"""training_configuration.yaml"""'], {}), "(experiment_path, 'training_configuration.yaml')\n", (9490, 9538), False, 'import os\n'), ((2281, 2310), 'os.path.join', 'os.path.join', (['dir_', 'filename_'], {}), '(dir_, filename_)\n', (2293, 2310), False, 'import os\n'), ((8600, 8620), 'yaml.load', 'yaml.load', (['yaml_file'], {}), '(yaml_file)\n', (8609, 8620), False, 'import yaml\n'), ((8871, 8924), 'os.path.join', 'os.path.join', (['experiment_path', '"""models/model_best.pt"""'], {}), "(experiment_path, 'models/model_best.pt')\n", (8883, 8924), False, 'import os\n'), ((9605, 9638), 'os.path.normpath', 'os.path.normpath', (['experiment_path'], {}), '(experiment_path)\n', (9621, 9638), False, 'import os\n'), ((2732, 2761), 'os.path.join', 'os.path.join', (['dir_', 'filename_'], {}), '(dir_, filename_)\n', (2744, 2761), False, 'import os\n'), ((8498, 8558), 'os.path.join', 'os.path.join', (['experiment_path', '"""training_configuration.yaml"""'], {}), "(experiment_path, 'training_configuration.yaml')\n", (8510, 8558), False, 'import os\n'), ((11472, 11541), 'os.path.join', 'os.path.join', (['experiment_test_path', '"""testing_set_agg_statistics.yaml"""'], {}), "(experiment_test_path, 'testing_set_agg_statistics.yaml')\n", (11484, 11541), False, 'import os\n'), ((15404, 15438), 'csv.writer', 'csv.writer', (['outfile'], {'delimiter': '""","""'}), "(outfile, delimiter=',')\n", (15414, 15438), False, 'import csv\n'), ((3793, 3817), 'os.path.join', 'os.path.join', (['root', 'name'], {}), '(root, name)\n', (3805, 3817), False, 'import os\n'), ((5660, 5684), 'os.path.join', 'os.path.join', (['root', 'name'], {}), '(root, name)\n', (5672, 5684), False, 'import os\n'), ((11856, 11876), 'yaml.load', 'yaml.load', (['yaml_file'], {}), '(yaml_file)\n', (11865, 11876), False, 'import yaml\n'), ((12358, 12375), 'csv.DictReader', 'csv.DictReader', (['f'], {}), '(f)\n', (12372, 12375), False, 'import csv\n'), ((15308, 15322), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (15320, 15322), False, 'from datetime import datetime\n'), ((11611, 11649), 'os.path.normpath', 'os.path.normpath', (['experiment_test_path'], {}), '(experiment_test_path)\n', (11627, 11649), False, 'import os\n'), ((11737, 11801), 'os.path.join', 'os.path.join', (['experiment_test_path', '"""testing_configuration.yaml"""'], {}), "(experiment_test_path, 'testing_configuration.yaml')\n", (11749, 11801), False, 'import os\n'), ((12205, 12273), 'os.path.join', 'os.path.join', (['experiment_test_path', '"""testing_set_agg_statistics.csv"""'], {}), "(experiment_test_path, 'testing_set_agg_statistics.csv')\n", (12217, 12273), False, 'import os\n')]
|
import numpy as np
from pycocotools_local.coco import *
import os.path as osp
from .utils import to_tensor, random_scale
from mmcv.parallel import DataContainer as DC
import mmcv
from .custom import CustomDataset
class CocoDatasetRGB2(CustomDataset):
CLASSES = ('microbleed', 'full_bounding_box')
def load_annotations(self, ann_file):
self.coco = COCO(ann_file)
self.cat_ids = self.coco.getCatIds()
self.cat2label = {
cat_id: i + 1
for i, cat_id in enumerate(self.cat_ids)
}
self.img_ids = self.coco.getImgIds()
img_infos = []
for i in self.img_ids:
info = self.coco.loadImgs([i])[0]
info['filename'] = info['file_name']
img_infos.append(info)
return img_infos
def get_ann_info(self, idx):
img_id = self.img_infos[idx]['id']
ann_ids = self.coco.getAnnIds(imgIds=[img_id])
ann_info = self.coco.loadAnns(ann_ids)
return self._parse_ann_info(ann_info, self.with_mask)
def _filter_imgs(self, min_size=32):
"""Filter images too small or without ground truths."""
valid_inds = []
ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
for i, img_info in enumerate(self.img_infos):
if self.img_ids[i] not in ids_with_ann:
continue
if min(img_info['width'], img_info['height']) >= min_size:
valid_inds.append(i)
return valid_inds
def _parse_ann_info(self, ann_info, with_mask=True):
"""Parse bbox and mask annotation.
Args:
ann_info (list[dict]): Annotation info of an image.
with_mask (bool): Whether to parse mask annotations.
Returns:
dict: A dict containing the following keys: bboxes, bboxes_ignore,
labels, masks, mask_polys, poly_lens.
"""
slices_ann_info = {'r': [], 'g': [], 'b': []}
for info in ann_info:
if info['slice_label'] == 'r':
slices_ann_info['r'].append(info)
elif info['slice_label'] == 'g':
slices_ann_info['g'].append(info)
elif info['slice_label'] == 'b':
slices_ann_info['b'].append(info)
gt_bboxes = []
gt_labels = []
gt_bboxes_ignore = []
# Two formats are provided.
# 1. mask: a binary map of the same size of the image.
# 2. polys: each mask consists of one or several polys, each poly is a
# list of float.
if with_mask:
gt_masks = []
gt_mask_polys = []
gt_poly_lens = []
for key in slices_ann_info:
cur_ann_info = slices_ann_info[key]
cur_slice_bboxes = []
cur_slice_labels = []
cur_slice_bboxes_ignore = []
cur_masks = []
cur_mask_polys = []
cur_poly_lens = []
for i, ann in enumerate(cur_ann_info):
if ann.get('ignore', False):
continue
x1, y1, w, h = ann['bbox']
if ann['area'] <= 0 or w < 1 or h < 1:
continue
bbox = [x1, y1, x1 + w - 1, y1 + h - 1]
if ann['iscrowd']:
cur_slice_bboxes_ignore.append(bbox)
else:
cur_slice_bboxes.append(bbox)
cur_slice_labels.append(self.cat2label[ann['category_id']])
if with_mask:
cur_masks.append(self.coco.annToMask(ann))
mask_polys = [
p for p in ann['segmentation'] if len(p) >= 6
] # valid polygons have >= 3 points (6 coordinates)
poly_lens = [len(p) for p in mask_polys]
cur_mask_polys.append(mask_polys)
cur_poly_lens.extend(poly_lens)
if cur_slice_bboxes:
cur_slice_bboxes = np.array(cur_slice_bboxes, dtype=np.float32)
cur_slice_labels = np.array(cur_slice_labels, dtype=np.int64)
else:
cur_slice_bboxes = np.zeros((0, 4), dtype=np.float32)
cur_slice_labels = np.array([], dtype=np.int64)
if cur_slice_bboxes_ignore:
cur_slice_bboxes_ignore = np.array(cur_slice_bboxes_ignore, dtype=np.float32)
else:
cur_slice_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
gt_bboxes.append(cur_slice_bboxes)
gt_labels.append(cur_slice_labels)
gt_bboxes_ignore.append(cur_slice_bboxes_ignore)
gt_masks.append(cur_masks)
gt_mask_polys.append(cur_mask_polys)
gt_poly_lens.append(cur_poly_lens)
ann = dict(
bboxes=gt_bboxes, labels=gt_labels, bboxes_ignore=gt_bboxes_ignore)
if with_mask:
ann['masks'] = gt_masks
# poly format is not used in the current implementation
ann['mask_polys'] = gt_mask_polys
ann['poly_lens'] = gt_poly_lens
return ann
def insert_to_dict(self, data, key, tensors):
if key in data:
data[key].append(tensors)
else:
data[key] = [tensors]
def prepare_train_img(self, idx):
img_info = self.img_infos[idx]
# load image
orig_img = mmcv.imread(osp.join(self.img_prefix, img_info['filename']))
# load proposals if necessary
if self.proposals is not None:
proposals = self.proposals[idx][:self.num_max_proposals]
# TODO: Handle empty proposals properly. Currently images with
# no proposals are just ignored, but they can be used for
# training in principle.
if len(proposals) == 0:
return None
if not (proposals.shape[1] == 4 or proposals.shape[1] == 5):
raise AssertionError(
'proposals should have shapes (n, 4) or (n, 5), '
'but found {}'.format(proposals.shape))
if proposals.shape[1] == 5:
scores = proposals[:, 4, None]
proposals = proposals[:, :4]
else:
scores = None
ann = self.get_ann_info(idx)
gt_bboxes_list = ann['bboxes']
gt_labels_list = ann['labels']
# if self.with_crowd:
gt_bboxes_ignore_list = ann['bboxes_ignore']
gt_masks_list = ann['masks']
# apply transforms
flip = bool(np.random.rand() < self.flip_ratio)
data = None
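# Editorial note: one loop iteration per colour slice -- the same source image is
# re-augmented and re-transformed for each slice's boxes/labels/masks, and the
# per-slice results are accumulated into lists via insert_to_dict().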
for gt_bboxes, gt_labels, gt_bboxes_ignore, gt_masks in zip(gt_bboxes_list, gt_labels_list, gt_bboxes_ignore_list, gt_masks_list):
# skip the image if there is no valid gt bbox
if len(gt_bboxes) == 0:
return None
# extra augmentation
if self.extra_aug is not None:
img, gt_bboxes, gt_labels = self.extra_aug(orig_img, gt_bboxes,
gt_labels)
else:
img = orig_img
# randomly sample a scale
img_scale = random_scale(self.img_scales, self.multiscale_mode)
img, img_shape, pad_shape, scale_factor = self.img_transform(
img, img_scale, flip, keep_ratio=self.resize_keep_ratio)
img = img.copy()
if self.with_seg:
gt_seg = mmcv.imread(
osp.join(self.seg_prefix, img_info['file_name'].replace(
'jpg', 'png')),
flag='unchanged')
gt_seg = self.seg_transform(gt_seg.squeeze(), img_scale, flip)
gt_seg = mmcv.imrescale(
gt_seg, self.seg_scale_factor, interpolation='nearest')
gt_seg = gt_seg[None, ...]
if self.proposals is not None:
proposals = self.bbox_transform(proposals, img_shape, scale_factor,
flip)
proposals = np.hstack(
[proposals, scores]) if scores is not None else proposals
gt_bboxes = self.bbox_transform(gt_bboxes, img_shape, scale_factor,
flip)
if self.with_crowd:
gt_bboxes_ignore = self.bbox_transform(gt_bboxes_ignore, img_shape,
scale_factor, flip)
if self.with_mask:
gt_masks = self.mask_transform(gt_masks, pad_shape,
scale_factor, flip)
if data is None:
ori_shape = (img_info['height'], img_info['width'], 3)
img_meta = dict(
ori_shape=ori_shape,
img_shape=img_shape,
pad_shape=pad_shape,
scale_factor=scale_factor,
flip=flip,
image_id=img_info['id'])
data = dict(
img=DC(to_tensor(img), stack=True),
img_meta=DC(img_meta, cpu_only=True))
self.insert_to_dict(data, 'gt_bboxes', DC(to_tensor(gt_bboxes)))
if self.proposals is not None:
self.insert_to_dict(data, 'proposals', DC(to_tensor(proposals)))
if self.with_label:
self.insert_to_dict(data, 'gt_labels', DC(to_tensor(gt_labels)))
if self.with_crowd:
self.insert_to_dict(data, 'gt_bboxes_ignore', DC(to_tensor(gt_bboxes_ignore)))
if self.with_mask:
self.insert_to_dict(data, 'gt_masks', DC(gt_masks, cpu_only=True))
if self.with_seg:
self.insert_to_dict(data, 'gt_semantic_seg', DC(to_tensor(gt_seg), stack=True))
return data
|
[
"numpy.zeros",
"numpy.hstack",
"numpy.array",
"mmcv.parallel.DataContainer",
"numpy.random.rand",
"mmcv.imrescale",
"os.path.join"
] |
[((5455, 5502), 'os.path.join', 'osp.join', (['self.img_prefix', "img_info['filename']"], {}), "(self.img_prefix, img_info['filename'])\n", (5463, 5502), True, 'import os.path as osp\n'), ((4019, 4063), 'numpy.array', 'np.array', (['cur_slice_bboxes'], {'dtype': 'np.float32'}), '(cur_slice_bboxes, dtype=np.float32)\n', (4027, 4063), True, 'import numpy as np\n'), ((4099, 4141), 'numpy.array', 'np.array', (['cur_slice_labels'], {'dtype': 'np.int64'}), '(cur_slice_labels, dtype=np.int64)\n', (4107, 4141), True, 'import numpy as np\n'), ((4195, 4229), 'numpy.zeros', 'np.zeros', (['(0, 4)'], {'dtype': 'np.float32'}), '((0, 4), dtype=np.float32)\n', (4203, 4229), True, 'import numpy as np\n'), ((4265, 4293), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int64'}), '([], dtype=np.int64)\n', (4273, 4293), True, 'import numpy as np\n'), ((4377, 4428), 'numpy.array', 'np.array', (['cur_slice_bboxes_ignore'], {'dtype': 'np.float32'}), '(cur_slice_bboxes_ignore, dtype=np.float32)\n', (4385, 4428), True, 'import numpy as np\n'), ((4489, 4523), 'numpy.zeros', 'np.zeros', (['(0, 4)'], {'dtype': 'np.float32'}), '((0, 4), dtype=np.float32)\n', (4497, 4523), True, 'import numpy as np\n'), ((6612, 6628), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (6626, 6628), True, 'import numpy as np\n'), ((7833, 7903), 'mmcv.imrescale', 'mmcv.imrescale', (['gt_seg', 'self.seg_scale_factor'], {'interpolation': '"""nearest"""'}), "(gt_seg, self.seg_scale_factor, interpolation='nearest')\n", (7847, 7903), False, 'import mmcv\n'), ((8177, 8207), 'numpy.hstack', 'np.hstack', (['[proposals, scores]'], {}), '([proposals, scores])\n', (8186, 8207), True, 'import numpy as np\n'), ((9823, 9850), 'mmcv.parallel.DataContainer', 'DC', (['gt_masks'], {'cpu_only': '(True)'}), '(gt_masks, cpu_only=True)\n', (9825, 9850), True, 'from mmcv.parallel import DataContainer as DC\n'), ((9254, 9281), 'mmcv.parallel.DataContainer', 'DC', (['img_meta'], {'cpu_only': '(True)'}), '(img_meta, cpu_only=True)\n', (9256, 9281), True, 'from mmcv.parallel import DataContainer as DC\n')]
|
from random import random
from math import cos, sin, floor, sqrt, pi, ceil
def euclidean_distance(a, b):
dx = a[0] - b[0]
dy = a[1] - b[1]
return sqrt(dx * dx + dy * dy)
def poisson_disc_samples(width, height, r, k=5, distance=euclidean_distance, random=random):
tau = 2 * pi
cellsize = r / sqrt(2)
grid_width = int(ceil(width / cellsize))
grid_height = int(ceil(height / cellsize))
grid = [None] * (grid_width * grid_height)
def grid_coords(p):
return int(floor(p[0] / cellsize)), int(floor(p[1] / cellsize))
def fits(p, gx, gy):
yrange = list(range(max(gy - 2, 0), min(gy + 3, grid_height)))
for x in range(max(gx - 2, 0), min(gx + 3, grid_width)):
for y in yrange:
g = grid[x + y * grid_width]
if g is None:
continue
if distance(p, g) <= r:
return False
return True
p = width * random(), height * random()
queue = [p]
grid_x, grid_y = grid_coords(p)
grid[grid_x + grid_y * grid_width] = p
while queue:
qi = int(random() * len(queue))
qx, qy = queue[qi]
queue[qi] = queue[-1]
queue.pop()
for _ in range(k):
alpha = tau * random()
d = r * sqrt(3 * random() + 1)
px = qx + d * cos(alpha)
py = qy + d * sin(alpha)
if not (0 <= px < width and 0 <= py < height):
continue
p = (px, py)
grid_x, grid_y = grid_coords(p)
if not fits(p, grid_x, grid_y):
continue
queue.append(p)
grid[grid_x + grid_y * grid_width] = p
return [p for p in grid if p is not None]
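# Hedged usage sketch (editorial addition, not part of the original module):
# draw a blue-noise point set with a minimum spacing of r=10 inside a
# 100x100 rectangle and print a small summary.
if __name__ == '__main__':
    pts = poisson_disc_samples(100, 100, r=10)
    print(len(pts), 'points; first three:', pts[:3])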
|
[
"math.sqrt",
"math.ceil",
"math.floor",
"math.sin",
"random.random",
"math.cos"
] |
[((160, 183), 'math.sqrt', 'sqrt', (['(dx * dx + dy * dy)'], {}), '(dx * dx + dy * dy)\n', (164, 183), False, 'from math import cos, sin, floor, sqrt, pi, ceil\n'), ((315, 322), 'math.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (319, 322), False, 'from math import cos, sin, floor, sqrt, pi, ceil\n'), ((345, 367), 'math.ceil', 'ceil', (['(width / cellsize)'], {}), '(width / cellsize)\n', (349, 367), False, 'from math import cos, sin, floor, sqrt, pi, ceil\n'), ((391, 414), 'math.ceil', 'ceil', (['(height / cellsize)'], {}), '(height / cellsize)\n', (395, 414), False, 'from math import cos, sin, floor, sqrt, pi, ceil\n'), ((965, 973), 'random.random', 'random', ([], {}), '()\n', (971, 973), False, 'from random import random\n'), ((984, 992), 'random.random', 'random', ([], {}), '()\n', (990, 992), False, 'from random import random\n'), ((507, 529), 'math.floor', 'floor', (['(p[0] / cellsize)'], {}), '(p[0] / cellsize)\n', (512, 529), False, 'from math import cos, sin, floor, sqrt, pi, ceil\n'), ((536, 558), 'math.floor', 'floor', (['(p[1] / cellsize)'], {}), '(p[1] / cellsize)\n', (541, 558), False, 'from math import cos, sin, floor, sqrt, pi, ceil\n'), ((1123, 1131), 'random.random', 'random', ([], {}), '()\n', (1129, 1131), False, 'from random import random\n'), ((1276, 1284), 'random.random', 'random', ([], {}), '()\n', (1282, 1284), False, 'from random import random\n'), ((1354, 1364), 'math.cos', 'cos', (['alpha'], {}), '(alpha)\n', (1357, 1364), False, 'from math import cos, sin, floor, sqrt, pi, ceil\n'), ((1391, 1401), 'math.sin', 'sin', (['alpha'], {}), '(alpha)\n', (1394, 1401), False, 'from math import cos, sin, floor, sqrt, pi, ceil\n'), ((1314, 1322), 'random.random', 'random', ([], {}), '()\n', (1320, 1322), False, 'from random import random\n')]
|
from crayon import benchmark
benchmark("deepar.yml", "deepar", benchmark_id="deepar_100", runs=100)
|
[
"crayon.benchmark"
] |
[((30, 100), 'crayon.benchmark', 'benchmark', (['"""deepar.yml"""', '"""deepar"""'], {'benchmark_id': '"""deepar_100"""', 'runs': '(100)'}), "('deepar.yml', 'deepar', benchmark_id='deepar_100', runs=100)\n", (39, 100), False, 'from crayon import benchmark\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Python toolkit for generating and analyzing nanostructure data"""
from __future__ import absolute_import, division, print_function, \
unicode_literals
__docformat__ = 'restructuredtext en'
import os
import sys
import shutil
import subprocess
from distutils.command.clean import clean as Clean
if sys.version_info[0] < 3:
raise RuntimeError("Python version 3.4+ required.\n\n"
"Sorry, but there are features of Python 3\n"
"that I want to take advantage of and without\n"
"worrying about Python 2 compatibility.\n"
"Therefore, Python 2 support was removed starting\n"
"in v0.3.7. Once/if I learn how to automate the\n"
"backporting process from the setup script,\n"
"I will restore Python 2 support that way.\n"
"Until then, if you must install this for Python 2\n"
"you're on your own. It shouldn't be difficult\n"
"but you'll have to manually backport the package\n"
"source code using a Python 3 to Python 2\n"
"compatibility library such as the python `future`\n"
"module, which provides a python script called\n"
"`pasteurize` that can be run on the source\n"
"directory to automate the backporting process.\n"
"You'll also need to hack this setup script\n"
"to remove any exceptions that are raised when\n"
"executed under Python 2.")
#if sys.version_info[:2] < (2, 7) or (3, 0) <= sys.version_info[:2] < (3, 4):
if (3, 0) <= sys.version_info[:2] < (3, 4):
raise RuntimeError("Python 3.4+ required.")
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
try:
import setuptools
except ImportError:
sys.exit("setuptools required for Python3 install.\n"
"`pip install --upgrade setuptools`")
DISTNAME = 'scikit-nano'
DESCRIPTION = __doc__
LONG_DESCRIPTION = ''.join(open('README.rst').readlines()[6:])
AUTHOR = '<NAME>'
AUTHOR_EMAIL = '<EMAIL>'
MAINTAINER = AUTHOR
MAINTAINER_EMAIL = AUTHOR_EMAIL
URL = 'http://scikit-nano.org/doc'
DOWNLOAD_URL = 'http://github.com/androomerrill/scikit-nano'
KEYWORDS = ['nano', 'nanoscience', 'nano-structure', 'nanostructure',
'nanotube', 'graphene', 'LAMMPS', 'XYZ', 'structure',
'analysis']
LICENSE = 'BSD 2-Clause'
CLASSIFIERS = """\
Development Status :: 4 - Beta
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: OSI Approved :: BSD License
Operating System :: Microsoft :: Windows
Operating System :: POSIX
Operating System :: Unix
Operating System :: MacOS
Programming Language :: Python
Programming Language :: Python :: 3.4
Topic :: Scientific/Engineering
Topic :: Scientific/Engineering :: Chemistry
Topic :: Scientific/Engineering :: Physics
Topic :: Scientific/Engineering :: Visualization
Topic :: Software Development
Topic :: Software Development :: Libraries :: Python Modules
"""
MAJOR = 0
MINOR = 3
MICRO = 21
ISRELEASED = True
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
STABLEVERSION = None
if STABLEVERSION is None:
if ISRELEASED:
STABLEVERSION = VERSION
else:
STABLEVERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO - 1)
# Return the GIT version as a string
def git_version():
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(
cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
return out
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
GIT_REVISION = out.strip().decode('ascii')
except OSError:
GIT_REVISION = "Unknown"
return GIT_REVISION
# BEFORE importing distutils, remove MANIFEST. distutils doesn't properly
# update it when the contents of directories change.
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
# This is a bit (!) hackish: we are setting a global variable so that the main
# sknano __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet.
builtins.__SKNANO_SETUP__ = True
class CleanCommand(Clean):
description = \
"Remove build directories, __pycache__ directories, " \
".ropeproject directories, and compiled files in the source tree."
def run(self):
Clean.run(self)
if os.path.exists('build'):
shutil.rmtree('build')
for dirpath, dirnames, filenames in os.walk('sknano'):
for filename in filenames:
if filename.endswith(('.so', '.pyd', '.pyc', '.dll')):
os.unlink(os.path.join(dirpath, filename))
for dirname in dirnames:
if dirname in ('__pycache__', '.ropeproject'):
shutil.rmtree(os.path.join(dirpath, dirname))
for dirpath, dirnames, filenames in os.walk('doc'):
for dirname in dirnames:
if dirname in ('__pycache__', '.ropeproject'):
shutil.rmtree(os.path.join(dirpath, dirname))
def get_version_info():
# Adding the git rev number needs to be done inside
# write_version_py(), otherwise the import of sknano.version messes
# up the build under Python 3.
FULLVERSION = VERSION
if os.path.exists('.git'):
GIT_REVISION = git_version()
elif os.path.exists('sknano/version.py'):
# must be a source distribution, use existing version file
# load it as a separate module to not load sknano/__init__.py
import imp
version = imp.load_source('sknano.version', 'sknano/version.py')
GIT_REVISION = version.git_revision
else:
GIT_REVISION = "Unknown"
if not ISRELEASED:
# FULLVERSION += '.dev'
FULLVERSION += '.dev0+' + GIT_REVISION[:7]
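# e.g. '0.3.21.dev0+1a2b3c4' for an unreleased build (the revision shown here is hypothetical)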
return FULLVERSION, GIT_REVISION
def write_version_py(filename='sknano/version.py'):
cnt = """
# THIS FILE IS GENERATED FROM SCIKIT-NANO SETUP.PY
short_version = '%(version)s'
version = '%(version)s'
full_version = '%(full_version)s'
git_revision = '%(git_revision)s'
release = %(isrelease)s
stable_version = '%(stable_version)s'
if not release:
version = full_version
"""
FULLVERSION, GIT_REVISION = get_version_info()
a = open(filename, 'w')
try:
a.write(cnt % {'version': VERSION,
'full_version': FULLVERSION,
'git_revision': GIT_REVISION,
'isrelease': str(ISRELEASED),
'stable_version': STABLEVERSION})
finally:
a.close()
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('sknano')
config.get_version('sknano/version.py')
return config
def setup_package():
# Rewrite the version file every time
write_version_py()
# Figure out whether to add ``*_requires = ['numpy>=`min version`',
# 'scipy>=`min version`']``. We don't want to do that unconditionally,
# because we risk updating an installed numpy/scipy which fails too often.
# Only if the minimum version is not installed do we give it a try.
build_requires = []
try:
import numpy
numpy_version = \
tuple(
list(map(int, numpy.version.short_version.split('.')[:3]))[:2])
if numpy_version < (1, 9):
raise RuntimeError
except (AttributeError, ImportError, RuntimeError):
build_requires += ['numpy==1.10.1']
install_requires = build_requires[:]
try:
import scipy
scipy_version = \
tuple(
list(map(int, scipy.version.short_version.split('.')[:3]))[:2])
if scipy_version < (0, 14):
raise RuntimeError
except (AttributeError, ImportError, RuntimeError):
install_requires += ['scipy==0.16.1']
# # Add six module to install_requires (used in numpydoc git submodule)
# install_requires += ['six>=1.9']
# # Add future module to install requires
# install_requires += ['future>=0.14.3']
install_requires += ['monty>=0.7.0', 'pymatgen>=3.2.4']
metadata = dict(
name=DISTNAME,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
url=URL,
download_url=DOWNLOAD_URL,
license=LICENSE,
keywords=KEYWORDS,
classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f],
platforms=["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"],
test_suite='nose.collector',
setup_requires=build_requires,
install_requires=install_requires,
extras_require={
'plotting': ['matplotlib>=1.4.3', 'palettable>=2.1.1']
},
entry_points={
'console_scripts': [
'analyze_structure = sknano.scripts.analyze_structure:main',
'nanogen = sknano.scripts.nanogen:main',
'nanogenui = sknano.scripts.nanogenui:main',
'sknano = sknano.scripts.sknano:main'],
},
cmdclass={'clean': CleanCommand},
zip_safe=False, # the package can run out of an .egg file
include_package_data=True,
)
if len(sys.argv) >= 2 and \
('--help' in sys.argv[1:] or sys.argv[1]
in ('--help-commands', 'egg_info', '--version', 'clean')):
# For these actions, NumPy/SciPy are not required.
# They must succeed without NumPy/SciPy, for example when
# pip is used to install the package while NumPy is not yet
# present in the system.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
FULLVERSION, GIT_REVISION = get_version_info()
metadata['version'] = FULLVERSION
else:
from numpy.distutils.core import setup
metadata['configuration'] = configuration
setup(**metadata)
if __name__ == '__main__':
setup_package()
|
[
"os.remove",
"subprocess.Popen",
"distutils.core.setup",
"scipy.version.short_version.split",
"distutils.command.clean.clean.run",
"os.path.exists",
"os.walk",
"os.environ.get",
"imp.load_source",
"shutil.rmtree",
"numpy.distutils.misc_util.Configuration",
"os.path.join",
"numpy.version.short_version.split",
"sys.exit"
] |
[((4346, 4372), 'os.path.exists', 'os.path.exists', (['"""MANIFEST"""'], {}), "('MANIFEST')\n", (4360, 4372), False, 'import os\n'), ((4378, 4399), 'os.remove', 'os.remove', (['"""MANIFEST"""'], {}), "('MANIFEST')\n", (4387, 4399), False, 'import os\n'), ((5804, 5826), 'os.path.exists', 'os.path.exists', (['""".git"""'], {}), "('.git')\n", (5818, 5826), False, 'import os\n'), ((7225, 7270), 'numpy.distutils.misc_util.Configuration', 'Configuration', (['None', 'parent_package', 'top_path'], {}), '(None, parent_package, top_path)\n', (7238, 7270), False, 'from numpy.distutils.misc_util import Configuration\n'), ((10846, 10863), 'distutils.core.setup', 'setup', ([], {}), '(**metadata)\n', (10851, 10863), False, 'from distutils.core import setup\n'), ((2029, 2130), 'sys.exit', 'sys.exit', (['"""setuptools required for Python3 install.\n`pip install --upgrade setuptools`"""'], {}), '(\n """setuptools required for Python3 install.\n`pip install --upgrade setuptools`"""\n )\n', (2037, 2130), False, 'import sys\n'), ((4866, 4881), 'distutils.command.clean.clean.run', 'Clean.run', (['self'], {}), '(self)\n', (4875, 4881), True, 'from distutils.command.clean import clean as Clean\n'), ((4893, 4916), 'os.path.exists', 'os.path.exists', (['"""build"""'], {}), "('build')\n", (4907, 4916), False, 'import os\n'), ((4997, 5014), 'os.walk', 'os.walk', (['"""sknano"""'], {}), "('sknano')\n", (5004, 5014), False, 'import os\n'), ((5400, 5414), 'os.walk', 'os.walk', (['"""doc"""'], {}), "('doc')\n", (5407, 5414), False, 'import os\n'), ((5874, 5909), 'os.path.exists', 'os.path.exists', (['"""sknano/version.py"""'], {}), "('sknano/version.py')\n", (5888, 5909), False, 'import os\n'), ((3702, 3719), 'os.environ.get', 'os.environ.get', (['k'], {}), '(k)\n', (3716, 3719), False, 'import os\n'), ((4930, 4952), 'shutil.rmtree', 'shutil.rmtree', (['"""build"""'], {}), "('build')\n", (4943, 4952), False, 'import shutil\n'), ((6085, 6139), 'imp.load_source', 'imp.load_source', (['"""sknano.version"""', '"""sknano/version.py"""'], {}), "('sknano.version', 'sknano/version.py')\n", (6100, 6139), False, 'import imp\n'), ((3911, 3965), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'stdout': 'subprocess.PIPE', 'env': 'env'}), '(cmd, stdout=subprocess.PIPE, env=env)\n', (3927, 3965), False, 'import subprocess\n'), ((5156, 5187), 'os.path.join', 'os.path.join', (['dirpath', 'filename'], {}), '(dirpath, filename)\n', (5168, 5187), False, 'import os\n'), ((5323, 5353), 'os.path.join', 'os.path.join', (['dirpath', 'dirname'], {}), '(dirpath, dirname)\n', (5335, 5353), False, 'import os\n'), ((5550, 5580), 'os.path.join', 'os.path.join', (['dirpath', 'dirname'], {}), '(dirpath, dirname)\n', (5562, 5580), False, 'import os\n'), ((8092, 8130), 'numpy.version.short_version.split', 'numpy.version.short_version.split', (['"""."""'], {}), "('.')\n", (8125, 8130), False, 'import numpy\n'), ((8455, 8493), 'scipy.version.short_version.split', 'scipy.version.short_version.split', (['"""."""'], {}), "('.')\n", (8488, 8493), False, 'import scipy\n')]
|
import arcade
SCREEN_WIDTH = 600
SCREEN_HEIGHT = 600
MOVEMENT_SPEED = 5
class Player(arcade.Sprite):
def update(self):
self.center_x += self.change_x
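# Clamp to the window edges so the sprite can never leave the screen horizontally.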
if self.left < 0:
self.left = 0
elif self.right > SCREEN_WIDTH - 1:
self.right = SCREEN_WIDTH - 1
class MyGame(arcade.Window):
def __init__(self, width, height, title):
super().__init__(width, height, title)
arcade.set_background_color(arcade.color.WHEAT)
self.player = Player('player.png', 0.5)
self.player.center_y = 20
self.all_sprites_list = arcade.SpriteList()
self.all_sprites_list.append(self.player)
def on_draw(self):
arcade.start_render()
self.all_sprites_list.draw()
def update(self, delta_time):
self.all_sprites_list.update()
self.player.change_x = MOVEMENT_SPEED
def main():
game = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, 'Coin Game')
arcade.run()
return game
if __name__ == '__main__':
main()
|
[
"arcade.SpriteList",
"arcade.start_render",
"arcade.set_background_color",
"arcade.run"
] |
[((958, 970), 'arcade.run', 'arcade.run', ([], {}), '()\n', (968, 970), False, 'import arcade\n'), ((436, 483), 'arcade.set_background_color', 'arcade.set_background_color', (['arcade.color.WHEAT'], {}), '(arcade.color.WHEAT)\n', (463, 483), False, 'import arcade\n'), ((599, 618), 'arcade.SpriteList', 'arcade.SpriteList', ([], {}), '()\n', (616, 618), False, 'import arcade\n'), ((701, 722), 'arcade.start_render', 'arcade.start_render', ([], {}), '()\n', (720, 722), False, 'import arcade\n')]
|
import os
def get_host(service: str):
'''
Retrieves the host. (Helps with debugging locally)
- Arguments:
- service: a Docker service
- Returns:
a string of either localhost or a Docker service
'''
inside_docker = os.environ.get('IS_DOCKER_CONTAINER', False)
return service if inside_docker else 'localhost'
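# Hedged usage sketch (editorial addition; 'redis' is only an example service name):
#   get_host('redis')  # -> 'redis' inside a container with IS_DOCKER_CONTAINER set,
#                      #    'localhost' when run directly on the host machine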
|
[
"os.environ.get"
] |
[((258, 302), 'os.environ.get', 'os.environ.get', (['"""IS_DOCKER_CONTAINER"""', '(False)'], {}), "('IS_DOCKER_CONTAINER', False)\n", (272, 302), False, 'import os\n')]
|
#!/usr/bin/env python
from build import ninja_common
build = ninja_common.Build("fishbowl/body-frame-calc")
files = [
'main.cpp',
]
build.build_cmd('auv-body-frame-calc',
files,
pkg_confs=['eigen3'],
auv_deps=[],
lflags=[],
cflags=[])
|
[
"build.ninja_common.Build"
] |
[((63, 109), 'build.ninja_common.Build', 'ninja_common.Build', (['"""fishbowl/body-frame-calc"""'], {}), "('fishbowl/body-frame-calc')\n", (81, 109), False, 'from build import ninja_common\n')]
|
from flask import request
from flask import abort
from flask import url_for
from flask import render_template
from bson.objectid import ObjectId
from certifico import app
from certifico import mongo
from certifico import redis_queue
from certifico.mail import send_email
from certifico.forms import CertificateForm
def create_certificate():
form = CertificateForm()
if form.validate_on_submit():
certificate = mongo.db.certificates.insert_one({
'logo': form.data['logo'],
'message': form.data['message'],
'participants': form.participants_list
})
for p in form.participants_list:
redis_queue.enqueue(
send_email,
to_email=p.get('email'),
certificateLink=url_for(
'print_certificate',
certificate=certificate.inserted_id,
email=p.get('email'),
_external=True
)
)
return 'Os certificados do evento %s foram ' \
'enviados.' % certificate.inserted_id
return render_template('index.html', form=form,
analytics=app.config.get('GOOGLE_ANALYTICS')), 400
def print_certificate(certificate):
email = request.args.get('email')
if not email:
return abort(404)
certificate = mongo.db.certificates.find_one_or_404({
'_id': ObjectId(certificate)
})
try:
participant = next(filter(
lambda p: p.get('email') == email, certificate.get('participants')
))
except StopIteration:
return abort(404)
message = certificate.get('message')
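# Editorial note: '[participante]' (Portuguese for "participant") is the
# placeholder token in the stored certificate message; it is replaced below
# with the participant's name in upper case.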
message = message.replace(
'[participante]',
participant.get('name').upper())
return render_template(
'print.html',
logo=certificate.get('logo'),
message=message
)
|
[
"certifico.app.config.get",
"bson.objectid.ObjectId",
"flask.request.args.get",
"certifico.mongo.db.certificates.insert_one",
"flask.abort",
"certifico.forms.CertificateForm"
] |
[((356, 373), 'certifico.forms.CertificateForm', 'CertificateForm', ([], {}), '()\n', (371, 373), False, 'from certifico.forms import CertificateForm\n'), ((1290, 1315), 'flask.request.args.get', 'request.args.get', (['"""email"""'], {}), "('email')\n", (1306, 1315), False, 'from flask import request\n'), ((431, 569), 'certifico.mongo.db.certificates.insert_one', 'mongo.db.certificates.insert_one', (["{'logo': form.data['logo'], 'message': form.data['message'], 'participants':\n form.participants_list}"], {}), "({'logo': form.data['logo'], 'message':\n form.data['message'], 'participants': form.participants_list})\n", (463, 569), False, 'from certifico import mongo\n'), ((1350, 1360), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (1355, 1360), False, 'from flask import abort\n'), ((1435, 1456), 'bson.objectid.ObjectId', 'ObjectId', (['certificate'], {}), '(certificate)\n', (1443, 1456), False, 'from bson.objectid import ObjectId\n'), ((1640, 1650), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (1645, 1650), False, 'from flask import abort\n'), ((1199, 1233), 'certifico.app.config.get', 'app.config.get', (['"""GOOGLE_ANALYTICS"""'], {}), "('GOOGLE_ANALYTICS')\n", (1213, 1233), False, 'from certifico import app\n')]
|
'''
Created on 2016. 10. 26.
@author: "comfact"
'''
import re
from .model import *
def deployACI(desc, verbose=False, debug=False):
try: dom_ip = desc['Controller']['ip']
except: exit(1)
try: dom_user = desc['Controller']['user']
except: exit(1)
try: dom_pwd = desc['Controller']['pwd']
except: exit(1)
try: delete_empty_tenant = desc['Option']['deleteEmptyTenant']
except: delete_empty_tenant = False
try: deploy_incremental = desc['Option']['deployIncremental']
except: deploy_incremental = False
try:
dom = Controller(dom_ip, dom_user, dom_pwd, debug=debug)
except:
if verbose: print('Connection Failed : %s, %s, %s\n' % (dom_ip, dom_user, dom_pwd))
exit(1)
if verbose: print('Get Controller : %s, %s, %s\n' % (dom_ip, dom_user, dom_pwd))
common = dom.Tenant('common')
tenant_objs = {}
flt_objs = {}
ctr_objs = {}
ctx_objs = {}
l3e_objs = {}
bd_objs = {}
fe_objs = {}
sj_objs = {}
sn_objs = {}
ap_objs = {}
epg_objs = {}
delete_tenants = []
def parse_desc_unit(unit):
ret = {}
for key in unit:
if re.search(r'^[a-z]\w*', key): ret[key] = unit[key]
return ret
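# Editorial note: only keys starting with a lower-case letter are copied through
# as plain object attributes; capitalised keys ('Filter', 'Contract', 'Context',
# 'BridgeDomain', ...) are nested child sections handled explicitly below.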
tenant_list = desc['Tenant'] if 'Tenant' in desc and isinstance(desc['Tenant'], list) else []
for tenant in tenant_list:
tenant_obj = dom.Tenant.create(**parse_desc_unit(tenant))
tenant_objs[tenant_obj['dn']] = tenant_obj
if verbose: print('UPDATE >> fvTenant.dn=%s\n' % tenant_obj['dn'])
tenant_flt_objs = {}
tenant_ctr_objs = {}
tenant_ctx_objs = {}
tenant_l3e_objs = {}
tenant_bd_objs = {}
tenant_fe_objs = {}
tenant_sj_objs = {}
tenant_sn_objs = {}
tenant_ap_objs = {}
tenant_epg_objs = {}
#=======================================================================
# Create & Update
#=======================================================================
flt_list = tenant['Filter'] if 'Filter' in tenant and isinstance(tenant['Filter'], list) else []
for flt in flt_list:
flt_obj = tenant_obj.Filter.create(**parse_desc_unit(flt))
if verbose: print('UPDATE >> Filter:vzFilter.dn=%s\n' % flt_obj['dn'])
flt_objs[flt_obj['dn']] = flt_obj
tenant_flt_objs[flt_obj['name']] = flt_obj
fe_list = flt['FilterEntry'] if 'FilterEntry' in flt and isinstance(flt['FilterEntry'], list) else []
for fe in fe_list:
fe_obj = flt_obj.FilterEntry.create(**parse_desc_unit(fe))
if verbose: print('UPDATE >> FilterEntry:vzEntry.dn=%s\n' % fe_obj['dn'])
fe_objs[fe_obj['dn']] = fe_obj
tenant_fe_objs[fe_obj['name']] = fe_obj
ctr_list = tenant['Contract'] if 'Contract' in tenant and isinstance(tenant['Contract'], list) else []
for ctr in ctr_list:
ctr_obj = tenant_obj.Contract.create(**parse_desc_unit(ctr))
if verbose: print('UPDATE >> Contract:vzBrCP.dn=%s\n' % ctr_obj['dn'])
ctr_objs[ctr_obj['dn']] = ctr_obj
tenant_ctr_objs[ctr_obj['name']] = ctr_obj
sj_list = ctr['Subject'] if 'Subject' in ctr and isinstance(ctr['Subject'], list) else []
for sj in sj_list:
sj_obj = ctr_obj.Subject.create(**parse_desc_unit(sj))
if verbose: print('UPDATE >> Subject:vzSubj.dn=%s\n' % sj_obj['dn'])
sj_objs[sj_obj['dn']] = sj_obj
tenant_sj_objs[sj_obj['name']] = sj_obj
ctx_list = tenant['Context'] if 'Context' in tenant and isinstance(tenant['Context'], list) else []
for ctx in ctx_list:
ctx_obj = tenant_obj.Context.create(**parse_desc_unit(ctx))
if verbose: print('UPDATE >> Context:fvCtx.dn=%s\n' % ctx_obj['dn'])
ctx_objs[ctx_obj['dn']] = ctx_obj
tenant_ctx_objs[ctx_obj['name']] = ctx_obj
l3e_list = tenant['L3External'] if 'L3External' in tenant and isinstance(tenant['L3External'], list) else []
for l3e in l3e_list:
l3e_obj = tenant_obj.L3Out.create(**parse_desc_unit(l3e))
if verbose: print('UPDATE >> L3External:l3extOut.dn=%s\n' % l3e_obj['dn'])
l3e_objs[l3e_obj['dn']] = l3e_obj
tenant_l3e_objs[l3e_obj['name']] = l3e_obj
bd_list = tenant['BridgeDomain'] if 'BridgeDomain' in tenant and isinstance(tenant['BridgeDomain'], list) else []
for bd in bd_list:
bd_obj = tenant_obj.BridgeDomain.create(**parse_desc_unit(bd))
if verbose: print('UPDATE >> BridgeDomain:fvBD.dn=%s\n' % bd_obj['dn'])
bd_objs[bd_obj['dn']] = bd_obj
tenant_bd_objs[bd_obj['name']] = bd_obj
sn_list = bd['Subnet'] if 'Subnet' in bd and isinstance(bd['Subnet'], list) else []
for sn in sn_list:
sn_obj = bd_obj.Subnet.create(**parse_desc_unit(sn))
if verbose: print('UPDATE >> Subnet:fvSubnet.dn=%s\n' % sn_obj['dn'])
sn_objs[sn_obj['dn']] = sn_obj
tenant_sn_objs[sn_obj['name']] = sn_obj
ap_list = tenant['AppProfile'] if 'AppProfile' in tenant and isinstance(tenant['AppProfile'], list) else []
for ap in ap_list:
ap_obj = tenant_obj.AppProfile.create(**parse_desc_unit(ap))
if verbose: print('UPDATE >> AppProfile:fvAp.dn=%s\n' % ap_obj['dn'])
ap_objs[ap_obj['dn']] = ap_obj
tenant_ap_objs[ap_obj['name']] = ap_obj
epg_list = ap['EPG'] if 'EPG' in ap and isinstance(ap['EPG'], list) else []
for epg in epg_list:
epg_obj = ap_obj.EPG.create(**parse_desc_unit(epg))
if verbose: print('UPDATE >> EPG:fvAEPg.dn=%s\n' % epg_obj['dn'])
epg_objs[epg_obj['dn']] = epg_obj
tenant_epg_objs[epg_obj['name']] = epg_obj
#=======================================================================
# Relations
#=======================================================================
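    # Each relation target is looked up among the objects created for this tenant first;
    # on a lookup failure the code falls back to the shared 'common' tenant, and only
    # reports RELATE FAILED when neither lookup succeeds.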
for ctr in ctr_list:
sj_list = ctr['Subject'] if 'Subject' in ctr and isinstance(ctr['Subject'], list) else []
for sj in sj_list:
if 'Filter' in sj:
for flt in sj['Filter']:
try: tenant_sj_objs[sj['name']].relate(tenant_flt_objs[flt])
except:
try: tenant_sj_objs[sj['name']].relate(common.Filter(flt))
except:
if verbose: print('RELATE FAILED >> Subject:vzSubj.name=%s to Filter:vzFilter.name=%s\n' % (sj['name'], flt))
if verbose: print('RELATE >> Subject:vzSubj.name=%s to Filter:vzFilter.name=%s\n' % (sj['name'], flt))
for l3e in l3e_list:
if 'Context' in l3e:
try: tenant_l3e_objs[l3e['name']].relate(tenant_ctx_objs[l3e['Context']])
except:
try: tenant_l3e_objs[l3e['name']].relate(common.Context(l3e['Context']))
except:
                    if verbose: print('RELATE FAILED>> L3External:l3extOut.name=%s to Context:fvCtx.name=%s\n' % (l3e['name'], l3e['Context']))
            if verbose: print('RELATE >> L3External:l3extOut.name=%s to Context:fvCtx.name=%s\n' % (l3e['name'], l3e['Context']))
for bd in bd_list:
if 'Context' in bd:
try: tenant_bd_objs[bd['name']].relate(tenant_ctx_objs[bd['Context']])
except:
try: tenant_bd_objs[bd['name']].relate(common.Context(bd['Context']))
except:
if verbose: print('RELATE FAILED>> BridgeDomain:fvBD.name=%s to Context:fvCtx.name=%s\n' % (bd['name'], bd['Context']))
if verbose: print('RELATE >> BridgeDomain:fvBD.name=%s to Context:fvCtx.name=%s\n' % (bd['name'], bd['Context']))
if 'L3External' in bd:
try: tenant_bd_objs[bd['name']].relate(tenant_l3e_objs[bd['L3External']])
except:
try: tenant_bd_objs[bd['name']].relate(common.L3External(bd['L3External']))
except:
if verbose: print('RELATE FAILED>> BridgeDomain:fvBD.name=%s to L3External:l3extOut.name=%s\n' % (bd['name'], bd['L3External']))
if verbose: print('RELATE >> BridgeDomain:fvBD.name=%s to L3External:l3extOut.name=%s\n' % (bd['name'], bd['L3External']))
for ap in ap_list:
epg_list = ap['EPG'] if 'EPG' in ap and isinstance(ap['EPG'], list) else []
for epg in epg_list:
if 'BridgeDomain' in epg:
try: tenant_epg_objs[epg['name']].relate(tenant_bd_objs[epg['BridgeDomain']])
except:
try: tenant_epg_objs[epg['name']].relate(common.BridgeDomain(epg['BridgeDomain']))
except:
                        if verbose: print('RELATE FAILED>> EPG:fvAEPg.name=%s to BridgeDomain:fvBD.name=%s\n' % (epg['name'], epg['BridgeDomain']))
                if verbose: print('RELATE >> EPG:fvAEPg.name=%s to BridgeDomain:fvBD.name=%s\n' % (epg['name'], epg['BridgeDomain']))
if 'Consume' in epg:
for cons in epg['Consume']:
try: tenant_epg_objs[epg['name']].relate(tenant_ctr_objs[cons])
except:
try: tenant_epg_objs[epg['name']].relate(common.Contract(cons))
except:
                            if verbose: print('RELATE FAILED>> EPG:fvAEPg.name=%s to Consume:vzBrCP.name=%s\n' % (epg['name'], cons))
                    if verbose: print('RELATE >> EPG:fvAEPg.name=%s to Consume:vzBrCP.name=%s\n' % (epg['name'], cons))
if 'Provide' in epg:
for prov in epg['Provide']:
try: tenant_epg_objs[epg['name']].relate(tenant_ctr_objs[prov])
except:
try: tenant_epg_objs[epg['name']].relate(common.Contract(prov))
except:
                            if verbose: print('RELATE FAILED>> EPG:fvAEPg.name=%s to Provide:vzBrCP.name=%s\n' % (epg['name'], prov))
                    if verbose: print('RELATE >> EPG:fvAEPg.name=%s to Provide:vzBrCP.name=%s\n' % (epg['name'], prov))
if 'Path' in epg:
for path in epg['Path']:
ep_obj = dom.Pod(path['Pod']).Paths(path['Node']).Path(path['Intf'])
tenant_epg_objs[epg['name']].relate(ep_obj, **parse_desc_unit(path))
                    if verbose: print('RELATE >> EPG:fvAEPg.name=%s to Path:PathEp.name=%s\n' % (epg['name'], path['Pod'] + '/' + path['Node'] + '/' + path['Intf']))
if delete_empty_tenant and len(tenant_ctx_objs) == 0 and len(tenant_bd_objs) == 0 and len(tenant_ap_objs) == 0:
delete_tenants.append(tenant['name'])
def object_delete(obj):
dn = obj['dn']
obj.delete()
if verbose: print('DELETE >> %s.dn=%s\n' % (obj.class_name, dn))
def recursive_delete(obj):
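    # Depth-first cleanup: recurse into child objects first, then delete this object
    # unless its dn was part of the deployment pass above.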
children = obj.children()
for child in children:
if isinstance(child, aciFilterModel): recursive_delete(child)
elif isinstance(child, aciContractModel): recursive_delete(child)
elif isinstance(child, aciContextModel): recursive_delete(child)
elif isinstance(child, aciL3OutModel): recursive_delete(child)
elif isinstance(child, aciBridgeDomainModel): recursive_delete(child)
elif isinstance(child, aciFilterEntryModel): recursive_delete(child)
elif isinstance(child, aciSubjectModel): recursive_delete(child)
elif isinstance(child, aciSubnetModel): recursive_delete(child)
elif isinstance(child, aciAppProfileModel): recursive_delete(child)
elif isinstance(child, aciEPGModel): recursive_delete(child)
if isinstance(obj, aciFilterModel):
if obj['dn'] not in flt_objs: object_delete(obj)
elif isinstance(obj, aciContractModel):
if obj['dn'] not in ctr_objs: object_delete(obj)
elif isinstance(obj, aciContextModel):
if obj['dn'] not in ctx_objs: object_delete(obj)
elif isinstance(obj, aciL3OutModel):
if obj['dn'] not in l3e_objs: object_delete(obj)
elif isinstance(obj, aciFilterEntryModel):
if obj['dn'] not in fe_objs: object_delete(obj)
elif isinstance(obj, aciSubjectModel):
if obj['dn'] not in sj_objs: object_delete(obj)
elif isinstance(obj, aciBridgeDomainModel):
if obj['dn'] not in bd_objs: object_delete(obj)
elif isinstance(obj, aciAppProfileModel):
if obj['dn'] not in ap_objs: object_delete(obj)
elif isinstance(obj, aciSubnetModel):
if obj['dn'] not in sn_objs: object_delete(obj)
elif isinstance(obj, aciEPGModel):
if obj['dn'] not in epg_objs: object_delete(obj)
if not deploy_incremental:
for tenant in tenant_list:
try: tenant_obj = dom.Tenant(tenant['name'])
except: continue
recursive_delete(tenant_obj)
if tenant['name'] in delete_tenants:
object_delete(tenant_obj)
dom.close()
return {'Tenant' : tenant_objs.keys(),
'Filter' : flt_objs.keys(),
'Contract' : ctr_objs.keys(),
'Context' : ctx_objs.keys(),
'L3External' : l3e_objs.keys(),
'BridgeDomain' : bd_objs.keys(),
'FilterEntry' : fe_objs.keys(),
'Subject' : sj_objs.keys(),
'Subnet' : sn_objs.keys(),
'AppProfile' : ap_objs.keys(),
'EPG' : epg_objs.keys()}
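# Illustrative sketch of the 'desc' dictionary this deployment logic consumes.
# Only the key names are taken from the lookups above; every value is hypothetical,
# and the 'ip'/'user' keys under 'Controller' are assumed (only 'pwd' appears in this excerpt).
#
# desc = {
#     'Controller': {'ip': '198.51.100.10', 'user': 'admin', 'pwd': 'secret'},
#     'Option': {'deleteEmptyTenant': False, 'deployIncremental': False},
#     'Tenant': [
#         {'name': 'example-tenant',
#          'Filter': [{'name': 'http', 'FilterEntry': [{'name': 'tcp-80'}]}],
#          'Contract': [{'name': 'web', 'Subject': [{'name': 'web-subj', 'Filter': ['http']}]}],
#          'Context': [{'name': 'vrf1'}],
#          'BridgeDomain': [{'name': 'bd1', 'Context': 'vrf1', 'Subnet': [{'name': '192.168.0.1/24'}]}],
#          'AppProfile': [{'name': 'app1',
#                          'EPG': [{'name': 'epg1', 'BridgeDomain': 'bd1',
#                                   'Provide': ['web'], 'Consume': ['web']}]}],
#         },
#     ],
# }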
|
[
"re.search"
] |
[((1247, 1275), 're.search', 're.search', (['"""^[a-z]\\\\w*"""', 'key'], {}), "('^[a-z]\\\\w*', key)\n", (1256, 1275), False, 'import re\n')]
|
"""
Config Reader
@author: <NAME>
"""
#!/usr/bin/python
from ConfigParser import ConfigParser
class ConfigParse():
'''
This class reads config.ini file and sets the required user inputs
in the class attributes.
Attributes
----------
1. word2vec_model
Type: str
Description: Path of word2vec trained model file.
Default Value: 'GoogleNews-vectors-negative300.bin/GoogleNews-vectors-negative300.bin'
2. threshold
Type: float
Description: Threshold value to be used for clustering
Default Value: 0.80
3. input_file_path
Type: str
Description: Path of input text file containing sentences to be clustered.
Default Value: None
4. output_dir_path
Type: str
Description: Path of directory where output clusters are to be kept.
Default Value: output_clusters
5. cluster_overlap
Type: bool
Description: If set to False, then no two clusters will have same sentence.
Default Value: True
6. word_vector_dim
Type: int
Description: Dimension of word vectors.
Default Value: 300
7. representative_word_vector
Type: str
Description: Specify whether the representative sentence of each cluster is to be
computed using "add" or "average".
Default Value: average
'''
def __init__(self):
'''
This method declares the class attributes.
'''
self.word2vec_model = 'GoogleNews-vectors-negative300.bin/GoogleNews-vectors-negative300.bin'
self.threshold = 0.80
self.input_file_path = None
self.output_dir_path = './output_clusters'
self.cluster_overlap = True
self.word_vector_dim = 300
self.representative_word_vector = 'average'
def config_reader(self):
'''
This method parses the config file and read the variables defined by
the user in the config.ini file. The values of the variables are then
set in the corresponding class attributes.
'''
parser = ConfigParser()
# Read config.ini
parser.read('config.ini')
# Read input variables for the code
if parser.get('Input Variables','word2vec_model'):
self.word2vec_model = parser.get('Input Variables','word2vec_model')
if parser.get('Input Variables','threshold'):
self.threshold = parser.getfloat('Input Variables','threshold')
if parser.get('Input Variables','input_file_path'):
self.input_file_path = parser.get('Input Variables','input_file_path')
if parser.get('Input Variables', 'output_dir_path'):
self.output_dir_path = parser.get('Input Variables', 'output_dir_path')
if parser.get('Input Variables', 'cluster_overlap'):
self.cluster_overlap = parser.getboolean('Input Variables', 'cluster_overlap')
if parser.get('Input Variables', 'word_vector_dim'):
self.word_vector_dim = parser.getint('Input Variables', 'word_vector_dim')
if parser.get('Input Variables', 'representative_word_vector'):
self.representative_word_vector = parser.get('Input Variables', 'representative_word_vector')
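# Illustrative sketch of a matching config.ini; the section and option names come from
# config_reader() above, while the values shown are only examples.
#
# [Input Variables]
# word2vec_model = GoogleNews-vectors-negative300.bin/GoogleNews-vectors-negative300.bin
# threshold = 0.80
# input_file_path = ./sentences.txt
# output_dir_path = ./output_clusters
# cluster_overlap = True
# word_vector_dim = 300
# representative_word_vector = average
#
# Typical usage:
#   conf = ConfigParse()
#   conf.config_reader()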
|
[
"ConfigParser.ConfigParser"
] |
[((2107, 2121), 'ConfigParser.ConfigParser', 'ConfigParser', ([], {}), '()\n', (2119, 2121), False, 'from ConfigParser import ConfigParser\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 2 12:21:09 2021
@author: Mahmu
"""
import random
import pylab
import numpy as np
x1 = random.uniform(-1, 1)
y1 = random.uniform(-1, 1)
print(str(x1) + "\n" + str(y1))
|
[
"random.uniform"
] |
[((137, 158), 'random.uniform', 'random.uniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (151, 158), False, 'import random\n'), ((164, 185), 'random.uniform', 'random.uniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (178, 185), False, 'import random\n')]
|
# -*- coding: utf-8 -*-
from django.contrib.gis.db import models
from django.contrib.postgres.fields.jsonb import JSONField
from django.utils.translation import ugettext_lazy as _
from saywiti.common.models import TimeStampedModel
class Level(TimeStampedModel):
parent = models.ForeignKey('self', related_name='children', null=True, blank=True)
name = models.CharField(_('Name'), max_length=100)
description = models.CharField(_('Description'), max_length=255, null=True, blank=True)
def __str__(self):
return self.name
class Region(TimeStampedModel):
parent = models.ForeignKey('self', related_name='children', null=True, blank=True)
level = models.ForeignKey('Level', related_name='regions')
name = models.CharField(_('Name'), max_length=100)
is_osm_relation = models.BooleanField(_('Is an OSM relation?'), default=False)
osm_tags = JSONField(_('OSM Tags'), null=True, blank=True)
osm_relation_id = models.IntegerField(_('OSM Relation ID'), null=True, blank=True)
polygon = models.PolygonField()
def __str__(self):
return self.name
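# Illustrative usage sketch (assumes migrations have been applied and GEOS is available;
# the names and the polygon below are made up):
#
#   from django.contrib.gis.geos import Polygon
#   country = Level.objects.create(name='Country')
#   region = Region.objects.create(
#       level=country,
#       name='Exampleland',
#       polygon=Polygon(((0, 0), (0, 1), (1, 1), (0, 0))),
#   )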
|
[
"django.utils.translation.ugettext_lazy",
"django.contrib.gis.db.models.ForeignKey",
"django.contrib.gis.db.models.PolygonField"
] |
[((279, 352), 'django.contrib.gis.db.models.ForeignKey', 'models.ForeignKey', (['"""self"""'], {'related_name': '"""children"""', 'null': '(True)', 'blank': '(True)'}), "('self', related_name='children', null=True, blank=True)\n", (296, 352), False, 'from django.contrib.gis.db import models\n'), ((598, 671), 'django.contrib.gis.db.models.ForeignKey', 'models.ForeignKey', (['"""self"""'], {'related_name': '"""children"""', 'null': '(True)', 'blank': '(True)'}), "('self', related_name='children', null=True, blank=True)\n", (615, 671), False, 'from django.contrib.gis.db import models\n'), ((684, 734), 'django.contrib.gis.db.models.ForeignKey', 'models.ForeignKey', (['"""Level"""'], {'related_name': '"""regions"""'}), "('Level', related_name='regions')\n", (701, 734), False, 'from django.contrib.gis.db import models\n'), ((1039, 1060), 'django.contrib.gis.db.models.PolygonField', 'models.PolygonField', ([], {}), '()\n', (1058, 1060), False, 'from django.contrib.gis.db import models\n'), ((382, 391), 'django.utils.translation.ugettext_lazy', '_', (['"""Name"""'], {}), "('Name')\n", (383, 391), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((444, 460), 'django.utils.translation.ugettext_lazy', '_', (['"""Description"""'], {}), "('Description')\n", (445, 460), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((764, 773), 'django.utils.translation.ugettext_lazy', '_', (['"""Name"""'], {}), "('Name')\n", (765, 773), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((833, 857), 'django.utils.translation.ugettext_lazy', '_', (['"""Is an OSM relation?"""'], {}), "('Is an OSM relation?')\n", (834, 857), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((899, 912), 'django.utils.translation.ugettext_lazy', '_', (['"""OSM Tags"""'], {}), "('OSM Tags')\n", (900, 912), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((979, 999), 'django.utils.translation.ugettext_lazy', '_', (['"""OSM Relation ID"""'], {}), "('OSM Relation ID')\n", (980, 999), True, 'from django.utils.translation import ugettext_lazy as _\n')]
|
from doppelkopf.toggles import Toggle
from datetime import datetime, timedelta
toggles_from_db = [
Toggle(name="db-only", enabled=False),
Toggle(name="db-and-code", enabled=True),
]
toggles_from_code = [
Toggle(name="code-only", enabled=False),
Toggle(name="db-and-code", enabled=False),
]
def test_merge_toggles():
merged = Toggle.merge(toggles_from_db, toggles_from_code)
code_and_db = Toggle(
name="db-and-code",
description="a toggle thats defined in code and database",
enabled=True,
)
code_only = Toggle(
name="code-only",
description="a toggle thats only defined in code",
enabled=False,
)
assert code_and_db in merged
assert code_only in merged
assert len(merged) == 2
def test_serialize():
toggle = Toggle(id=1, name="some", enabled=True, description="some description")
expected_serialization = {"id": 1, "name": "some", "enabled": True}
assert toggle.serialize() == expected_serialization
def test_update_toggle_state():
last_changed = datetime.utcnow() - timedelta(days=2)
t = Toggle(name="some-toggle", enabled=False, last_changed_at=last_changed)
t.toggle()
assert t.enabled is True
assert t.last_changed_at > datetime.utcnow() - timedelta(seconds=2)
|
[
"datetime.datetime.utcnow",
"datetime.timedelta",
"doppelkopf.toggles.Toggle",
"doppelkopf.toggles.Toggle.merge"
] |
[((105, 142), 'doppelkopf.toggles.Toggle', 'Toggle', ([], {'name': '"""db-only"""', 'enabled': '(False)'}), "(name='db-only', enabled=False)\n", (111, 142), False, 'from doppelkopf.toggles import Toggle\n'), ((148, 188), 'doppelkopf.toggles.Toggle', 'Toggle', ([], {'name': '"""db-and-code"""', 'enabled': '(True)'}), "(name='db-and-code', enabled=True)\n", (154, 188), False, 'from doppelkopf.toggles import Toggle\n'), ((219, 258), 'doppelkopf.toggles.Toggle', 'Toggle', ([], {'name': '"""code-only"""', 'enabled': '(False)'}), "(name='code-only', enabled=False)\n", (225, 258), False, 'from doppelkopf.toggles import Toggle\n'), ((264, 305), 'doppelkopf.toggles.Toggle', 'Toggle', ([], {'name': '"""db-and-code"""', 'enabled': '(False)'}), "(name='db-and-code', enabled=False)\n", (270, 305), False, 'from doppelkopf.toggles import Toggle\n'), ((350, 398), 'doppelkopf.toggles.Toggle.merge', 'Toggle.merge', (['toggles_from_db', 'toggles_from_code'], {}), '(toggles_from_db, toggles_from_code)\n', (362, 398), False, 'from doppelkopf.toggles import Toggle\n'), ((418, 522), 'doppelkopf.toggles.Toggle', 'Toggle', ([], {'name': '"""db-and-code"""', 'description': '"""a toggle thats defined in code and database"""', 'enabled': '(True)'}), "(name='db-and-code', description=\n 'a toggle thats defined in code and database', enabled=True)\n", (424, 522), False, 'from doppelkopf.toggles import Toggle\n'), ((565, 659), 'doppelkopf.toggles.Toggle', 'Toggle', ([], {'name': '"""code-only"""', 'description': '"""a toggle thats only defined in code"""', 'enabled': '(False)'}), "(name='code-only', description='a toggle thats only defined in code',\n enabled=False)\n", (571, 659), False, 'from doppelkopf.toggles import Toggle\n'), ((817, 888), 'doppelkopf.toggles.Toggle', 'Toggle', ([], {'id': '(1)', 'name': '"""some"""', 'enabled': '(True)', 'description': '"""some description"""'}), "(id=1, name='some', enabled=True, description='some description')\n", (823, 888), False, 'from doppelkopf.toggles import Toggle\n'), ((1117, 1188), 'doppelkopf.toggles.Toggle', 'Toggle', ([], {'name': '"""some-toggle"""', 'enabled': '(False)', 'last_changed_at': 'last_changed'}), "(name='some-toggle', enabled=False, last_changed_at=last_changed)\n", (1123, 1188), False, 'from doppelkopf.toggles import Toggle\n'), ((1071, 1088), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1086, 1088), False, 'from datetime import datetime, timedelta\n'), ((1091, 1108), 'datetime.timedelta', 'timedelta', ([], {'days': '(2)'}), '(days=2)\n', (1100, 1108), False, 'from datetime import datetime, timedelta\n'), ((1266, 1283), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1281, 1283), False, 'from datetime import datetime, timedelta\n'), ((1286, 1306), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(2)'}), '(seconds=2)\n', (1295, 1306), False, 'from datetime import datetime, timedelta\n')]
|
import os,random
os.environ["KERAS_BACKEND"] = "tensorflow"
from PIL import Image
from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D
import h5py
import numpy as np
from keras.layers import Input,merge,Lambda
from keras.layers.core import Reshape,Dense,Dropout,Activation,Flatten
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D, UpSampling2D,AveragePooling2D, Conv2DTranspose
from keras.layers.normalization import *
from keras.optimizers import *
from keras import initializers
import matplotlib.pyplot as plt
import cPickle, random, sys, keras
from keras.models import Model
from functools import partial
normal = partial(initializers.normal, scale=.02)
## load and preprocess the dataset (use FERG for example) ##
batch_size = 256
num_ep = 7
num_pp = 6
epochs = 1000
img_rows, img_cols = 64, 64
clipvalue = 20
noise_dim = 10
c_dim = num_pp
n_dim = 10
z_dim = 128
date = 2018
#
print ('Loading data...')
f = h5py.File('FERG_64_64_color.mat')
print ('Finished loading....')
f = f['imdb']
label1 = f['id']
label1 = np.asarray(label1)
label1 -= 1
label2 = f['ep']
label2 = np.asarray(label2)
label2 -= 1
label3 = f['set']
label3 = np.asarray(label3)
FrameNum = f['fn']
FrameNum = np.asarray(FrameNum)
x = f['images']
x = np.asarray(x);
x = np.transpose(x, [3,2,1,0]) # matlab ordering to python ordering
print('x shape:', x.shape)
idx_train = np.asarray(np.where(label3 == 0))
idx_test = np.asarray(np.where(label3 == 1))
print('idx_test shape',idx_test.shape)
x_train = x[idx_train[1,:],:,:,:]
x_test = x[idx_test[1,:],:,:,:]
y_train1 = label1[:,idx_train[1,:]]
y_test1 = label1[:,idx_test[1,:]]
y_train2 = label2[:,idx_train[1,:]]
y_test2 = label2[:,idx_test[1,:]]
y_test1_ori = y_test1
y_test2_ori = y_test2
x_train = (x_train- 127.5)/127.5
x_test = (x_test- 127.5)/127.5
x_train = x_train.astype('float16')
x_test = x_test.astype('float16')
y_train1 = keras.utils.to_categorical(y_train1, num_pp)
y_test1 = keras.utils.to_categorical(y_test1, num_pp)
y_train2 = keras.utils.to_categorical(y_train2, num_ep)
y_test2 = keras.utils.to_categorical(y_test2, num_ep)
###############################
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
print('label 1 train', y_train1.shape)
print('label 1 test', y_test1.shape)
print('label 2 train', y_train2.shape)
print('label 2 test', y_test2.shape)
#
x_ori = (x - 127.5)/127.5
opt = RMSprop(lr = 0.0003,decay = 1e-6)
dopt = RMSprop(lr = 0.0003,decay = 1e-6)
epsilon_std = 1.0
def KL_loss(y_true, y_pred):
z_mean = y_pred[:, 0:z_dim]
z_log_var = y_pred[:, z_dim:2 * z_dim]
kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
return K.mean(kl_loss)
def sampling(args):
z_mean, z_log_var = args
epsilon = K.random_normal(shape=(K.shape(z_mean)[0], z_dim), mean=0.,
stddev=epsilon_std)
return z_mean + K.exp((z_log_var) / 2) * epsilon
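# Note on the two helpers above: KL_loss expects the encoder's concatenated [mean, log_var]
# output and computes the usual VAE term -0.5 * sum(1 + log_var - mean^2 - exp(log_var)),
# while sampling() applies the reparameterization trick z = mean + exp(log_var / 2) * eps
# with eps ~ N(0, 1), which keeps the random draw differentiable with respect to the encoder.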
############ Build the GAN architecture #################
def model_encoder(z_dim, input_shape, units=512, dropout=0.3):
k = 5
x = Input(input_shape)
h = Conv2D(units/8 , (k, k), strides = (2,2), border_mode='same')(x)
h = BatchNormalization(momentum=0.8)(h)
h = Dropout(dropout)(h)
# h = MaxPooling2D(pool_size=(2, 2))(h)
h = LeakyReLU(0.2)(h)
h = Conv2D(units/4, (k, k), strides = (2,2), border_mode='same')(h)
h = BatchNormalization(momentum=0.8)(h)
h = Dropout(dropout)(h)
# h = MaxPooling2D(pool_size=(2, 2))(h)
h = LeakyReLU(0.2)(h)
h = Conv2D(units / 2, (k, k), strides = (2,2), border_mode='same')(h)
h = BatchNormalization(momentum=0.8)(h)
h = Dropout(dropout)(h)
# h = MaxPooling2D(pool_size=(2, 2))(h)
h = LeakyReLU(0.2)(h)
h = Conv2D(units , (k, k), strides = (2,2), border_mode='same')(h)
h = BatchNormalization(momentum=0.8)(h)
h = Dropout(dropout)(h)
h = LeakyReLU(0.2)(h)
# h = AveragePooling2D((6,6))(h)
h = Flatten()(h)
# h = Dense(latent_dim, name="encoder_mu")(h)
mean = Dense(z_dim, name="encoder_mean")(h)
logvar = Dense(z_dim, name="encoder_sigma", activation = 'sigmoid')(h)
# meansigma = Model(x, [mean, logsigma],name='encoder')
z = Lambda(sampling, output_shape=(z_dim,))([mean, logvar])
h2 = keras.layers.concatenate([mean,logvar])
return Model(x,[z, h2], name = 'Encoder')
def model_decoder(z_dim, c_dim):
k = 5
x = Input(shape = (z_dim,))
auxiliary_c = Input(shape=(c_dim,), name='aux_input_c')
# auxiliary_z = Input(shape=(n_dim,), name='aux_input_z')
h = keras.layers.concatenate([x, auxiliary_c])
h = Dense(4 * 4 * 128, activation = 'relu')(h)
h = Reshape((4, 4, 128))(h)
# h = LeakyReLU(0.2)(h)
h = Conv2DTranspose(units, (k,k), strides = (2,2), padding = 'same', activation = 'relu')(h) # 32*32*64
# h = Dropout(dropout)(h)
h = BatchNormalization(momentum=0.8)(h)
# h = LeakyReLU(0.2)(h)
h = Conv2DTranspose(units/2, (k,k), strides = (2,2), padding = 'same', activation = 'relu')(h) # 64*64*64
# h = Dropout(dropout)(h)
h = BatchNormalization(momentum=0.8)(h)
# h = LeakyReLU(0.2)(h)
h = Conv2DTranspose(units/2, (k,k), strides = (2,2), padding = 'same', activation = 'relu')(h) # 8*6*64
# h = Dropout(dropout)(h)
h = BatchNormalization(momentum=0.8)(h)
h = Conv2DTranspose(3, (k,k), strides = (2,2), padding = 'same', activation = 'tanh')(h) # 8*6*64
return Model([x,auxiliary_c], h, name="Decoder")
# #### reload the trained weights to implement the anticipated applications####
input_img = Input((img_rows,img_cols,3))
z_dim = 128
units = 256
ee = 200
auxiliary_c = Input(shape=(c_dim,), name='aux_input_c')
auxiliary_z = Input(shape=(n_dim,), name='aux_input_z')
# generator = model_generator(z_dim = z_dim, input_shape =(img_rows, img_cols, 1) , units=units, dropout=0.3)
encoder = model_encoder(z_dim = z_dim, input_shape =(img_rows, img_cols, 3) , units=units, dropout=0.3)
encoder.load_weights('trained_weight_1.h5')
encoder.compile(loss = 'binary_crossentropy',optimizer = opt)
encoder.summary()
decoder = model_decoder(z_dim = z_dim, c_dim=c_dim)
decoder.load_weights('trained_weight_2.h5')
decoder.compile(loss = 'binary_crossentropy',optimizer = opt)
decoder.summary()
##### expression morphing #####
for xx in xrange(0,1):
idx1 = 4300
idx2 = 7423
img1 = np.squeeze(x_ori[idx1, :, :, :])
img2 = np.squeeze(x_ori[idx2, :, :, :])
z_1, mean_var_imp = encoder.predict(np.expand_dims(img1, axis=0))
z_2, mean_var_imp = encoder.predict(np.expand_dims(img2, axis=0))
plt.figure(figsize=(2, 2))
img1 =np.squeeze(x_ori[idx1,:,:,:])
img1 = np.uint8(img1*127.5+127.5)
image = Image.fromarray(img1, 'RGB')
image.save('ori_1.tif')
img2 = np.squeeze(x_ori[idx2,:,:,:])
img2 = np.uint8(img2*127.5+127.5)
# plt.imshow(img2)
image = Image.fromarray(img2, 'RGB')
image.save('ori_2.tif')
arr = np.linspace(0.0, 1.0, num=1000)
for ii in xrange(0,1000):
c = np.ones((1,))*0
c = keras.utils.to_categorical(c, num_pp)
z_interp = z_1*(arr[ii])+z_2*(1.0-arr[ii])
z_interp = np.reshape(z_interp,(1,z_dim))
img = decoder.predict([z_interp,c])
img = np.squeeze(img)
img = np.uint8(img*127.5+127.5)
image = Image.fromarray(img, 'RGB')
image.save('interp_'+str(ii)+'.tif')
# ############### Image inpainting ##############
loc = 'bottom'
for pp in xrange(0,1):
for xx in xrange(0,8):
idx = 123
input_img = np.squeeze(x_ori[idx,:,:,:])
img = np.uint8(input_img*127.5+127.5)
image = Image.fromarray(img, 'RGB')
image.save('original.tif')
impanted_img = np.squeeze(x_ori[idx,:,:,:])
impanted_img[40:55,18:47,:] = 0 # mouth blocked
print('impanted_img',impanted_img.shape)
z_impanted,mean_var_imp = encoder.predict(np.expand_dims(impanted_img,axis =0))
c = np.ones((1,))*1
c = keras.utils.to_categorical(c, num_pp)
print('c',c)
img_rec = decoder.predict([z_impanted,c])
img_rec = np.squeeze(img_rec)
img = np.uint8(impanted_img*127.5+127.5)
image = Image.fromarray(img, 'RGB')
image.save('test_blocked_pp1'+'.tif')
img = np.uint8(img_rec*127.5+127.5)
image = Image.fromarray(img, 'RGB')
image.save('test_rec_pp1'+'.tif')
impanted_img[40:55,18:47,:] = img_rec[40:55,18:47,:]
img = np.uint8(impanted_img*127.5+127.5)
image = Image.fromarray(img, 'RGB')
image.save('test_replaced_pp1'+'.tif')
#### Generate images without input image ###
def sampling_np( z_mean, z_log_var ):
epsilon = np.random.normal(loc=0., scale=epsilon_std, size=(z_mean.shape[0], z_dim), )
return z_mean + np.exp(z_log_var / 2) * epsilon
# mean and variance of the prior distribution #
mean_train_sup = np.zeros((1,128))
var_train_sup = np.ones((1,128))
for i in xrange(0,num_pp):
for xx in xrange(0,100):
z = sampling_np(mean_train_sup, var_train_sup)
print(z.shape)
c = np.ones(1,)*i
c = keras.utils.to_categorical(c, num_pp)
img = decoder.predict([z, c])
img = np.squeeze(img)
img = np.uint8(img*127.5+127.5)
image = Image.fromarray(img, 'RGB')
image.save('synthesis_no_input_'+'pp_'+str(i)+'.tif')
|
[
"keras.layers.core.Reshape",
"numpy.ones",
"keras.models.Model",
"matplotlib.pyplot.figure",
"numpy.exp",
"numpy.random.normal",
"keras.layers.core.Flatten",
"keras.layers.Input",
"keras.layers.concatenate",
"numpy.transpose",
"numpy.reshape",
"numpy.linspace",
"keras.layers.core.Dropout",
"keras.utils.to_categorical",
"functools.partial",
"h5py.File",
"numpy.uint8",
"keras.layers.core.Dense",
"keras.layers.convolutional.Conv2DTranspose",
"numpy.asarray",
"keras.layers.Conv2D",
"numpy.squeeze",
"numpy.zeros",
"numpy.expand_dims",
"numpy.where",
"keras.layers.advanced_activations.LeakyReLU",
"keras.layers.Lambda",
"PIL.Image.fromarray"
] |
[((725, 765), 'functools.partial', 'partial', (['initializers.normal'], {'scale': '(0.02)'}), '(initializers.normal, scale=0.02)\n', (732, 765), False, 'from functools import partial\n'), ((1065, 1098), 'h5py.File', 'h5py.File', (['"""FERG_64_64_color.mat"""'], {}), "('FERG_64_64_color.mat')\n", (1074, 1098), False, 'import h5py\n'), ((1176, 1194), 'numpy.asarray', 'np.asarray', (['label1'], {}), '(label1)\n', (1186, 1194), True, 'import numpy as np\n'), ((1233, 1251), 'numpy.asarray', 'np.asarray', (['label2'], {}), '(label2)\n', (1243, 1251), True, 'import numpy as np\n'), ((1291, 1309), 'numpy.asarray', 'np.asarray', (['label3'], {}), '(label3)\n', (1301, 1309), True, 'import numpy as np\n'), ((1340, 1360), 'numpy.asarray', 'np.asarray', (['FrameNum'], {}), '(FrameNum)\n', (1350, 1360), True, 'import numpy as np\n'), ((1390, 1403), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (1400, 1403), True, 'import numpy as np\n'), ((1413, 1442), 'numpy.transpose', 'np.transpose', (['x', '[3, 2, 1, 0]'], {}), '(x, [3, 2, 1, 0])\n', (1425, 1442), True, 'import numpy as np\n'), ((2047, 2091), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_train1', 'num_pp'], {}), '(y_train1, num_pp)\n', (2073, 2091), False, 'import cPickle, random, sys, keras\n'), ((2103, 2146), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_test1', 'num_pp'], {}), '(y_test1, num_pp)\n', (2129, 2146), False, 'import cPickle, random, sys, keras\n'), ((2158, 2202), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_train2', 'num_ep'], {}), '(y_train2, num_ep)\n', (2184, 2202), False, 'import cPickle, random, sys, keras\n'), ((2214, 2257), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_test2', 'num_ep'], {}), '(y_test2, num_ep)\n', (2240, 2257), False, 'import cPickle, random, sys, keras\n'), ((5749, 5779), 'keras.layers.Input', 'Input', (['(img_rows, img_cols, 3)'], {}), '((img_rows, img_cols, 3))\n', (5754, 5779), False, 'from keras.layers import Input, merge, Lambda\n'), ((5837, 5878), 'keras.layers.Input', 'Input', ([], {'shape': '(c_dim,)', 'name': '"""aux_input_c"""'}), "(shape=(c_dim,), name='aux_input_c')\n", (5842, 5878), False, 'from keras.layers import Input, merge, Lambda\n'), ((5893, 5934), 'keras.layers.Input', 'Input', ([], {'shape': '(n_dim,)', 'name': '"""aux_input_z"""'}), "(shape=(n_dim,), name='aux_input_z')\n", (5898, 5934), False, 'from keras.layers import Input, merge, Lambda\n'), ((9090, 9108), 'numpy.zeros', 'np.zeros', (['(1, 128)'], {}), '((1, 128))\n', (9098, 9108), True, 'import numpy as np\n'), ((9126, 9143), 'numpy.ones', 'np.ones', (['(1, 128)'], {}), '((1, 128))\n', (9133, 9143), True, 'import numpy as np\n'), ((1527, 1548), 'numpy.where', 'np.where', (['(label3 == 0)'], {}), '(label3 == 0)\n', (1535, 1548), True, 'import numpy as np\n'), ((1573, 1594), 'numpy.where', 'np.where', (['(label3 == 1)'], {}), '(label3 == 1)\n', (1581, 1594), True, 'import numpy as np\n'), ((3238, 3256), 'keras.layers.Input', 'Input', (['input_shape'], {}), '(input_shape)\n', (3243, 3256), False, 'from keras.layers import Input, merge, Lambda\n'), ((4440, 4480), 'keras.layers.concatenate', 'keras.layers.concatenate', (['[mean, logvar]'], {}), '([mean, logvar])\n', (4464, 4480), False, 'import cPickle, random, sys, keras\n'), ((4491, 4524), 'keras.models.Model', 'Model', (['x', '[z, h2]'], {'name': '"""Encoder"""'}), "(x, [z, h2], name='Encoder')\n", (4496, 4524), False, 'from keras.models import Model\n'), ((4578, 4599), 'keras.layers.Input', 'Input', 
([], {'shape': '(z_dim,)'}), '(shape=(z_dim,))\n', (4583, 4599), False, 'from keras.layers import Input, merge, Lambda\n'), ((4620, 4661), 'keras.layers.Input', 'Input', ([], {'shape': '(c_dim,)', 'name': '"""aux_input_c"""'}), "(shape=(c_dim,), name='aux_input_c')\n", (4625, 4661), False, 'from keras.layers import Input, merge, Lambda\n'), ((4732, 4774), 'keras.layers.concatenate', 'keras.layers.concatenate', (['[x, auxiliary_c]'], {}), '([x, auxiliary_c])\n', (4756, 4774), False, 'import cPickle, random, sys, keras\n'), ((5612, 5654), 'keras.models.Model', 'Model', (['[x, auxiliary_c]', 'h'], {'name': '"""Decoder"""'}), "([x, auxiliary_c], h, name='Decoder')\n", (5617, 5654), False, 'from keras.models import Model\n'), ((6554, 6586), 'numpy.squeeze', 'np.squeeze', (['x_ori[idx1, :, :, :]'], {}), '(x_ori[idx1, :, :, :])\n', (6564, 6586), True, 'import numpy as np\n'), ((6598, 6630), 'numpy.squeeze', 'np.squeeze', (['x_ori[idx2, :, :, :]'], {}), '(x_ori[idx2, :, :, :])\n', (6608, 6630), True, 'import numpy as np\n'), ((6776, 6802), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(2, 2)'}), '(figsize=(2, 2))\n', (6786, 6802), True, 'import matplotlib.pyplot as plt\n'), ((6813, 6845), 'numpy.squeeze', 'np.squeeze', (['x_ori[idx1, :, :, :]'], {}), '(x_ori[idx1, :, :, :])\n', (6823, 6845), True, 'import numpy as np\n'), ((6854, 6884), 'numpy.uint8', 'np.uint8', (['(img1 * 127.5 + 127.5)'], {}), '(img1 * 127.5 + 127.5)\n', (6862, 6884), True, 'import numpy as np\n'), ((6893, 6921), 'PIL.Image.fromarray', 'Image.fromarray', (['img1', '"""RGB"""'], {}), "(img1, 'RGB')\n", (6908, 6921), False, 'from PIL import Image\n'), ((6961, 6993), 'numpy.squeeze', 'np.squeeze', (['x_ori[idx2, :, :, :]'], {}), '(x_ori[idx2, :, :, :])\n', (6971, 6993), True, 'import numpy as np\n'), ((7002, 7032), 'numpy.uint8', 'np.uint8', (['(img2 * 127.5 + 127.5)'], {}), '(img2 * 127.5 + 127.5)\n', (7010, 7032), True, 'import numpy as np\n'), ((7064, 7092), 'PIL.Image.fromarray', 'Image.fromarray', (['img2', '"""RGB"""'], {}), "(img2, 'RGB')\n", (7079, 7092), False, 'from PIL import Image\n'), ((7131, 7162), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)'], {'num': '(1000)'}), '(0.0, 1.0, num=1000)\n', (7142, 7162), True, 'import numpy as np\n'), ((8894, 8969), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': 'epsilon_std', 'size': '(z_mean.shape[0], z_dim)'}), '(loc=0.0, scale=epsilon_std, size=(z_mean.shape[0], z_dim))\n', (8910, 8969), True, 'import numpy as np\n'), ((3265, 3326), 'keras.layers.Conv2D', 'Conv2D', (['(units / 8)', '(k, k)'], {'strides': '(2, 2)', 'border_mode': '"""same"""'}), "(units / 8, (k, k), strides=(2, 2), border_mode='same')\n", (3271, 3326), False, 'from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D\n'), ((3382, 3398), 'keras.layers.core.Dropout', 'Dropout', (['dropout'], {}), '(dropout)\n', (3389, 3398), False, 'from keras.layers.core import Reshape, Dense, Dropout, Activation, Flatten\n'), ((3454, 3468), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (3463, 3468), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((3480, 3541), 'keras.layers.Conv2D', 'Conv2D', (['(units / 4)', '(k, k)'], {'strides': '(2, 2)', 'border_mode': '"""same"""'}), "(units / 4, (k, k), strides=(2, 2), border_mode='same')\n", (3486, 3541), False, 'from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D\n'), ((3597, 3613), 'keras.layers.core.Dropout', 'Dropout', (['dropout'], {}), 
'(dropout)\n', (3604, 3613), False, 'from keras.layers.core import Reshape, Dense, Dropout, Activation, Flatten\n'), ((3669, 3683), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (3678, 3683), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((3695, 3756), 'keras.layers.Conv2D', 'Conv2D', (['(units / 2)', '(k, k)'], {'strides': '(2, 2)', 'border_mode': '"""same"""'}), "(units / 2, (k, k), strides=(2, 2), border_mode='same')\n", (3701, 3756), False, 'from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D\n'), ((3813, 3829), 'keras.layers.core.Dropout', 'Dropout', (['dropout'], {}), '(dropout)\n', (3820, 3829), False, 'from keras.layers.core import Reshape, Dense, Dropout, Activation, Flatten\n'), ((3885, 3899), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (3894, 3899), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((3911, 3968), 'keras.layers.Conv2D', 'Conv2D', (['units', '(k, k)'], {'strides': '(2, 2)', 'border_mode': '"""same"""'}), "(units, (k, k), strides=(2, 2), border_mode='same')\n", (3917, 3968), False, 'from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D\n'), ((4026, 4042), 'keras.layers.core.Dropout', 'Dropout', (['dropout'], {}), '(dropout)\n', (4033, 4042), False, 'from keras.layers.core import Reshape, Dense, Dropout, Activation, Flatten\n'), ((4054, 4068), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (4063, 4068), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((4117, 4126), 'keras.layers.core.Flatten', 'Flatten', ([], {}), '()\n', (4124, 4126), False, 'from keras.layers.core import Reshape, Dense, Dropout, Activation, Flatten\n'), ((4193, 4226), 'keras.layers.core.Dense', 'Dense', (['z_dim'], {'name': '"""encoder_mean"""'}), "(z_dim, name='encoder_mean')\n", (4198, 4226), False, 'from keras.layers.core import Reshape, Dense, Dropout, Activation, Flatten\n'), ((4243, 4299), 'keras.layers.core.Dense', 'Dense', (['z_dim'], {'name': '"""encoder_sigma"""', 'activation': '"""sigmoid"""'}), "(z_dim, name='encoder_sigma', activation='sigmoid')\n", (4248, 4299), False, 'from keras.layers.core import Reshape, Dense, Dropout, Activation, Flatten\n'), ((4375, 4414), 'keras.layers.Lambda', 'Lambda', (['sampling'], {'output_shape': '(z_dim,)'}), '(sampling, output_shape=(z_dim,))\n', (4381, 4414), False, 'from keras.layers import Input, merge, Lambda\n'), ((4783, 4820), 'keras.layers.core.Dense', 'Dense', (['(4 * 4 * 128)'], {'activation': '"""relu"""'}), "(4 * 4 * 128, activation='relu')\n", (4788, 4820), False, 'from keras.layers.core import Reshape, Dense, Dropout, Activation, Flatten\n'), ((4834, 4854), 'keras.layers.core.Reshape', 'Reshape', (['(4, 4, 128)'], {}), '((4, 4, 128))\n', (4841, 4854), False, 'from keras.layers.core import Reshape, Dense, Dropout, Activation, Flatten\n'), ((4894, 4980), 'keras.layers.convolutional.Conv2DTranspose', 'Conv2DTranspose', (['units', '(k, k)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(units, (k, k), strides=(2, 2), padding='same', activation=\n 'relu')\n", (4909, 4980), False, 'from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D, UpSampling2D, AveragePooling2D, Conv2DTranspose\n'), ((5106, 5195), 'keras.layers.convolutional.Conv2DTranspose', 'Conv2DTranspose', (['(units / 2)', '(k, k)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'activation': 
'"""relu"""'}), "(units / 2, (k, k), strides=(2, 2), padding='same',\n activation='relu')\n", (5121, 5195), False, 'from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D, UpSampling2D, AveragePooling2D, Conv2DTranspose\n'), ((5320, 5409), 'keras.layers.convolutional.Conv2DTranspose', 'Conv2DTranspose', (['(units / 2)', '(k, k)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(units / 2, (k, k), strides=(2, 2), padding='same',\n activation='relu')\n", (5335, 5409), False, 'from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D, UpSampling2D, AveragePooling2D, Conv2DTranspose\n'), ((5505, 5582), 'keras.layers.convolutional.Conv2DTranspose', 'Conv2DTranspose', (['(3)', '(k, k)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'activation': '"""tanh"""'}), "(3, (k, k), strides=(2, 2), padding='same', activation='tanh')\n", (5520, 5582), False, 'from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D, UpSampling2D, AveragePooling2D, Conv2DTranspose\n'), ((6671, 6699), 'numpy.expand_dims', 'np.expand_dims', (['img1'], {'axis': '(0)'}), '(img1, axis=0)\n', (6685, 6699), True, 'import numpy as np\n'), ((6741, 6769), 'numpy.expand_dims', 'np.expand_dims', (['img2'], {'axis': '(0)'}), '(img2, axis=0)\n', (6755, 6769), True, 'import numpy as np\n'), ((7233, 7270), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['c', 'num_pp'], {}), '(c, num_pp)\n', (7259, 7270), False, 'import cPickle, random, sys, keras\n'), ((7341, 7373), 'numpy.reshape', 'np.reshape', (['z_interp', '(1, z_dim)'], {}), '(z_interp, (1, z_dim))\n', (7351, 7373), True, 'import numpy as np\n'), ((7430, 7445), 'numpy.squeeze', 'np.squeeze', (['img'], {}), '(img)\n', (7440, 7445), True, 'import numpy as np\n'), ((7460, 7489), 'numpy.uint8', 'np.uint8', (['(img * 127.5 + 127.5)'], {}), '(img * 127.5 + 127.5)\n', (7468, 7489), True, 'import numpy as np\n'), ((7502, 7529), 'PIL.Image.fromarray', 'Image.fromarray', (['img', '"""RGB"""'], {}), "(img, 'RGB')\n", (7517, 7529), False, 'from PIL import Image\n'), ((7733, 7764), 'numpy.squeeze', 'np.squeeze', (['x_ori[idx, :, :, :]'], {}), '(x_ori[idx, :, :, :])\n', (7743, 7764), True, 'import numpy as np\n'), ((7776, 7811), 'numpy.uint8', 'np.uint8', (['(input_img * 127.5 + 127.5)'], {}), '(input_img * 127.5 + 127.5)\n', (7784, 7811), True, 'import numpy as np\n'), ((7824, 7851), 'PIL.Image.fromarray', 'Image.fromarray', (['img', '"""RGB"""'], {}), "(img, 'RGB')\n", (7839, 7851), False, 'from PIL import Image\n'), ((7911, 7942), 'numpy.squeeze', 'np.squeeze', (['x_ori[idx, :, :, :]'], {}), '(x_ori[idx, :, :, :])\n', (7921, 7942), True, 'import numpy as np\n'), ((8174, 8211), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['c', 'num_pp'], {}), '(c, num_pp)\n', (8200, 8211), False, 'import cPickle, random, sys, keras\n'), ((8301, 8320), 'numpy.squeeze', 'np.squeeze', (['img_rec'], {}), '(img_rec)\n', (8311, 8320), True, 'import numpy as np\n'), ((8336, 8374), 'numpy.uint8', 'np.uint8', (['(impanted_img * 127.5 + 127.5)'], {}), '(impanted_img * 127.5 + 127.5)\n', (8344, 8374), True, 'import numpy as np\n'), ((8387, 8414), 'PIL.Image.fromarray', 'Image.fromarray', (['img', '"""RGB"""'], {}), "(img, 'RGB')\n", (8402, 8414), False, 'from PIL import Image\n'), ((8476, 8509), 'numpy.uint8', 'np.uint8', (['(img_rec * 127.5 + 127.5)'], {}), '(img_rec * 127.5 + 127.5)\n', (8484, 8509), True, 'import numpy as np\n'), ((8522, 8549), 'PIL.Image.fromarray', 
'Image.fromarray', (['img', '"""RGB"""'], {}), "(img, 'RGB')\n", (8537, 8549), False, 'from PIL import Image\n'), ((8668, 8706), 'numpy.uint8', 'np.uint8', (['(impanted_img * 127.5 + 127.5)'], {}), '(impanted_img * 127.5 + 127.5)\n', (8676, 8706), True, 'import numpy as np\n'), ((8719, 8746), 'PIL.Image.fromarray', 'Image.fromarray', (['img', '"""RGB"""'], {}), "(img, 'RGB')\n", (8734, 8746), False, 'from PIL import Image\n'), ((9316, 9353), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['c', 'num_pp'], {}), '(c, num_pp)\n', (9342, 9353), False, 'import cPickle, random, sys, keras\n'), ((9406, 9421), 'numpy.squeeze', 'np.squeeze', (['img'], {}), '(img)\n', (9416, 9421), True, 'import numpy as np\n'), ((9436, 9465), 'numpy.uint8', 'np.uint8', (['(img * 127.5 + 127.5)'], {}), '(img * 127.5 + 127.5)\n', (9444, 9465), True, 'import numpy as np\n'), ((9478, 9505), 'PIL.Image.fromarray', 'Image.fromarray', (['img', '"""RGB"""'], {}), "(img, 'RGB')\n", (9493, 9505), False, 'from PIL import Image\n'), ((7205, 7218), 'numpy.ones', 'np.ones', (['(1,)'], {}), '((1,))\n', (7212, 7218), True, 'import numpy as np\n'), ((8096, 8132), 'numpy.expand_dims', 'np.expand_dims', (['impanted_img'], {'axis': '(0)'}), '(impanted_img, axis=0)\n', (8110, 8132), True, 'import numpy as np\n'), ((8146, 8159), 'numpy.ones', 'np.ones', (['(1,)'], {}), '((1,))\n', (8153, 8159), True, 'import numpy as np\n'), ((8991, 9012), 'numpy.exp', 'np.exp', (['(z_log_var / 2)'], {}), '(z_log_var / 2)\n', (8997, 9012), True, 'import numpy as np\n'), ((9290, 9300), 'numpy.ones', 'np.ones', (['(1)'], {}), '(1)\n', (9297, 9300), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
# Copyright (C) 2011 ~ 2012 Deepin, Inc.
# 2011 ~ 2012 Zeng Zhi
#
# Author: <NAME> <<EMAIL>>
# Maintainer: <NAME> <<EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from animation import Animation
from scrolled_window import ScrolledWindow
from button import Button
from theme import ui_theme
from menu import Menu
from constant import DEFAULT_FONT_SIZE
from draw import (draw_line, draw_text, draw_pixbuf)
from utils import (get_content_size, cairo_disable_antialias,
alpha_color_hex_to_cairo, cairo_state)
import gtk
import gobject
import pango
from poplist import Poplist
ARROW_BUTTON_WIDTH = 20
class Bread(gtk.HBox):
'''
    Bread widget is a container which can hold Crumb widgets.
@undocumented: create_crumb
@undocumented: enter_notify
@undocumented: leave_notify
@undocumented: event_box_press
@undocumented: enter_cb
@undocumented: redraw_bg
@undocumented: click_cb
@undocumented: move_right
@undocumented: move_left
'''
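    # Illustrative usage sketch (the labels and menu item tuples are hypothetical):
    #   bread = Bread(("Home", [(None, "Open", None)]), show_entry=True)
    #   bread.add(("Music", [(None, "Play", None)]))
    #   bread.set_size(300, 24)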
__gsignals__= {
"entry-changed" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_STRING,)),
"item_clicked" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_INT,gobject.TYPE_STRING,))
}
def __init__(self,
crumb,
arrow_right=ui_theme.get_pixbuf("treeview/arrow_right.png"),
arrow_down=ui_theme.get_pixbuf("treeview/arrow_down.png"),
show_others=False,
show_entry=False,
show_left_right_box=True
):
'''
Initialize Bread class.
@param crumb: Crumb instance or a list of crumb instances
@param arrow_right: Dynamic pixbuf for right arrow, default is \"treeview/arrow_right.png\" from ui theme.
@param arrow_down: Dynamic pixbuf for down arrow, default is \"treeview/arrow_down.png\" from ui theme.
@param show_others: If True, crumbs will not be destroyed, otherwise all crumbs on the right side will be destroyed.
        @param show_entry: If True, an entry will pop up when the blank area of the Bread is clicked.
        @param show_left_right_box: If True, reserve fixed space for the left/right scroll buttons, default is True.
'''
# Init.
super(Bread, self).__init__(spacing = 0)
self.arrow_right = arrow_right
self.arrow_down = arrow_down
self.item_list = list()
self.show_others = show_others
self.show_entry = show_entry
self.crumb = self.create_crumb(crumb)
self.button_width = ARROW_BUTTON_WIDTH # for left & right buttons
self.in_event_box = False
# Init left button and right button.
self.show_left_right_box = show_left_right_box
left_box = gtk.HBox(spacing = 0)
right_box = gtk.HBox(spacing = 0)
        # FIXME: the left && right boxes are given a static size here;
        # whether they should be shown at all is better decided at runtime
if self.show_left_right_box:
left_box.set_size_request(self.button_width, -1)
right_box.set_size_request(self.button_width, -1)
self.left_btn = Button("<")
self.right_btn = Button(">")
self.left_btn.set_no_show_all(True)
self.right_btn.set_no_show_all(True)
self.right_btn.connect("clicked", self.move_right)
self.left_btn.connect("clicked", self.move_left)
self.left_btn.set_size_request(self.button_width, -1)
self.right_btn.set_size_request(self.button_width, -1)
left_box.pack_start(self.left_btn, False, False)
right_box.pack_start(self.right_btn, False, False)
# Init Hbox
self.hbox = gtk.HBox(False, 0)
self.hbox.show()
self.eventbox = gtk.EventBox()
self.eventbox.set_visible_window(False)
if self.show_entry:
self.eventbox.connect("enter-notify-event", self.enter_notify)
self.eventbox.connect("leave-notify-event", self.leave_notify)
self.eventbox.connect("button-press-event", self.event_box_press)
self.hbox.pack_end(self.eventbox, True, True)
self.scroll_win = ScrolledWindow()
self.pack_start(left_box, False, True)
self.pack_start(self.hbox, True, True)
# Add Bread Items
self.adj = self.scroll_win.get_hadjustment()
self.add(self.crumb)
def create_crumb(self, crumb):
'''
Internal function to create a Crumb list for different types of inputs.
        @param crumb: Supported inputs are:
["a label", Menu]
[("a label",[(None, "menu label", None)])]
Crumb instance
[Crumb, Crumb]
'''
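        # Concrete (hypothetical) examples of each accepted shape:
        #   create_crumb(Crumb("Home", None))
        #   create_crumb(["Home", [(None, "Open", None)]])
        #   create_crumb([Crumb("Home", None), Crumb("Music", None)])
        #   create_crumb([("Home", [(None, "Open", None)]), ("Music", [(None, "Play", None)])])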
if isinstance(crumb, Crumb):
return [crumb,]
elif isinstance(crumb[0], str):
return [Crumb(crumb[0], crumb[1]),]
elif isinstance(crumb[0], Crumb):
return crumb
else:
return [Crumb(c[0], c[1]) for c in crumb]
def enter_notify(self, widget, event):
'''
Internal callback function to "enter-notify-event" signal.
@param widget: gtk.EventBox.
@param event: The pointer event of type gtk.gdk.Event.
'''
self.in_event_box = True
def leave_notify(self, widget, event):
'''
Internal callback function to "leave-notify-event" signal.
@param widget: Gtk.EventBox.
@param event: The pointer event of type gtk.gdk.Event.
'''
self.in_event_box = False
def event_box_press(self, widget, event):
'''
Internal callback function to "button-press-event" signal.
@param widget: gtk.eventbox.
@param event: event of type gtk.gdk.event.
'''
obj = self.hbox.get_children()
label = []
for o in obj[:-1]:
label.append("/"+o.label)
o.destroy()
self.entry = gtk.Entry()
self.entry.connect("activate", self.enter_cb)
self.entry.set_text("".join(label))
self.entry.show()
self.entry.select_region(0, len(self.entry.get_text()))
self.eventbox.hide()
self.hbox.pack_start(self.entry, True, True)
def enter_cb(self, widget):
'''
Internal callback function to "press-return" signal.
@param widget: gtk.Entry widget instance.
'''
label = widget.get_text()
widget.destroy()
self.eventbox.show()
self.emit("entry-changed", label)
def redraw_bg(self, widget, event):
'''
Internal callback function to "expose-event" signal.
@param widget: gtk.EventBox
@param event: event of type gtk.gdk.event
'''
cr = widget.window.cairo_create()
rect = widget.allocation
# Draw backgroud.
with cairo_state(cr):
cr.set_source_rgba(*alpha_color_hex_to_cairo(("#def5ff", 1)))
cr.rectangle(rect.x, rect.y, rect.width, rect.height)
cr.fill()
return False
def add(self, crumbs):
'''
Add crumbs. Can accept Crumb instance or a list of Crumb instances
@param crumbs: Supported inputs are:
["a label", Menu]
[("a label",[(None, "menu label", None)])]
Crumb instance
[Crumb, Crumb]
'''
crumbs = self.create_crumb(crumbs)
for crumb in crumbs:
crumb.show()
crumb.arrow_right = self.arrow_right
crumb.arrow_down = self.arrow_down
crumb.index_id = len(self.item_list)
crumb.connect("item_clicked", self.click_cb)
self.hbox.pack_start(crumb, False, False)
self.item_list.append(crumb.get_size_request()[0])
page_size = self.adj.page_size
# Show right button if crumbs exceed scrolled window size.
if sum(self.item_list) > page_size and not page_size == 1.0:
self.right_btn.show()
def change_node(self, index, crumbs):
'''
        Change the nodes starting from the specified index.
        @param index: Start index
        @param crumbs: Crumb instance or Crumb list
        For instance, if the current list is [Crumb1, Crumb2], calling
        change_node(1, [Crumb3, Crumb4]) changes it to [Crumb1, Crumb3, Crumb4].
        In this way, the application can manipulate crumbs directly.
'''
objects = self.hbox.get_children()
for i in objects[index: -1]:
i.destroy()
self.item_list[index:] = []
self.add(crumbs)
def remove_node_after_index(self, index):
'''
        Remove all nodes after the given index.
        @param index: Index after which all nodes are removed.
'''
for i in self.hbox.get_children()[(index + 1): -1]:
i.destroy()
self.item_list[(index + 1):] = []
def click_cb(self, widget, index, label):
'''
Internal callback function to "clicked" signal.
@param widget: Crumb instance.
@param index: The index value of clicked crumb.
@param label: Label of the crumb.
'''
if not self.show_others:
for i in self.hbox.get_children()[(index + 1): -1]:
i.destroy()
self.item_list[(index + 1):] = []
self.emit("item_clicked", index, label)
def move_right(self, widget):
'''
Internal callback function to "clicked" signal.
@param widget: Right button.
'''
upper, page_size, value = self.adj.upper, self.adj.page_size, self.adj.value
shift_value = 0
temp = 0
if upper > (page_size + value):
self.left_btn.show()
            for i in xrange(len(self.item_list)):
temp += self.item_list[i]
if temp > (page_size + value):
shift_value = temp - (page_size + value)
#play animation
ani = Animation(self.adj, lambda widget, v1: widget.set_value(v1),200,[value, value+shift_value])
ani.start()
break
if not upper > (page_size + self.adj.value + shift_value):
self.right_btn.hide()
def move_left(self, widget):
'''
Internal callback function to "clicked" signal.
@param widget: Left button.
'''
upper, page_size, value = self.adj.upper, self.adj.page_size, self.adj.value
shift_value = 0
temp = 0
if not value == 0:
self.right_btn.show()
for i in xrange(len(self.item_list)):
temp += self.item_list[i]
if temp >= value:
shift_value = self.item_list[i] - (temp - value)
break
#play animation
ani = Animation(self.adj, lambda widget, v1: widget.set_value(v1),200,[value, value-shift_value])
ani.start()
if (self.adj.value - shift_value) == 0:
self.left_btn.hide()
def set_size(self, width, height):
'''
Set Bread size.
@param width: Width of Bread.
@param height: Height of Bread.
'''
self.scroll_win.set_size_request(width - 2 * self.button_width, height)
self.hbox.set_size_request(-1, self.hbox.get_children()[0].height)
gobject.type_register(Bread)
class BreadMenu(Poplist):
'''
Popup menu for bread.
@undocumented: draw_treeview_mask
@undocumented: shape_bread_menu_frame
@undocumented: expose_bread_menu_frame
'''
def __init__(self,
items,
max_height=None,
max_width=None,
):
'''
Initialize BreadMenu class.
@param items: Item for TreeView.
@param max_height: Maximum height of bread menu, by default is None.
@param max_width: Maximum width of bread menu, by default is None.
'''
Poplist.__init__(self,
items=items,
max_height=max_height,
max_width=max_width,
shadow_visible=False,
shape_frame_function=self.shape_bread_menu_frame,
expose_frame_function=self.expose_bread_menu_frame,
align_size=2,
)
self.set_skip_pager_hint(True)
self.set_skip_taskbar_hint(True)
self.treeview.draw_mask = self.draw_treeview_mask
self.expose_window_frame = self.expose_bread_menu_frame
def draw_treeview_mask(self, cr, x, y, w, h):
cr.set_source_rgb(1, 1, 1)
cr.rectangle(x, y, w, h)
cr.fill()
def shape_bread_menu_frame(self, widget, event):
pass
def expose_bread_menu_frame(self, widget, event):
cr = widget.window.cairo_create()
rect = widget.allocation
with cairo_disable_antialias(cr):
outside_border = alpha_color_hex_to_cairo(("#666666", 0.5))
cr.set_line_width(1)
cr.set_source_rgba(*outside_border)
cr.rectangle(rect.x + 1, rect.y + 1, rect.width - 2, rect.height - 2)
cr.fill()
gobject.type_register(BreadMenu)
class Crumb(gtk.Button):
'''
Crumb class .
@undocumented: enter_button
@undocumented: motion_notify_cb
@undocumented: create_menu
@undocumented: hide_cb
@undocumented: button_press_cb
@undocumented: button_clicked
@undocumented: expose_cb
'''
__gsignals__= {
"item_clicked" : (gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE,
(gobject.TYPE_INT,gobject.TYPE_STRING,))}
def __init__(self,
label,
menu_items = None,
font_size = DEFAULT_FONT_SIZE,
padding_x = 15,
):
'''
Initialize Crumb class.
@param label: Crumb item label
@param menu_items: Crumb menu, could be a Menu instance or a list, default is None
@param font_size: Font size, default is DEFAULT_FONT_SIZE.
@param padding_x: Horizontal padding, default is 15 pixels.
'''
super(Crumb, self).__init__()
self.arrow_right = None
self.arrow_down = None
self.menu_min = 18 # menu bar width
self.btn_min = 50 # button width
self.height = 24 # crumb height
self.font_size = font_size
self.padding_x = padding_x
self.menu = self.create_menu(menu_items)
if self.menu != None:
self.menu.connect("hide", self.hide_cb)
self.menu_press = False
self.menu_show = False
self.index_id = 0
self.set_label(label)
self.in_button = True
self.in_menu = True
self.connect("expose_event", self.expose_cb)
self.connect("button_press_event", self.button_press_cb)
self.connect("clicked", self.button_clicked)
self.connect("motion-notify-event", self.motion_notify_cb)
self.connect("enter-notify-event", self.enter_button)
self.add_events(gtk.gdk.POINTER_MOTION_MASK)
def enter_button(self, widget, event):
in_menu = event.x > self.button_width
        self.in_menu = in_menu
def motion_notify_cb(self, widget, event):
'''
Internal callback function to Crumb "motion-notify-event" signal.
@param widget: Crumb
@param event: an event of gtk.gdk.event
'''
in_menu = event.x > self.button_width
        if self.in_menu != in_menu:
self.in_menu = in_menu
self.queue_draw()
def create_menu(self, menu_items):
'''
Internal function to create menu.
@param menu_items: menu_items
        @return: BreadMenu instance, or None if menu_items is not a list.
'''
if menu_items != None and isinstance(menu_items, list):
return BreadMenu(menu_items)
else:
return None
def hide_cb(self, widget):
'''
        Internal callback function to Menu's "hide" signal.
@param widget: Menu
'''
if self.menu_press:
self.set_state(gtk.STATE_PRELIGHT)
else:
self.menu_show = False
self.set_state(gtk.STATE_NORMAL)
def button_press_cb(self, widget, event):
'''
Internal callback function to "button-press-event" signal.
@param widget: Crumb
@param event: An event of gtk.gdk.Event
'''
if self.menu == None:
self.in_button = True
self.menu_press = False
else:
self.in_button = event.x < (widget.allocation.width - self.menu_min)
if not self.in_button:
self.menu_press = True
def button_clicked(self, widget):
'''
        Internal callback function for the "clicked" signal.
@param widget: Crumb
'''
if self.in_button:
self.emit("item_clicked", self.index_id, self.label)
else:
self.menu_press = False
self.menu_show = not self.menu_show
if self.menu_show:
(wx, wy) = self.get_toplevel().window.get_root_origin()
(offset_x, offset_y) = widget.translate_coordinates(self.get_toplevel(), 0, 0)
(menu_width, menu_height) = widget.allocation.width, widget.allocation.height
arrow_button_width = ARROW_BUTTON_WIDTH
self.menu.show((wx + offset_x + menu_width - arrow_button_width,
wy + offset_y + menu_height,
),
(0, 0))
def set_label(self, label, font_size = DEFAULT_FONT_SIZE):
'''
Set label for left button.
@param label: Label
        @param font_size: Label's font size, default is DEFAULT_FONT_SIZE.
'''
self.label = label
(self.label_w, self.label_h) = get_content_size(self.label, font_size)
if self.menu == None:
self.set_size_request(
max(self.label_w + 2 * self.padding_x, self.btn_min),
self.height)
self.button_width = self.get_size_request()[0]
else:
self.set_size_request(
max(self.label_w + 2 * self.padding_x + self.menu_min, self.btn_min + self.menu_min),
self.height)
self.button_width = self.get_size_request()[0] - self.menu_min
self.queue_draw()
def expose_cb(self, widget, event):
'''
Internal expose callback function.
@param widget: Crumb instance.
@param event: An event of gtk.gdk.Event.
'''
if self.menu == None:
self.menu_min = 0
cr = widget.window.cairo_create()
rect = widget.allocation
x, y, w, h = rect.x, rect.y, rect.width, rect.height
        # Should move this part to Bread class since app_theme is globalized.
arrow_right = self.arrow_right
arrow_down = self.arrow_down
arrow_width, arrow_height = arrow_right.get_pixbuf().get_width(), arrow_right.get_pixbuf().get_height()
arrow_pixbuf = arrow_right
outside_border = alpha_color_hex_to_cairo(("#000000", 0.15))
inner_border = alpha_color_hex_to_cairo(("#ffffff", 0.5))
active_mask = alpha_color_hex_to_cairo(("#000000", 0.1))
if self.menu_show:
self.set_state(gtk.STATE_PRELIGHT)
if widget.state == gtk.STATE_NORMAL:
text_color = ui_theme.get_color("title_text").get_color()
button_color = None
menu_color = None
arrow_pixbuf = arrow_right
elif widget.state == gtk.STATE_PRELIGHT:
text_color = ui_theme.get_color("title_text").get_color()
if self.menu_show:
arrow_pixbuf = arrow_down
else:
arrow_pixbuf = arrow_right
if self.in_menu:
button_color = None
menu_color = inner_border
else:
button_color = inner_border
menu_color = None
elif widget.state == gtk.STATE_ACTIVE:
text_color = ui_theme.get_color("title_text").get_color()
if self.in_button:
button_color = inner_border
menu_color = None
arrow_pixbuf = arrow_right
else:
button_color = None
menu_color = inner_border
arrow_pixbuf = arrow_down
elif widget.state == gtk.STATE_INSENSITIVE:
arrow_pixbuf = arrow_right
text_color = ui_theme.get_color("disable_text").get_color()
disable_bg = ui_theme.get_color("disable_background").get_color()
button_color = [(0, (disable_bg, 1.0)),
(1, (disable_bg, 1.0))]
menu_color = [(0, (disable_bg, 1.0)),
(1, (disable_bg, 1.0))]
# Draw background.
if not widget.state == gtk.STATE_NORMAL:
# Draw button border.
            def draw_rectangle(cr, x, y, w, h):
                draw_line(cr, x - 1, y, x + w, y)          # top
                draw_line(cr, x, y + h, x + w, y + h)      # bottom
                draw_line(cr, x, y, x, y + h)              # left
                draw_line(cr, x + w, y, x + w, y + h - 1)  # right
cr.set_source_rgba(*outside_border)
if button_color:
draw_rectangle(cr, x + 1 , y + 1 , self.button_width -1 , h -1)
elif menu_color:
draw_rectangle(cr, x + self.button_width, y + 1, self.menu_min, h - 1)
            # Draw inner border.
cr.set_source_rgba(*inner_border)
if button_color:
draw_rectangle(cr, x + 2, y + 2, self.button_width - 3, h -3)
elif menu_color:
draw_rectangle(cr, x + self.button_width + 1, y + 2, self.menu_min - 2, h -3)
if widget.state == gtk.STATE_ACTIVE:
cr.set_source_rgba(*active_mask)
if button_color:
cr.rectangle(x + 2, y + 2, self.button_width - 4, h -4)
cr.fill()
elif menu_color:
cr.rectangle( x + self.button_width + 1, y + 2, self.menu_min - 3, h -4)
cr.fill()
if self.menu != None:
# Draw an arrow.
draw_pixbuf(cr, arrow_pixbuf.get_pixbuf(), x + self.button_width + (self.menu_min - arrow_width) / 2, y + (h - arrow_height) / 2)
# Draw text.
draw_text(cr, self.label, x, y , self.button_width, h, self.font_size, text_color,
alignment = pango.ALIGN_CENTER)
return True
gobject.type_register(Crumb)
if __name__ == "__main__":
import gtk
def add_panel(widget):
crumb = Crumb("Child",menu)
bread.add(crumb)
def change_root_node( widget):
crumb1 = Crumb("Yet Another Root", menu)
crumb2 = Crumb("Yet Another Child", menu)
bread.change_node(0, [crumb1, crumb2])
def change_entry(widget, path):
# Application can check if path is valid or not
path_list = path.split("/")[1:]
bread.change_node(0, [Crumb(i , menu) for i in path_list])
menu = Menu([
(None, "测试1", None),
(None, "测试2", None),
],
shadow_visible = False,
)
win = gtk.Window(gtk.WINDOW_TOPLEVEL)
win.connect("destroy", lambda w: gtk.main_quit())
win.set_default_size(600,300)
vbox = gtk.VBox()
######################################
# test breadcrumb widget
bread = Bread([("Root", menu),
("Level1", menu)],
show_others = False,
show_entry = True)
bread.add(["xxx",menu])
# Must set_size
bread.set_size(200, -1)
bread.connect("entry-changed", change_entry)
#####################################
vbox.pack_start(bread, False, False, 0)
# Test Item
add_path_button = gtk.Button("Add Item")
add_path_button.connect("clicked", add_panel)
vbox.pack_start(add_path_button, True, False, 0)
test_change_node = gtk.Button("Change Root node")
test_change_node.connect("clicked", change_root_node)
vbox.pack_start(test_change_node, True, False , 0)
win.add(vbox)
win.show_all()
gtk.main()
|
[
"gtk.EventBox",
"gtk.Entry",
"gtk.Window",
"utils.alpha_color_hex_to_cairo",
"gtk.main_quit",
"gtk.VBox",
"draw.draw_text",
"menu.Menu",
"gobject.type_register",
"gtk.main",
"theme.ui_theme.get_color",
"utils.get_content_size",
"scrolled_window.ScrolledWindow",
"gtk.Button",
"gtk.HBox",
"utils.cairo_disable_antialias",
"button.Button",
"draw.draw_line",
"theme.ui_theme.get_pixbuf",
"utils.cairo_state",
"poplist.Poplist.__init__"
] |
[((12004, 12032), 'gobject.type_register', 'gobject.type_register', (['Bread'], {}), '(Bread)\n', (12025, 12032), False, 'import gobject\n'), ((13880, 13912), 'gobject.type_register', 'gobject.type_register', (['BreadMenu'], {}), '(BreadMenu)\n', (13901, 13912), False, 'import gobject\n'), ((23499, 23527), 'gobject.type_register', 'gobject.type_register', (['Crumb'], {}), '(Crumb)\n', (23520, 23527), False, 'import gobject\n'), ((24054, 24124), 'menu.Menu', 'Menu', (["[(None, '测试1', None), (None, '测试2', None)]"], {'shadow_visible': '(False)'}), "([(None, '测试1', None), (None, '测试2', None)], shadow_visible=False)\n", (24058, 24124), False, 'from menu import Menu\n'), ((24203, 24234), 'gtk.Window', 'gtk.Window', (['gtk.WINDOW_TOPLEVEL'], {}), '(gtk.WINDOW_TOPLEVEL)\n', (24213, 24234), False, 'import gtk\n'), ((24334, 24344), 'gtk.VBox', 'gtk.VBox', ([], {}), '()\n', (24342, 24344), False, 'import gtk\n'), ((24808, 24830), 'gtk.Button', 'gtk.Button', (['"""Add Item"""'], {}), "('Add Item')\n", (24818, 24830), False, 'import gtk\n'), ((24958, 24988), 'gtk.Button', 'gtk.Button', (['"""Change Root node"""'], {}), "('Change Root node')\n", (24968, 24988), False, 'import gtk\n'), ((25144, 25154), 'gtk.main', 'gtk.main', ([], {}), '()\n', (25152, 25154), False, 'import gtk\n'), ((1955, 2002), 'theme.ui_theme.get_pixbuf', 'ui_theme.get_pixbuf', (['"""treeview/arrow_right.png"""'], {}), "('treeview/arrow_right.png')\n", (1974, 2002), False, 'from theme import ui_theme\n'), ((2032, 2078), 'theme.ui_theme.get_pixbuf', 'ui_theme.get_pixbuf', (['"""treeview/arrow_down.png"""'], {}), "('treeview/arrow_down.png')\n", (2051, 2078), False, 'from theme import ui_theme\n'), ((3300, 3319), 'gtk.HBox', 'gtk.HBox', ([], {'spacing': '(0)'}), '(spacing=0)\n', (3308, 3319), False, 'import gtk\n'), ((3342, 3361), 'gtk.HBox', 'gtk.HBox', ([], {'spacing': '(0)'}), '(spacing=0)\n', (3350, 3361), False, 'import gtk\n'), ((3713, 3727), 'button.Button', 'Button', (['"""<"""'], {}), "('<')\n", (3719, 3727), False, 'from button import Button\n'), ((3753, 3767), 'button.Button', 'Button', (['""">"""'], {}), "('>')\n", (3759, 3767), False, 'from button import Button\n'), ((4255, 4273), 'gtk.HBox', 'gtk.HBox', (['(False)', '(0)'], {}), '(False, 0)\n', (4263, 4273), False, 'import gtk\n'), ((4323, 4337), 'gtk.EventBox', 'gtk.EventBox', ([], {}), '()\n', (4335, 4337), False, 'import gtk\n'), ((4724, 4740), 'scrolled_window.ScrolledWindow', 'ScrolledWindow', ([], {}), '()\n', (4738, 4740), False, 'from scrolled_window import ScrolledWindow\n'), ((6531, 6542), 'gtk.Entry', 'gtk.Entry', ([], {}), '()\n', (6540, 6542), False, 'import gtk\n'), ((12623, 12855), 'poplist.Poplist.__init__', 'Poplist.__init__', (['self'], {'items': 'items', 'max_height': 'max_height', 'max_width': 'max_width', 'shadow_visible': '(False)', 'shape_frame_function': 'self.shape_bread_menu_frame', 'expose_frame_function': 'self.expose_bread_menu_frame', 'align_size': '(2)'}), '(self, items=items, max_height=max_height, max_width=\n max_width, shadow_visible=False, shape_frame_function=self.\n shape_bread_menu_frame, expose_frame_function=self.\n expose_bread_menu_frame, align_size=2)\n', (12639, 12855), False, 'from poplist import Poplist\n'), ((18651, 18690), 'utils.get_content_size', 'get_content_size', (['self.label', 'font_size'], {}), '(self.label, font_size)\n', (18667, 18690), False, 'from utils import get_content_size, cairo_disable_antialias, alpha_color_hex_to_cairo, cairo_state\n'), ((19920, 19963), 'utils.alpha_color_hex_to_cairo', 
'alpha_color_hex_to_cairo', (["('#000000', 0.15)"], {}), "(('#000000', 0.15))\n", (19944, 19963), False, 'from utils import get_content_size, cairo_disable_antialias, alpha_color_hex_to_cairo, cairo_state\n'), ((19987, 20029), 'utils.alpha_color_hex_to_cairo', 'alpha_color_hex_to_cairo', (["('#ffffff', 0.5)"], {}), "(('#ffffff', 0.5))\n", (20011, 20029), False, 'from utils import get_content_size, cairo_disable_antialias, alpha_color_hex_to_cairo, cairo_state\n'), ((20052, 20094), 'utils.alpha_color_hex_to_cairo', 'alpha_color_hex_to_cairo', (["('#000000', 0.1)"], {}), "(('#000000', 0.1))\n", (20076, 20094), False, 'from utils import get_content_size, cairo_disable_antialias, alpha_color_hex_to_cairo, cairo_state\n'), ((23342, 23457), 'draw.draw_text', 'draw_text', (['cr', 'self.label', 'x', 'y', 'self.button_width', 'h', 'self.font_size', 'text_color'], {'alignment': 'pango.ALIGN_CENTER'}), '(cr, self.label, x, y, self.button_width, h, self.font_size,\n text_color, alignment=pango.ALIGN_CENTER)\n', (23351, 23457), False, 'from draw import draw_line, draw_text, draw_pixbuf\n'), ((7440, 7455), 'utils.cairo_state', 'cairo_state', (['cr'], {}), '(cr)\n', (7451, 7455), False, 'from utils import get_content_size, cairo_disable_antialias, alpha_color_hex_to_cairo, cairo_state\n'), ((13593, 13620), 'utils.cairo_disable_antialias', 'cairo_disable_antialias', (['cr'], {}), '(cr)\n', (13616, 13620), False, 'from utils import get_content_size, cairo_disable_antialias, alpha_color_hex_to_cairo, cairo_state\n'), ((13651, 13693), 'utils.alpha_color_hex_to_cairo', 'alpha_color_hex_to_cairo', (["('#666666', 0.5)"], {}), "(('#666666', 0.5))\n", (13675, 13693), False, 'from utils import get_content_size, cairo_disable_antialias, alpha_color_hex_to_cairo, cairo_state\n'), ((24272, 24287), 'gtk.main_quit', 'gtk.main_quit', ([], {}), '()\n', (24285, 24287), False, 'import gtk\n'), ((21877, 21910), 'draw.draw_line', 'draw_line', (['cr', '(x - 1)', 'y', '(x + w)', 'y'], {}), '(cr, x - 1, y, x + w, y)\n', (21886, 21910), False, 'from draw import draw_line, draw_text, draw_pixbuf\n'), ((21943, 21980), 'draw.draw_line', 'draw_line', (['cr', 'x', '(y + h)', '(x + w)', '(y + h)'], {}), '(cr, x, y + h, x + w, y + h)\n', (21952, 21980), False, 'from draw import draw_line, draw_text, draw_pixbuf\n'), ((22012, 22041), 'draw.draw_line', 'draw_line', (['cr', 'x', 'y', 'x', '(y + h)'], {}), '(cr, x, y, x, y + h)\n', (22021, 22041), False, 'from draw import draw_line, draw_text, draw_pixbuf\n'), ((22079, 22120), 'draw.draw_line', 'draw_line', (['cr', '(x + w)', 'y', '(x + w)', '(y + h - 1)'], {}), '(cr, x + w, y, x + w, y + h - 1)\n', (22088, 22120), False, 'from draw import draw_line, draw_text, draw_pixbuf\n'), ((7489, 7529), 'utils.alpha_color_hex_to_cairo', 'alpha_color_hex_to_cairo', (["('#def5ff', 1)"], {}), "(('#def5ff', 1))\n", (7513, 7529), False, 'from utils import get_content_size, cairo_disable_antialias, alpha_color_hex_to_cairo, cairo_state\n'), ((20241, 20273), 'theme.ui_theme.get_color', 'ui_theme.get_color', (['"""title_text"""'], {}), "('title_text')\n", (20259, 20273), False, 'from theme import ui_theme\n'), ((20462, 20494), 'theme.ui_theme.get_color', 'ui_theme.get_color', (['"""title_text"""'], {}), "('title_text')\n", (20480, 20494), False, 'from theme import ui_theme\n'), ((20918, 20950), 'theme.ui_theme.get_color', 'ui_theme.get_color', (['"""title_text"""'], {}), "('title_text')\n", (20936, 20950), False, 'from theme import ui_theme\n'), ((21370, 21404), 'theme.ui_theme.get_color', 
'ui_theme.get_color', (['"""disable_text"""'], {}), "('disable_text')\n", (21388, 21404), False, 'from theme import ui_theme\n'), ((21442, 21482), 'theme.ui_theme.get_color', 'ui_theme.get_color', (['"""disable_background"""'], {}), "('disable_background')\n", (21460, 21482), False, 'from theme import ui_theme\n')]
|
from pathlib import Path
result_dir = Path().home().joinpath('module_results/bfast_preanalysis')
start = """
### Start date selection
Pick the date of the timeseries' start.
"""
end = """
### End date selection
Pick the date of the timeseries' end.
"""
select = """
### Satellite selection
Select the satellite(s) you want to include for the pre-analysis.
"""
sr = """
### Selection of collection type
Choose between Surface Reflectance and Top-of-Atmosphere collections for the selected satellites.
"""
stats = """
### Selection of statistics
Select the statistical measure you want to apply, and switch on annual for per-year calculations.
"""
|
[
"pathlib.Path"
] |
[((39, 45), 'pathlib.Path', 'Path', ([], {}), '()\n', (43, 45), False, 'from pathlib import Path\n')]
|
import sys
sys.path.append('../loader')
# from unaligned_data_loader import UnalignedDataLoader
from datasets.svhn import load_svhn
from datasets.mnist import load_mnist
from datasets.usps import load_usps
# from gtsrb import load_gtsrb
# from synth_traffic import load_syntraffic
from datasets.create_dataloader import create_DataLoader
def return_dataset(data, scale=False, usps=False, all_use='no'):
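    # Load the train/test split for the requested dataset name ('svhn', 'mnist', or 'usps').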
if data == 'svhn':
train_image, train_label, \
test_image, test_label = load_svhn()
if data == 'mnist':
train_image, train_label, \
test_image, test_label = load_mnist(scale=scale, usps=usps, all_use=all_use)
print(train_image.shape)
if data == 'usps':
train_image, train_label, \
test_image, test_label = load_usps(all_use=all_use)
# if data == 'synth':
# train_image, train_label, \
# test_image, test_label = load_syntraffic()
# if data == 'gtsrb':
# train_image, train_label, \
# test_image, test_label = load_gtsrb()
return train_image, train_label, test_image, test_label
# we don't need target just source
def dataset_read(source, target, batch_size, scale=False, all_use='no'):
# Return train and test loader
S = {}
S_test = {}
# T = {}
# T_test = {}
usps = False
if source == 'usps': # or target == 'usps':
usps = True
train_source, s_label_train, test_source, s_label_test = return_dataset(source, scale=scale,
usps=usps, all_use=all_use)
# train_target, t_label_train, test_target, t_label_test = return_dataset(target, scale=scale, usps=usps,
# all_use=all_use)
S['imgs'] = train_source
S['labels'] = s_label_train
# T['imgs'] = train_target
# T['labels'] = t_label_train
# input target samples for both
S_test['imgs'] = test_source
S_test['labels'] = s_label_test
# T_test['imgs'] = test_target
# T_test['labels'] = t_label_test
scale = 40 if source == 'synth' else 28 if source == 'usps' or target == 'usps' else 32
# scale = 40 if source == 'synth' else 28 if source == 'usps' else 32
# train_loader = UnalignedDataLoader()
train_loader = create_DataLoader(S, batch_size, scale=scale, shuffle=False, )
# dataset = train_loader.load_data()
# test_loader = UnalignedDataLoader()
val_loader = create_DataLoader(S_test, batch_size, scale=scale, shuffle=False)
# dataset_test = test_loader.load_data()
return train_loader, val_loader
|
[
"sys.path.append",
"datasets.mnist.load_mnist",
"datasets.svhn.load_svhn",
"datasets.create_dataloader.create_DataLoader",
"datasets.usps.load_usps"
] |
[((12, 40), 'sys.path.append', 'sys.path.append', (['"""../loader"""'], {}), "('../loader')\n", (27, 40), False, 'import sys\n'), ((2330, 2390), 'datasets.create_dataloader.create_DataLoader', 'create_DataLoader', (['S', 'batch_size'], {'scale': 'scale', 'shuffle': '(False)'}), '(S, batch_size, scale=scale, shuffle=False)\n', (2347, 2390), False, 'from datasets.create_dataloader import create_DataLoader\n'), ((2498, 2563), 'datasets.create_dataloader.create_DataLoader', 'create_DataLoader', (['S_test', 'batch_size'], {'scale': 'scale', 'shuffle': '(False)'}), '(S_test, batch_size, scale=scale, shuffle=False)\n', (2515, 2563), False, 'from datasets.create_dataloader import create_DataLoader\n'), ((499, 510), 'datasets.svhn.load_svhn', 'load_svhn', ([], {}), '()\n', (508, 510), False, 'from datasets.svhn import load_svhn\n'), ((604, 655), 'datasets.mnist.load_mnist', 'load_mnist', ([], {'scale': 'scale', 'usps': 'usps', 'all_use': 'all_use'}), '(scale=scale, usps=usps, all_use=all_use)\n', (614, 655), False, 'from datasets.mnist import load_mnist\n'), ((781, 807), 'datasets.usps.load_usps', 'load_usps', ([], {'all_use': 'all_use'}), '(all_use=all_use)\n', (790, 807), False, 'from datasets.usps import load_usps\n')]
|
import logging
import time
from aiogram import Dispatcher, types
from aiogram.dispatcher.middlewares import BaseMiddleware
HANDLED_STR = ["Unhandled", "Handled"]
class LoggingMiddleware(BaseMiddleware):
def __init__(self, logger=None):
if not isinstance(logger, logging.Logger):
logger = logging.getLogger(self.__class__.__name__)
self.logger = logger
super(LoggingMiddleware, self).__init__()
def check_timeout(self, obj):
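        # Return the elapsed time in milliseconds since on_pre_process_update stamped "_start" into conf, or -1 if it is missing.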
start = obj.conf.get("_start", None)
if start:
del obj.conf["_start"]
return round((time.time() - start) * 1000)
return -1
async def on_pre_process_update(self, update: types.Update, data: dict):
update.conf["_start"] = time.time()
pass
async def on_post_process_update(self, update: types.Update, result, data: dict):
timeout = self.check_timeout(update)
if timeout > 0:
self.logger.info(
f"Process update [ID:{update.update_id}]: [success] (in {timeout} ms)"
)
async def on_pre_process_message(self, message: types.Message, data: dict):
self.logger.info(
f'Received message [TEXT: "{message.text}"] in chat [{message.from_user.first_name} {message.from_user.username} {message.from_user.id}]'
)
async def on_post_process_message(
self, message: types.Message, results, data: dict
):
pass
# self.logger.debug(f"{HANDLED_STR[bool(len(results))]} "
# f"message [ID:{message.message_id}] in chat [{message.chat.type}:{message.chat.id}]")
async def on_pre_process_edited_message(self, edited_message, data: dict):
pass
# self.logger.info(f"Received edited message [ID:{edited_message.message_id}] "
# f"in chat [{edited_message.chat.type}:{edited_message.chat.id}]")
async def on_post_process_edited_message(self, edited_message, results, data: dict):
pass
# self.logger.debug(f"{HANDLED_STR[bool(len(results))]} "
# f"edited message [ID:{edited_message.message_id}] "
# f"in chat [{edited_message.chat.type}:{edited_message.chat.id}]")
async def on_pre_process_channel_post(
self, channel_post: types.Message, data: dict
):
pass
# self.logger.info(f"Received channel post [ID:{channel_post.message_id}] "
# f"in channel [ID:{channel_post.chat.id}]")
async def on_post_process_channel_post(
self, channel_post: types.Message, results, data: dict
):
pass
# self.logger.debug(f"{HANDLED_STR[bool(len(results))]} "
# f"channel post [ID:{channel_post.message_id}] "
# f"in chat [{channel_post.chat.type}:{channel_post.chat.id}]")
async def on_pre_process_edited_channel_post(
self, edited_channel_post: types.Message, data: dict
):
pass
# self.logger.info(f"Received edited channel post [ID:{edited_channel_post.message_id}] "
# f"in channel [ID:{edited_channel_post.chat.id}]")
async def on_post_process_edited_channel_post(
self, edited_channel_post: types.Message, results, data: dict
):
pass
# self.logger.debug(f"{HANDLED_STR[bool(len(results))]} "
# f"edited channel post [ID:{edited_channel_post.message_id}] "
# f"in channel [ID:{edited_channel_post.chat.id}]")
async def on_pre_process_inline_query(
self, inline_query: types.InlineQuery, data: dict
):
pass
# self.logger.info(f"Received inline query [ID:{inline_query.id}] "
# f"from user [ID:{inline_query.from_user.id}]")
async def on_post_process_inline_query(
self, inline_query: types.InlineQuery, results, data: dict
):
pass
# self.logger.debug(f"{HANDLED_STR[bool(len(results))]} "
# f"inline query [ID:{inline_query.id}] "
# f"from user [ID:{inline_query.from_user.id}]")
async def on_pre_process_chosen_inline_result(
self, chosen_inline_result: types.ChosenInlineResult, data: dict
):
pass
# self.logger.info(f"Received chosen inline result [Inline msg ID:{chosen_inline_result.inline_message_id}] "
# f"from user [ID:{chosen_inline_result.from_user.id}] "
# f"result [ID:{chosen_inline_result.result_id}]")
async def on_post_process_chosen_inline_result(
self, chosen_inline_result, results, data: dict
):
pass
# self.logger.debug(f"{HANDLED_STR[bool(len(results))]} "
# f"chosen inline result [Inline msg ID:{chosen_inline_result.inline_message_id}] "
# f"from user [ID:{chosen_inline_result.from_user.id}] "
# f"result [ID:{chosen_inline_result.result_id}]")
async def on_pre_process_callback_query(
self, callback_query: types.CallbackQuery, data: dict
):
if callback_query.message:
self.logger.info(
f"Received callback query [DATA:{callback_query.data}] "
f"in chat [{callback_query.message.chat.type}:{callback_query.message.chat.id}] "
f"from user [USERNAME:{callback_query.from_user.username}]"
)
async def on_post_process_callback_query(self, callback_query, results, data: dict):
pass
# if callback_query.message:
# if callback_query.message.from_user:
# self.logger.debug(f"{HANDLED_STR[bool(len(results))]} "
# f"callback query [ID:{callback_query.id}] "
# f"in chat [{callback_query.message.chat.type}:{callback_query.message.chat.id}] "
# f"from user [ID:{callback_query.message.from_user.id}]")
# else:
# self.logger.debug(f"{HANDLED_STR[bool(len(results))]} "
# f"callback query [ID:{callback_query.id}] "
# f"in chat [{callback_query.message.chat.type}:{callback_query.message.chat.id}]")
# else:
# self.logger.debug(f"{HANDLED_STR[bool(len(results))]} "
# f"callback query [ID:{callback_query.id}] "
# f"from inline message [ID:{callback_query.inline_message_id}] "
# f"from user [ID:{callback_query.from_user.id}]")
async def on_pre_process_shipping_query(
self, shipping_query: types.ShippingQuery, data: dict
):
pass
# self.logger.info(f"Received shipping query [ID:{shipping_query.id}] "
# f"from user [ID:{shipping_query.from_user.id}]")
async def on_post_process_shipping_query(self, shipping_query, results, data: dict):
pass
# self.logger.debug(f"{HANDLED_STR[bool(len(results))]} "
# f"shipping query [ID:{shipping_query.id}] "
# f"from user [ID:{shipping_query.from_user.id}]")
async def on_pre_process_pre_checkout_query(
self, pre_checkout_query: types.PreCheckoutQuery, data: dict
):
pass
# self.logger.info(f"Received pre-checkout query [ID:{pre_checkout_query.id}] "
# f"from user [ID:{pre_checkout_query.from_user.id}]")
async def on_post_process_pre_checkout_query(
self, pre_checkout_query, results, data: dict
):
pass
# self.logger.debug(f"{HANDLED_STR[bool(len(results))]} "
# f"pre-checkout query [ID:{pre_checkout_query.id}] "
# f"from user [ID:{pre_checkout_query.from_user.id}]")
async def on_pre_process_error(self, update: types.Update, error, data: dict):
timeout = self.check_timeout(update)
if timeout > 0:
self.logger.info(
f"Process update [ID:{update.update_id}, NAME:{update.__class__.__name__}]: [failed] (in {timeout} ms)"
)
def on_startup(dp: Dispatcher):
dp.middleware.setup(LoggingMiddleware())
|
[
"logging.getLogger",
"time.time"
] |
[((756, 767), 'time.time', 'time.time', ([], {}), '()\n', (765, 767), False, 'import time\n'), ((316, 358), 'logging.getLogger', 'logging.getLogger', (['self.__class__.__name__'], {}), '(self.__class__.__name__)\n', (333, 358), False, 'import logging\n'), ((599, 610), 'time.time', 'time.time', ([], {}), '()\n', (608, 610), False, 'import time\n')]
|
from dataclasses import dataclass, field
from typing import Any
@dataclass
class Response(object):
ok: bool = field(default=False)
data: Any = field(default=None)
message: str = field(default='')
|
[
"dataclasses.field"
] |
[((122, 142), 'dataclasses.field', 'field', ([], {'default': '(False)'}), '(default=False)\n', (127, 142), False, 'from dataclasses import dataclass, field\n'), ((160, 179), 'dataclasses.field', 'field', ([], {'default': 'None'}), '(default=None)\n', (165, 179), False, 'from dataclasses import dataclass, field\n'), ((200, 217), 'dataclasses.field', 'field', ([], {'default': '""""""'}), "(default='')\n", (205, 217), False, 'from dataclasses import dataclass, field\n')]
|
from django.db import models
class About(models.Model):
about_image = models.ImageField(upload_to="about/")
about_exp1 = models.TextField(blank=True, null=True)
about_exp2 = models.TextField(blank=True, null=True)
class Programms(models.Model):
name = models.CharField(max_length=255)
icon = models.CharField(max_length=255)
percentage = models.CharField(max_length=25, blank=True, null=True)
|
[
"django.db.models.ImageField",
"django.db.models.TextField",
"django.db.models.CharField"
] |
[((76, 113), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""about/"""'}), "(upload_to='about/')\n", (93, 113), False, 'from django.db import models\n'), ((131, 170), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (147, 170), False, 'from django.db import models\n'), ((188, 227), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (204, 227), False, 'from django.db import models\n'), ((272, 304), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (288, 304), False, 'from django.db import models\n'), ((316, 348), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (332, 348), False, 'from django.db import models\n'), ((366, 420), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(25)', 'blank': '(True)', 'null': '(True)'}), '(max_length=25, blank=True, null=True)\n', (382, 420), False, 'from django.db import models\n')]
|
import asyncio
import logging
import importlib.util
import os.path
import sys
# Debug
import traceback
from jshbot import commands
from jshbot.exceptions import ErrorTypes, BotException
EXCEPTION = 'Plugins'
def add_plugins(bot):
"""
Gets a list of all of the plugins and stores them as a key/value pair of
the plugin name and the module itself (renamed to plugin for the user).
In addition, this also sets the commands given by each plugin.
"""
directory = '{}/plugins'.format(bot.path)
data_directory = '{}/plugins/plugin_data'.format(bot.path)
if os.path.isdir(data_directory):
logging.debug("Setting plugin_data as plugin import path.")
sys.path.append(data_directory)
try:
plugins_list = os.listdir(directory)
except FileNotFoundError:
raise BotException(
EXCEPTION, "Plugins directory not found",
error_type=ErrorTypes.STARTUP)
valid_plugins = {}
# Add base plugin
from jshbot import base
plugin_commands = base.get_commands()
commands.add_commands(bot, plugin_commands, base)
valid_plugins['base'] = [base, plugin_commands]
# Get plugin commands
for plugin in plugins_list:
if (plugin[0] in ('.', '_') or
plugin == 'base' or
not plugin.endswith('.py')):
continue
try:
spec = importlib.util.spec_from_file_location(
plugin, '{}/{}'.format(directory, plugin))
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
plugin_commands = module.get_commands()
commands.add_commands(bot, plugin_commands, module)
except Exception as e:
traceback.print_exc()
raise BotException(
EXCEPTION, "Failed to import external plugin",
plugin, e=e, error_type=ErrorTypes.STARTUP)
else:
logging.debug("Adding plugin {}".format(plugin))
valid_plugins[plugin] = [module, plugin_commands]
if len(valid_plugins):
logging.debug("Loaded {} plugin(s)".format(len(valid_plugins)))
bot.plugins = valid_plugins
def broadcast_event(bot, event, *args, **kwargs):
"""
    Loops through all of the plugins and looks to see if the event index
    specified is associated with it. If it is, call that function with args.
"""
for plugin in bot.plugins.values():
function = getattr(plugin[0], event, None)
if function:
try:
asyncio.ensure_future(function(bot, *args, **kwargs))
except TypeError as e:
logging.error(traceback.format_exc())
logging.error("Bypassing event error: " + e)
|
[
"sys.path.append",
"logging.error",
"traceback.print_exc",
"logging.debug",
"jshbot.base.get_commands",
"jshbot.commands.add_commands",
"traceback.format_exc",
"jshbot.exceptions.BotException"
] |
[((1031, 1050), 'jshbot.base.get_commands', 'base.get_commands', ([], {}), '()\n', (1048, 1050), False, 'from jshbot import base\n'), ((1055, 1104), 'jshbot.commands.add_commands', 'commands.add_commands', (['bot', 'plugin_commands', 'base'], {}), '(bot, plugin_commands, base)\n', (1076, 1104), False, 'from jshbot import commands\n'), ((626, 685), 'logging.debug', 'logging.debug', (['"""Setting plugin_data as plugin import path."""'], {}), "('Setting plugin_data as plugin import path.')\n", (639, 685), False, 'import logging\n'), ((694, 725), 'sys.path.append', 'sys.path.append', (['data_directory'], {}), '(data_directory)\n', (709, 725), False, 'import sys\n'), ((824, 914), 'jshbot.exceptions.BotException', 'BotException', (['EXCEPTION', '"""Plugins directory not found"""'], {'error_type': 'ErrorTypes.STARTUP'}), "(EXCEPTION, 'Plugins directory not found', error_type=\n ErrorTypes.STARTUP)\n", (836, 914), False, 'from jshbot.exceptions import ErrorTypes, BotException\n'), ((1655, 1706), 'jshbot.commands.add_commands', 'commands.add_commands', (['bot', 'plugin_commands', 'module'], {}), '(bot, plugin_commands, module)\n', (1676, 1706), False, 'from jshbot import commands\n'), ((1750, 1771), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (1769, 1771), False, 'import traceback\n'), ((1790, 1897), 'jshbot.exceptions.BotException', 'BotException', (['EXCEPTION', '"""Failed to import external plugin"""', 'plugin'], {'e': 'e', 'error_type': 'ErrorTypes.STARTUP'}), "(EXCEPTION, 'Failed to import external plugin', plugin, e=e,\n error_type=ErrorTypes.STARTUP)\n", (1802, 1897), False, 'from jshbot.exceptions import ErrorTypes, BotException\n'), ((2714, 2758), 'logging.error', 'logging.error', (["('Bypassing event error: ' + e)"], {}), "('Bypassing event error: ' + e)\n", (2727, 2758), False, 'import logging\n'), ((2674, 2696), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (2694, 2696), False, 'import traceback\n')]
|
import zmq
from zmq import ssh
import numpy as np
from environments.inmoov.inmoov_p2p_client_ready import InmoovGymEnv
from .inmoov_server import server_connection, client_ssh_connection, client_connection
SERVER_PORT = 7777
HOSTNAME = 'localhost'
def send_array(socket, A, flags=0, copy=True, track=False):
"""send a numpy array with metadata"""
md = dict(
dtype = str(A.dtype),
shape = A.shape,
)
socket.send_json(md, flags|zmq.SNDMORE)
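    # SNDMORE marks the metadata frame as the first part of a multipart message; the raw array buffer is sent next.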
return socket.send(A, flags, copy=copy, track=track)
def test_inmoov_gym():
while True:
k = input()
try:
# time.sleep(0.5)
action = np.zeros(shape=(joints_num,))
signal = k.split()
joint, move = int(signal[0]), float(signal[1])
action[joint] = move
robot.step(action)
except:
continue
# robot.step()
if __name__ == "__main__":
socket = server_connection()
robot = InmoovGymEnv(debug_mode=True, positional_control=True)
init_pose = robot._inmoov.get_joints_pos()
joints_num = len(init_pose)
while True:
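        # Handle each incoming JSON command; for "position" commands, run a server step and stream the results back frame by frame.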
msg = socket.recv_json()
command = msg["command"]
if command == "position":
data = robot.server_step(msg[command])
joint_state, reward, done, infos, px, end_position = data
send_array(socket, joint_state, flags=0, copy=True, track=False)
send_array(socket, np.array(reward), flags=0, copy=True, track=False)
send_array(socket, np.array(done), flags=0, copy=True, track=False)
send_array(socket, px, flags=0, copy=True, track=False)
send_array(socket, end_position, flags=0, copy=True, track=False)
print("message sent")
elif command == "action":
print(1)
elif command == "done":
print(2)
elif command == "reset":
print(3)
|
[
"numpy.zeros",
"environments.inmoov.inmoov_p2p_client_ready.InmoovGymEnv",
"numpy.array"
] |
[((972, 1026), 'environments.inmoov.inmoov_p2p_client_ready.InmoovGymEnv', 'InmoovGymEnv', ([], {'debug_mode': '(True)', 'positional_control': '(True)'}), '(debug_mode=True, positional_control=True)\n', (984, 1026), False, 'from environments.inmoov.inmoov_p2p_client_ready import InmoovGymEnv\n'), ((655, 684), 'numpy.zeros', 'np.zeros', ([], {'shape': '(joints_num,)'}), '(shape=(joints_num,))\n', (663, 684), True, 'import numpy as np\n'), ((1452, 1468), 'numpy.array', 'np.array', (['reward'], {}), '(reward)\n', (1460, 1468), True, 'import numpy as np\n'), ((1534, 1548), 'numpy.array', 'np.array', (['done'], {}), '(done)\n', (1542, 1548), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymongo
from scrapy.conf import settings
class BaiduPipeline(object):
def __init__(self):
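        # Pull MongoDB connection settings from the Scrapy settings and select the target collection.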
host = settings['MONGODB_HOST']
port = settings['MONGODB_PORT']
db_name = settings['MONGODB_DBNAME']
client = pymongo.MongoClient(host=host, port=port)
db = client[db_name]
self.post = db[settings['MONGODB_DOCNAME']]
def process_item(self, item, spider):
person_info = dict(item)
self.post.insert(person_info)
return item
|
[
"pymongo.MongoClient"
] |
[((438, 479), 'pymongo.MongoClient', 'pymongo.MongoClient', ([], {'host': 'host', 'port': 'port'}), '(host=host, port=port)\n', (457, 479), False, 'import pymongo\n')]
|
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: execCmd
Description :
Author : liaozhaoyan
date: 2022/3/19
-------------------------------------------------
Change Activity:
2022/3/19:
-------------------------------------------------
"""
__author__ = 'liaozhaoyan'
import os
import sys
import shlex
from subprocess import PIPE, Popen
from threading import Thread
import select
from .surfException import FileNotExistException
ON_POSIX = 'posix' in sys.builtin_module_names
class CasyncPipe(Thread):
def __init__(self, f, func):
        if not os.path.exists(f):
            raise FileNotExistException("%s does not exist." % f)
self.__callBack = func
super(CasyncPipe, self).__init__()
self.daemon = True # thread dies with the program
self.__pipe = open(f, 'r')
self.__loop = True
self.start()
def newCb(self, func):
self.__callBack = func
def run(self):
while self.__loop:
line = self.__pipe.readline().strip()
self.__callBack(line)
def terminate(self):
self.__loop = False
self.join(1)
class CexecCmd(object):
def __init__(self):
pass
def cmd(self, cmds):
p = Popen(shlex.split(cmds), stdout=PIPE)
if sys.version_info.major == 2:
return p.stdout.read().strip()
else:
return p.stdout.read().decode().strip()
def system(self, cmds):
cmds = cmds.replace('\0', '').strip()
return os.popen(cmds).read(8192)
class CasyncCmdQue(object):
def __init__(self, cmd):
super(CasyncCmdQue, self).__init__()
self.daemon = True # thread dies with the program
self.__p = Popen(shlex.split(cmd), stdout=PIPE, stdin=PIPE, close_fds=ON_POSIX)
self.__e = select.epoll()
self.__e.register(self.__p.stdout.fileno(), select.EPOLLIN)
def __del__(self):
self.__p.kill()
def write(self, cmd):
try:
self.__p.stdin.write(cmd.encode())
self.__p.stdin.flush()
except IOError:
return -1
def writeLine(self, cmd):
self.write(cmd + "\n")
def read(self, tmout=0.2, l=16384):
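        # Wait up to tmout seconds for the child's stdout to become readable; return "" on timeout, otherwise up to l bytes.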
while True:
es = self.__e.poll(tmout)
if not es:
return ""
for f, e in es:
if e & select.EPOLLIN:
if sys.version_info.major == 2:
s = os.read(f, l)
else:
s = os.read(f, l).decode()
return s
def readw(self, want, tries=100):
i = 0
r = ""
while i < tries:
line = self.read()
if want in line:
return r + line
r += line
i += 1
raise Exception("get want args %s overtimes" % want)
def terminate(self):
self.__p.terminate()
return self.__p.wait()
if __name__ == "__main__":
pass
|
[
"os.read",
"os.popen",
"os.path.exists",
"shlex.split",
"select.epoll"
] |
[((1878, 1892), 'select.epoll', 'select.epoll', ([], {}), '()\n', (1890, 1892), False, 'import select\n'), ((646, 663), 'os.path.exists', 'os.path.exists', (['f'], {}), '(f)\n', (660, 663), False, 'import os\n'), ((1311, 1328), 'shlex.split', 'shlex.split', (['cmds'], {}), '(cmds)\n', (1322, 1328), False, 'import shlex\n'), ((1796, 1812), 'shlex.split', 'shlex.split', (['cmd'], {}), '(cmd)\n', (1807, 1812), False, 'import shlex\n'), ((1582, 1596), 'os.popen', 'os.popen', (['cmds'], {}), '(cmds)\n', (1590, 1596), False, 'import os\n'), ((2534, 2547), 'os.read', 'os.read', (['f', 'l'], {}), '(f, l)\n', (2541, 2547), False, 'import os\n'), ((2602, 2615), 'os.read', 'os.read', (['f', 'l'], {}), '(f, l)\n', (2609, 2615), False, 'import os\n')]
|
import importlib
import json
import logging
import os
import subprocess
from typing import Any, Dict, NamedTuple, Tuple
from django.apps import AppConfig
from django.conf import settings
from . import (
definitions_registry,
extract_views_from_urlpatterns,
global_types,
template_registry,
type_registry,
value_registry,
)
from .serialization import create_schema
logger = logging.getLogger("django.server")
def get_urls_schema() -> Dict[str, Any]:
urlconf = importlib.import_module(settings.ROOT_URLCONF)
urlpatterns = urlconf.urlpatterns # type: ignore[attr-defined]
from django.urls import converters
from django.urls.resolvers import RoutePattern
converter_mapping = {
converters.IntConverter: "number",
converters.StringConverter: "string",
converters.UUIDConverter: "string",
converters.SlugConverter: "string",
converters.PathConverter: "string",
}
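    # Converter classes not listed above fall back to "string" when the args map is built below.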
urls = extract_views_from_urlpatterns(urlpatterns) # type: ignore[no-untyped-call]
reverse = {}
for _, regex, name, pattern in urls:
if not isinstance(pattern, RoutePattern):
continue
reverse[name or regex] = {
"route": f"/{regex}",
"args": {
arg_name: converter_mapping.get(arg_converter.__class__, "string")
for arg_name, arg_converter in pattern.converters.items()
},
}
return reverse
def get_types_schema() -> Any:
""" The package json-schema-to-typescript does expose a way to
automatically export any interface it sees. However, this can bloat our
generated files.
Instead, while creating the schema, we occasionally run into types that we
want available globally but are not directly referenced by templates.
    These aren't exported by `json-schema-to-typescript` because they're
    referenced using `tsType`, so the library is unaware of their usage.
So we register them in `globals` and force `json-schema-to-typescript` to
expose them.
We can't just add these types to the `type_registry` because that's only
parsed once when generating the parent tuple.
We could explore doing two passes in the future.
See `unreachableDefinitions` in json-schema-to-typescript
"""
type_registry["globals"] = Any # type: ignore[assignment]
context_processors = []
from .serialization.context_processors import create_context_processor_type
for engine in settings.TEMPLATES:
if engine["BACKEND"] == "reactivated.backend.JSX":
context_processors.extend(engine["OPTIONS"]["context_processors"]) # type: ignore[index]
type_registry["Context"] = create_context_processor_type(context_processors)
ParentTuple = NamedTuple("ParentTuple", type_registry.items()) # type: ignore[misc]
parent_schema, definitions = create_schema(ParentTuple, definitions_registry)
definitions_registry.update(definitions)
return {
"definitions": definitions,
**{
**definitions["reactivated.apps.ParentTuple"],
"properties": {
**definitions["reactivated.apps.ParentTuple"]["properties"],
"globals": {
"type": "object",
"additionalProperties": False,
"required": list(global_types.keys()),
"properties": global_types,
},
},
},
}
def get_templates() -> Dict[str, Tuple[Any]]:
return template_registry
def get_values() -> Dict[str, Any]:
return value_registry
def get_schema() -> str:
schema = {
"urls": get_urls_schema(),
"templates": get_templates(),
"types": get_types_schema(),
"values": get_values(),
}
return json.dumps(schema, indent=4)
class ReactivatedConfig(AppConfig):
name = "reactivated"
def ready(self) -> None:
"""
Django's dev server actually starts twice. So we prevent generation on
the first start. TODO: handle noreload.
"""
schema = get_schema()
if (
os.environ.get("WERKZEUG_RUN_MAIN") == "true"
or os.environ.get("RUN_MAIN") == "true"
):
# Triggers for the subprocess of the dev server after restarts or initial start.
pass
is_server_started = "DJANGO_SEVER_STARTING" in os.environ
if is_server_started is False:
os.environ["DJANGO_SEVER_STARTING"] = "true"
return
generate_schema(schema)
def generate_schema(schema: str, skip_cache: bool = False) -> None:
"""
    For development usage only; this requires Node and Python installed.
You can use this function for your E2E test prep.
"""
logger.info("Generating interfaces and client side code")
encoded_schema = schema.encode()
import hashlib
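    # A SHA-1 digest of the schema is embedded in the generated file; if it already matches, regeneration is skipped below.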
digest = hashlib.sha1(encoded_schema).hexdigest().encode()
if skip_cache is False and os.path.exists("client/generated/index.tsx"):
with open("client/generated/index.tsx", "r+b") as existing:
already_generated = existing.read()
if digest in already_generated:
logger.info("Skipping generation as nothing has changed")
return
#: Note that we don't pass the file object to stdout, because otherwise
# webpack gets confused with the half-written file when we make updates.
# Maybe there's a way to force it to be a single atomic write? I tried
# open('w+b', buffering=0) but no luck.
process = subprocess.Popen(
["node", "./node_modules/reactivated/generator.js"],
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
)
out, error = process.communicate(encoded_schema)
os.makedirs("client/generated", exist_ok=True)
with open("client/generated/index.tsx", "w+b") as output:
output.write(b"// Digest: %s\n" % digest)
output.write(out)
logger.info("Finished generating.")
|
[
"subprocess.Popen",
"os.makedirs",
"importlib.import_module",
"hashlib.sha1",
"os.path.exists",
"json.dumps",
"os.environ.get",
"logging.getLogger"
] |
[((400, 434), 'logging.getLogger', 'logging.getLogger', (['"""django.server"""'], {}), "('django.server')\n", (417, 434), False, 'import logging\n'), ((492, 538), 'importlib.import_module', 'importlib.import_module', (['settings.ROOT_URLCONF'], {}), '(settings.ROOT_URLCONF)\n', (515, 538), False, 'import importlib\n'), ((3827, 3855), 'json.dumps', 'json.dumps', (['schema'], {'indent': '(4)'}), '(schema, indent=4)\n', (3837, 3855), False, 'import json\n'), ((5610, 5730), 'subprocess.Popen', 'subprocess.Popen', (["['node', './node_modules/reactivated/generator.js']"], {'stdout': 'subprocess.PIPE', 'stdin': 'subprocess.PIPE'}), "(['node', './node_modules/reactivated/generator.js'],\n stdout=subprocess.PIPE, stdin=subprocess.PIPE)\n", (5626, 5730), False, 'import subprocess\n'), ((5816, 5862), 'os.makedirs', 'os.makedirs', (['"""client/generated"""'], {'exist_ok': '(True)'}), "('client/generated', exist_ok=True)\n", (5827, 5862), False, 'import os\n'), ((5019, 5063), 'os.path.exists', 'os.path.exists', (['"""client/generated/index.tsx"""'], {}), "('client/generated/index.tsx')\n", (5033, 5063), False, 'import os\n'), ((4156, 4191), 'os.environ.get', 'os.environ.get', (['"""WERKZEUG_RUN_MAIN"""'], {}), "('WERKZEUG_RUN_MAIN')\n", (4170, 4191), False, 'import os\n'), ((4217, 4243), 'os.environ.get', 'os.environ.get', (['"""RUN_MAIN"""'], {}), "('RUN_MAIN')\n", (4231, 4243), False, 'import os\n'), ((4937, 4965), 'hashlib.sha1', 'hashlib.sha1', (['encoded_schema'], {}), '(encoded_schema)\n', (4949, 4965), False, 'import hashlib\n')]
|
import threading
from pynput import keyboard
class KeyPresses():
def __init__(self):
self.keep_from_dying_thread = None
self.holding_shift = False
self.key_listener = keyboard.Listener(on_press=self.on_keydown, on_release=self.on_keyup)
self.key_listener.start()
def on_keydown(self, key: keyboard.Key):
char = hasattr(key, 'char')
if char:
if self.holding_shift and key.char.lower() == 'b':
print('Shift B')
else:
if key == keyboard.Key.esc:
self.key_listener.stop()
self.keep_from_dying_thread.cancel()
if key == keyboard.Key.shift:
self.holding_shift = True
def on_keyup(self, key: keyboard.Key):
if key == keyboard.Key.shift:
self.holding_shift = False
def keep_from_dying(self):
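        # A very long, non-daemon Timer thread keeps the interpreter alive so the keyboard listener can keep receiving events.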
self.keep_from_dying_thread = threading.Timer(1000000, lambda : None)
self.keep_from_dying_thread.start()
k = KeyPresses()
k.keep_from_dying()
|
[
"threading.Timer",
"pynput.keyboard.Listener"
] |
[((197, 266), 'pynput.keyboard.Listener', 'keyboard.Listener', ([], {'on_press': 'self.on_keydown', 'on_release': 'self.on_keyup'}), '(on_press=self.on_keydown, on_release=self.on_keyup)\n', (214, 266), False, 'from pynput import keyboard\n'), ((922, 961), 'threading.Timer', 'threading.Timer', (['(1000000)', '(lambda : None)'], {}), '(1000000, lambda : None)\n', (937, 961), False, 'import threading\n')]
|
from precise_bbcode.bbcode.tag import BBCodeTag
from precise_bbcode.tag_pool import tag_pool
class LoadDummyTag(BBCodeTag):
name = 'loaddummy01'
definition_string = '[loaddummy01]{TEXT}[/loaddummy01]'
format_string = '<loaddummy>{TEXT}</loaddummy>'
tag_pool.register_tag(LoadDummyTag)
|
[
"precise_bbcode.tag_pool.tag_pool.register_tag"
] |
[((265, 300), 'precise_bbcode.tag_pool.tag_pool.register_tag', 'tag_pool.register_tag', (['LoadDummyTag'], {}), '(LoadDummyTag)\n', (286, 300), False, 'from precise_bbcode.tag_pool import tag_pool\n')]
|
# coding=utf-8
"""
Tencent is pleased to support the open source community by making BK-LOG 蓝鲸日志平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-LOG 蓝鲸日志平台 is licensed under the MIT License.
License for BK-LOG 蓝鲸日志平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import operator
from datetime import datetime
from functools import reduce
from typing import List
from django.db import models
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from apps.utils.log import logger
from apps.models import (
OperateRecordModel,
SoftDeleteModel,
JsonField,
MultiStrSplitByCommaFieldText,
OperateRecordModelManager,
EncryptionField,
)
from apps.log_extract.constants import ExtractLinkType, PIPELINE_TIME_FORMAT
from pipeline.service import task_service
class Strategies(SoftDeleteModel):
"""用户策略表"""
strategy_id = models.AutoField(_("策略ID"), primary_key=True, default=None)
bk_biz_id = models.IntegerField(_("业务ID"), db_index=True)
strategy_name = models.TextField(_("策略名称"))
user_list = MultiStrSplitByCommaFieldText(_("用户ID"))
select_type = models.CharField(_("目标选择类型"), max_length=16)
modules = JsonField(_("模块列表"))
visible_dir = MultiStrSplitByCommaFieldText(_("目录列表"))
file_type = MultiStrSplitByCommaFieldText(_("文件类型"))
operator = models.CharField(_("作业执行人"), max_length=64, default="")
class Meta:
ordering = ["-updated_at"]
class TasksManager(OperateRecordModelManager):
search_fields = ["ip_list", "file_path", "created_by", "remark"]
def search(self, keyword):
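        # OR together case-insensitive "contains" filters across all search_fields.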
if keyword:
filter_query = [Q(**{f"{field}__icontains": keyword}) for field in self.search_fields]
filter_q = reduce(operator.or_, filter_query)
return self.filter(filter_q)
return self
class Tasks(OperateRecordModel):
"""任务记录 一个"下载"行为记作一个"Task" """
objects = TasksManager()
task_id = models.AutoField(_("任务记录id"), primary_key=True)
bk_biz_id = models.IntegerField(_("业务id"), db_index=True)
ip_list = MultiStrSplitByCommaFieldText(_("业务机器ip"))
file_path = MultiStrSplitByCommaFieldText(_("文件列表"))
filter_type = models.CharField(_("过滤类型"), max_length=16, null=True, blank=True)
filter_content = JsonField(_("过滤内容"), null=True, blank=True)
download_status = models.CharField(_("当前文件下载状态"), max_length=64, null=True, blank=True)
expiration_date = models.DateTimeField(_("任务过期时间"), default=None)
pipeline_id = models.CharField(_("流水线ID"), max_length=128, null=True, blank=True, db_index=True)
pipeline_components_id = JsonField(_("流水线组件ID"), null=True, blank=True)
job_task_id = models.BigIntegerField(_("文件分发ID"), null=True, blank=True)
    # Call the API that creates the upload task
cstone_upload_ticket = models.BigIntegerField(_("上传票据"), null=True, blank=True)
cstone_upload_random = models.TextField(_("上传随机值"), null=True, blank=True)
    # Create the upload task from the transfer server to CStone (云石)
job_upload_task_id = models.BigIntegerField(_("任务上传ID"), null=True, blank=True) # 查询上传脚本的执行结果, 执行结果里有云石返回的task_id
cstone_upload_task_id = models.BigIntegerField(_("云石上传ID"), null=True, blank=True) # 用于查询中转服务器到云石的上传情况
    # Path of the file on CStone that is waiting to be downloaded
cstone_file_path = models.CharField(_("云石文件路径"), default=None, max_length=64, null=True, blank=True)
    # Once the upload finishes, call the API that creates the download link
cstone_download_task_id = models.BigIntegerField(_("云石任务ID"), null=True, blank=True)
cstone_download_bk_biz_id = models.BigIntegerField(_("云石下载业务ID"), null=True, blank=True)
cstone_download_ticket = models.BigIntegerField(_("下载票据"), null=True, blank=True) # 根据票据向云石网盘发起下载请求
cstone_download_random = models.TextField(_("下载随机值"), null=True, blank=True)
task_process_info = models.TextField(_("任务过程信息"), null=True, blank=True)
remark = models.TextField(_("备注"), null=True, blank=True)
preview_directory = models.CharField(_("预览目录"), null=True, blank=True, max_length=255)
preview_ip = models.TextField(_("预览地址ip"), null=True, blank=True)
preview_time_range = models.CharField(_("预览日期"), max_length=10, null=True, blank=True)
preview_is_search_child = models.BooleanField(_("预览是否搜索子目录"), default=False, blank=True)
preview_start_time = models.CharField(_("预览开始日期"), null=True, blank=True, max_length=20)
preview_end_time = models.CharField(_("预览结束日期"), null=True, blank=True, max_length=20)
ex_data = JsonField(_("额外数据"), null=True, blank=True)
cos_file_name = models.CharField(_("cos对象文件名称"), null=True, blank=True, max_length=255)
link_id = models.IntegerField(_("链路id"), null=True, blank=True)
class Meta:
ordering = ["-created_at"]
def get_link_type(self):
try:
return ExtractLink.objects.get(link_id=self.link_id).link_type
except ExtractLink.DoesNotExist:
return ""
def get_extract(self):
from apps.log_extract.handlers.extract import ExtractLinkFactory
return ExtractLinkFactory.get_link(self.get_link_type())()
def get_link(self):
return ExtractLink.objects.filter(link_id=self.link_id).first()
def total_elapsed(self):
try:
task_status = task_service.get_state(self.pipeline_id)
except Exception:
            # Downloads may span multiple hosts or a single host, so some pipeline nodes may never have executed
logger.info("pipeline任务不存在,pipeline_id=>[{}]".format(self.pipeline_id))
return "0s"
component_status_list = []
if "activities" not in self.pipeline_components_id:
return "0s"
for component_id, component_info in self.pipeline_components_id["activities"].items():
            # Some pipeline components here may not have executed
try:
task_status["children"][component_id]["name"] = component_info["name"]
component_status_list.append(task_status["children"][component_id])
except KeyError:
pass
return f"{self._cal_total_time(component_status_list)}s"
def _cal_total_time(self, components: List[dict]):
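        # Sum the per-component durations (finish_time - start_time) in seconds, skipping components that never finished.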
return sum(
[
(
datetime.strptime(component["finish_time"], PIPELINE_TIME_FORMAT)
- datetime.strptime(component["start_time"], PIPELINE_TIME_FORMAT)
).seconds
for component in components
if component["finish_time"] is not None
]
)
total_elapsed.short_description = _("总耗时")
def ip_num(self):
return len(self.ip_list)
ip_num.short_description = _("IP数量")
def download_file_detail(self):
all_file_size = sum(int(ip.get("all_origin_file_size", 0)) for ip in self.ex_data.values())
all_file_num = sum(int(ip.get("file_count", 0)) for ip in self.ex_data.values())
all_pack_file_size = sum(int(ip.get("all_pack_file_size", 0)) for ip in self.ex_data.values())
ret = [
f"{_('下载文件总大小')}: {all_pack_file_size}kb",
f"{_('下载原始文件原始总大小')}: {all_file_size}kb",
f"{_('下载文件总数量')}: {all_file_num}",
]
return " ".join(ret)
download_file_detail.short_description = _("下载文件统计")
class ExtractLink(OperateRecordModel):
name = models.CharField(_("链路名称"), max_length=255)
link_id = models.AutoField(_("链路id"), primary_key=True)
link_type = models.CharField(_("链路类型"), max_length=20, default=ExtractLinkType.COMMON.value)
operator = models.CharField(_("执行人"), max_length=255)
op_bk_biz_id = models.IntegerField(_("执行bk_biz_id"))
qcloud_secret_id = EncryptionField(_("腾讯云SecretId"), default="", null=True, blank=True, help_text=_("内网链路不需要填写"))
qcloud_secret_key = EncryptionField(_("腾讯云SecretKey"), default="", null=True, blank=True, help_text=_("内网链路不需要填写"))
qcloud_cos_bucket = models.CharField(
_("腾讯云Cos桶名称"), max_length=255, default="", blank=True, help_text=_("内网链路不需要填写")
)
qcloud_cos_region = models.CharField(
_("腾讯云Cos区域"), max_length=255, default="", blank=True, help_text=_("内网链路不需要填写")
)
is_enable = models.BooleanField(_("是否启用"), default=True)
created_at = models.DateTimeField(_("创建时间"), auto_now_add=True, blank=True, db_index=True, null=True)
class Meta:
verbose_name = _("提取链路 (第一次配置链路之后 需要重新部署saas && 暂时只支持linux及安装了cgwin的系统)")
verbose_name_plural = _("提取链路 (第一次配置链路之后 需要重新部署saas && 暂时只支持linux及安装了cgwin的系统)")
class ExtractLinkHost(models.Model):
target_dir = models.CharField(_("挂载目录"), max_length=255, default="")
bk_cloud_id = models.IntegerField(_("主机云区域id"))
ip = models.GenericIPAddressField(_("主机ip"))
link = models.ForeignKey(ExtractLink, on_delete=models.CASCADE)
class Meta:
verbose_name = _("链路中转机")
verbose_name_plural = _("链路中转机")
|
[
"django.db.models.ForeignKey",
"django.db.models.Q",
"datetime.datetime.strptime",
"pipeline.service.task_service.get_state",
"functools.reduce",
"django.utils.translation.ugettext_lazy"
] |
[((7489, 7497), 'django.utils.translation.ugettext_lazy', '_', (['"""总耗时"""'], {}), "('总耗时')\n", (7490, 7497), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((7586, 7595), 'django.utils.translation.ugettext_lazy', '_', (['"""IP数量"""'], {}), "('IP数量')\n", (7587, 7595), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((8183, 8194), 'django.utils.translation.ugettext_lazy', '_', (['"""下载文件统计"""'], {}), "('下载文件统计')\n", (8184, 8194), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((9653, 9709), 'django.db.models.ForeignKey', 'models.ForeignKey', (['ExtractLink'], {'on_delete': 'models.CASCADE'}), '(ExtractLink, on_delete=models.CASCADE)\n', (9670, 9709), False, 'from django.db import models\n'), ((1991, 2000), 'django.utils.translation.ugettext_lazy', '_', (['"""策略ID"""'], {}), "('策略ID')\n", (1992, 2000), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2070, 2079), 'django.utils.translation.ugettext_lazy', '_', (['"""业务ID"""'], {}), "('业务ID')\n", (2071, 2079), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2133, 2142), 'django.utils.translation.ugettext_lazy', '_', (['"""策略名称"""'], {}), "('策略名称')\n", (2134, 2142), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2190, 2199), 'django.utils.translation.ugettext_lazy', '_', (['"""用户ID"""'], {}), "('用户ID')\n", (2191, 2199), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2236, 2247), 'django.utils.translation.ugettext_lazy', '_', (['"""目标选择类型"""'], {}), "('目标选择类型')\n", (2237, 2247), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2288, 2297), 'django.utils.translation.ugettext_lazy', '_', (['"""模块列表"""'], {}), "('模块列表')\n", (2289, 2297), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2347, 2356), 'django.utils.translation.ugettext_lazy', '_', (['"""目录列表"""'], {}), "('目录列表')\n", (2348, 2356), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2404, 2413), 'django.utils.translation.ugettext_lazy', '_', (['"""文件类型"""'], {}), "('文件类型')\n", (2405, 2413), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2447, 2457), 'django.utils.translation.ugettext_lazy', '_', (['"""作业执行人"""'], {}), "('作业执行人')\n", (2448, 2457), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3057, 3068), 'django.utils.translation.ugettext_lazy', '_', (['"""任务记录id"""'], {}), "('任务记录id')\n", (3058, 3068), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3124, 3133), 'django.utils.translation.ugettext_lazy', '_', (['"""业务id"""'], {}), "('业务id')\n", (3125, 3133), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3194, 3205), 'django.utils.translation.ugettext_lazy', '_', (['"""业务机器ip"""'], {}), "('业务机器ip')\n", (3195, 3205), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3253, 3262), 'django.utils.translation.ugettext_lazy', '_', (['"""文件列表"""'], {}), "('文件列表')\n", (3254, 3262), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3300, 3309), 'django.utils.translation.ugettext_lazy', '_', (['"""过滤类型"""'], {}), "('过滤类型')\n", (3301, 3309), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3380, 3389), 'django.utils.translation.ugettext_lazy', '_', (['"""过滤内容"""'], {}), "('过滤内容')\n", (3381, 3389), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3454, 3467), 'django.utils.translation.ugettext_lazy', '_', 
(['"""当前文件下载状态"""'], {}), "('当前文件下载状态')\n", (3455, 3467), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3550, 3561), 'django.utils.translation.ugettext_lazy', '_', (['"""任务过期时间"""'], {}), "('任务过期时间')\n", (3551, 3561), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3612, 3622), 'django.utils.translation.ugettext_lazy', '_', (['"""流水线ID"""'], {}), "('流水线ID')\n", (3613, 3622), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3717, 3729), 'django.utils.translation.ugettext_lazy', '_', (['"""流水线组件ID"""'], {}), "('流水线组件ID')\n", (3718, 3729), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3796, 3807), 'django.utils.translation.ugettext_lazy', '_', (['"""文件分发ID"""'], {}), "('文件分发ID')\n", (3797, 3807), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3901, 3910), 'django.utils.translation.ugettext_lazy', '_', (['"""上传票据"""'], {}), "('上传票据')\n", (3902, 3910), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3979, 3989), 'django.utils.translation.ugettext_lazy', '_', (['"""上传随机值"""'], {}), "('上传随机值')\n", (3980, 3989), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4085, 4096), 'django.utils.translation.ugettext_lazy', '_', (['"""任务上传ID"""'], {}), "('任务上传ID')\n", (4086, 4096), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4207, 4218), 'django.utils.translation.ugettext_lazy', '_', (['"""云石上传ID"""'], {}), "('云石上传ID')\n", (4208, 4218), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4323, 4334), 'django.utils.translation.ugettext_lazy', '_', (['"""云石文件路径"""'], {}), "('云石文件路径')\n", (4324, 4334), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4467, 4478), 'django.utils.translation.ugettext_lazy', '_', (['"""云石任务ID"""'], {}), "('云石任务ID')\n", (4468, 4478), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4558, 4571), 'django.utils.translation.ugettext_lazy', '_', (['"""云石下载业务ID"""'], {}), "('云石下载业务ID')\n", (4559, 4571), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4648, 4657), 'django.utils.translation.ugettext_lazy', '_', (['"""下载票据"""'], {}), "('下载票据')\n", (4649, 4657), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4747, 4757), 'django.utils.translation.ugettext_lazy', '_', (['"""下载随机值"""'], {}), "('下载随机值')\n", (4748, 4757), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4823, 4834), 'django.utils.translation.ugettext_lazy', '_', (['"""任务过程信息"""'], {}), "('任务过程信息')\n", (4824, 4834), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4889, 4896), 'django.utils.translation.ugettext_lazy', '_', (['"""备注"""'], {}), "('备注')\n", (4890, 4896), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4963, 4972), 'django.utils.translation.ugettext_lazy', '_', (['"""预览目录"""'], {}), "('预览目录')\n", (4964, 4972), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((5047, 5058), 'django.utils.translation.ugettext_lazy', '_', (['"""预览地址ip"""'], {}), "('预览地址ip')\n", (5048, 5058), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((5125, 5134), 'django.utils.translation.ugettext_lazy', '_', (['"""预览日期"""'], {}), "('预览日期')\n", (5126, 5134), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((5224, 5238), 'django.utils.translation.ugettext_lazy', '_', (['"""预览是否搜索子目录"""'], {}), "('预览是否搜索子目录')\n", (5225, 5238), True, 'from 
django.utils.translation import ugettext_lazy as _\n'), ((5309, 5320), 'django.utils.translation.ugettext_lazy', '_', (['"""预览开始日期"""'], {}), "('预览开始日期')\n", (5310, 5320), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((5400, 5411), 'django.utils.translation.ugettext_lazy', '_', (['"""预览结束日期"""'], {}), "('预览结束日期')\n", (5401, 5411), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((5476, 5485), 'django.utils.translation.ugettext_lazy', '_', (['"""额外数据"""'], {}), "('额外数据')\n", (5477, 5485), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((5547, 5561), 'django.utils.translation.ugettext_lazy', '_', (['"""cos对象文件名称"""'], {}), "('cos对象文件名称')\n", (5548, 5561), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((5636, 5645), 'django.utils.translation.ugettext_lazy', '_', (['"""链路id"""'], {}), "('链路id')\n", (5637, 5645), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((8264, 8273), 'django.utils.translation.ugettext_lazy', '_', (['"""链路名称"""'], {}), "('链路名称')\n", (8265, 8273), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((8322, 8331), 'django.utils.translation.ugettext_lazy', '_', (['"""链路id"""'], {}), "('链路id')\n", (8323, 8331), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((8384, 8393), 'django.utils.translation.ugettext_lazy', '_', (['"""链路类型"""'], {}), "('链路类型')\n", (8385, 8393), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((8480, 8488), 'django.utils.translation.ugettext_lazy', '_', (['"""执行人"""'], {}), "('执行人')\n", (8481, 8488), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((8545, 8561), 'django.utils.translation.ugettext_lazy', '_', (['"""执行bk_biz_id"""'], {}), "('执行bk_biz_id')\n", (8546, 8561), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((8602, 8618), 'django.utils.translation.ugettext_lazy', '_', (['"""腾讯云SecretId"""'], {}), "('腾讯云SecretId')\n", (8603, 8618), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((8721, 8738), 'django.utils.translation.ugettext_lazy', '_', (['"""腾讯云SecretKey"""'], {}), "('腾讯云SecretKey')\n", (8722, 8738), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((8851, 8865), 'django.utils.translation.ugettext_lazy', '_', (['"""腾讯云Cos桶名称"""'], {}), "('腾讯云Cos桶名称')\n", (8852, 8865), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((8988, 9001), 'django.utils.translation.ugettext_lazy', '_', (['"""腾讯云Cos区域"""'], {}), "('腾讯云Cos区域')\n", (8989, 9001), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((9110, 9119), 'django.utils.translation.ugettext_lazy', '_', (['"""是否启用"""'], {}), "('是否启用')\n", (9111, 9119), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((9173, 9182), 'django.utils.translation.ugettext_lazy', '_', (['"""创建时间"""'], {}), "('创建时间')\n", (9174, 9182), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((9281, 9339), 'django.utils.translation.ugettext_lazy', '_', (['"""提取链路 (第一次配置链路之后 需要重新部署saas && 暂时只支持linux及安装了cgwin的系统)"""'], {}), "('提取链路 (第一次配置链路之后 需要重新部署saas && 暂时只支持linux及安装了cgwin的系统)')\n", (9282, 9339), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((9370, 9428), 'django.utils.translation.ugettext_lazy', '_', (['"""提取链路 (第一次配置链路之后 需要重新部署saas && 暂时只支持linux及安装了cgwin的系统)"""'], {}), "('提取链路 (第一次配置链路之后 需要重新部署saas && 暂时只支持linux及安装了cgwin的系统)')\n", (9371, 9428), True, 'from django.utils.translation import 
ugettext_lazy as _\n'), ((9502, 9511), 'django.utils.translation.ugettext_lazy', '_', (['"""挂载目录"""'], {}), "('挂载目录')\n", (9503, 9511), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((9579, 9591), 'django.utils.translation.ugettext_lazy', '_', (['"""主机云区域id"""'], {}), "('主机云区域id')\n", (9580, 9591), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((9631, 9640), 'django.utils.translation.ugettext_lazy', '_', (['"""主机ip"""'], {}), "('主机ip')\n", (9632, 9640), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((9750, 9760), 'django.utils.translation.ugettext_lazy', '_', (['"""链路中转机"""'], {}), "('链路中转机')\n", (9751, 9760), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((9791, 9801), 'django.utils.translation.ugettext_lazy', '_', (['"""链路中转机"""'], {}), "('链路中转机')\n", (9792, 9801), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2830, 2864), 'functools.reduce', 'reduce', (['operator.or_', 'filter_query'], {}), '(operator.or_, filter_query)\n', (2836, 2864), False, 'from functools import reduce\n'), ((6238, 6278), 'pipeline.service.task_service.get_state', 'task_service.get_state', (['self.pipeline_id'], {}), '(self.pipeline_id)\n', (6260, 6278), False, 'from pipeline.service import task_service\n'), ((8671, 8685), 'django.utils.translation.ugettext_lazy', '_', (['"""内网链路不需要填写"""'], {}), "('内网链路不需要填写')\n", (8672, 8685), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((8791, 8805), 'django.utils.translation.ugettext_lazy', '_', (['"""内网链路不需要填写"""'], {}), "('内网链路不需要填写')\n", (8792, 8805), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((8929, 8943), 'django.utils.translation.ugettext_lazy', '_', (['"""内网链路不需要填写"""'], {}), "('内网链路不需要填写')\n", (8930, 8943), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((9063, 9077), 'django.utils.translation.ugettext_lazy', '_', (['"""内网链路不需要填写"""'], {}), "('内网链路不需要填写')\n", (9064, 9077), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2736, 2773), 'django.db.models.Q', 'Q', ([], {}), "(**{f'{field}__icontains': keyword})\n", (2737, 2773), False, 'from django.db.models import Q\n'), ((7956, 7968), 'django.utils.translation.ugettext_lazy', '_', (['"""下载文件总大小"""'], {}), "('下载文件总大小')\n", (7957, 7968), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((8011, 8027), 'django.utils.translation.ugettext_lazy', '_', (['"""下载原始文件原始总大小"""'], {}), "('下载原始文件原始总大小')\n", (8012, 8027), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((8065, 8077), 'django.utils.translation.ugettext_lazy', '_', (['"""下载文件总数量"""'], {}), "('下载文件总数量')\n", (8066, 8077), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((7147, 7212), 'datetime.datetime.strptime', 'datetime.strptime', (["component['finish_time']", 'PIPELINE_TIME_FORMAT'], {}), "(component['finish_time'], PIPELINE_TIME_FORMAT)\n", (7164, 7212), False, 'from datetime import datetime\n'), ((7235, 7299), 'datetime.datetime.strptime', 'datetime.strptime', (["component['start_time']", 'PIPELINE_TIME_FORMAT'], {}), "(component['start_time'], PIPELINE_TIME_FORMAT)\n", (7252, 7299), False, 'from datetime import datetime\n')]
|
from functools import wraps
import os
import tempfile
import tarfile
def TemporaryDirectory(func):
    '''This decorator creates a temporary directory and wraps the given function'''
@wraps(func)
def wrapper(*args, **kwargs):
cwd = os.getcwd()
with tempfile.TemporaryDirectory() as tmp_path:
os.chdir(tmp_path)
            try:
                result = func(*args, **kwargs)
            finally:
                # always restore the original working directory, even if func raises
                os.chdir(cwd)
            return result
return wrapper
def make_tarfile(output_filename, source_dir):
with tarfile.open(output_filename, "w:gz") as tar:
tar.add(source_dir, arcname=os.path.basename(source_dir))
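# --- Illustrative usage sketch (editor's addition, not part of the original file) ---
# The decorated function runs with a throwaway directory as its working directory;
# the file name below is a hypothetical placeholder.
@TemporaryDirectory
def _example_scratch_work():
    with open('scratch.txt', 'w') as fh:   # created inside the temporary directory
        fh.write('intermediate data')
    return os.getcwd()                    # path of the temporary directory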
|
[
"tempfile.TemporaryDirectory",
"os.path.basename",
"os.getcwd",
"functools.wraps",
"tarfile.open",
"os.chdir"
] |
[((184, 195), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (189, 195), False, 'from functools import wraps\n'), ((244, 255), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (253, 255), False, 'import os\n'), ((516, 553), 'tarfile.open', 'tarfile.open', (['output_filename', '"""w:gz"""'], {}), "(output_filename, 'w:gz')\n", (528, 553), False, 'import tarfile\n'), ((269, 298), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (296, 298), False, 'import tempfile\n'), ((324, 342), 'os.chdir', 'os.chdir', (['tmp_path'], {}), '(tmp_path)\n', (332, 342), False, 'import os\n'), ((398, 411), 'os.chdir', 'os.chdir', (['cwd'], {}), '(cwd)\n', (406, 411), False, 'import os\n'), ((598, 626), 'os.path.basename', 'os.path.basename', (['source_dir'], {}), '(source_dir)\n', (614, 626), False, 'import os\n')]
|
from django.urls import reverse
from series_tiempo_ar_api.apps.api.tests.endpoint_tests.endpoint_test_case import EndpointTestCase
class PaginationTests(EndpointTestCase):
def test_get_single_value(self):
resp = self.client.get(reverse('api:series:series'), data={'ids': self.increasing_month_series_id, 'limit': 1})
self.assertEqual(len(resp.json()['data']), 1)
def test_get_five_offset_values(self):
data = {'ids': self.increasing_month_series_id, 'start': 5, 'limit': 5}
resp = self.run_query(data)
data = [
['1999-06-01', 105],
['1999-07-01', 106],
['1999-08-01', 107],
['1999-09-01', 108],
['1999-10-01', 109],
]
self.assertEqual(resp['data'], data)
|
[
"django.urls.reverse"
] |
[((244, 272), 'django.urls.reverse', 'reverse', (['"""api:series:series"""'], {}), "('api:series:series')\n", (251, 272), False, 'from django.urls import reverse\n')]
|
# standard lib
from functools import wraps
# third party packages
from flask import Flask, jsonify, abort, request, Response
from flask.ext.sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.debug = True
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db'
app.secret_key = '<KEY>'
db = SQLAlchemy(app)
# region dbClasses
class User(db.Model):
"""
    Represents a user in the database
"""
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(80), unique=True)
email = db.Column(db.String(120), unique=True)
def __init__(self, username, email):
self.username = username
self.email = email
def __repr__(self):
return '<User {}>'.format(self.username)
def to_dict(self):
return {
'id': self.id,
'username': self.username,
'email': self.email
}
# endregion
# region authorization
def check_auth(username, password):
return username == 'admin' and password == '<PASSWORD>'
def authenticate():
return Response(
'Could not verify your access level for that URL.\n'
'You have to login with proper credentials', 401,
{'WWW-Authenticate': 'Basic realm="Login Required"'})
def requires_auth(f):
@wraps(f)
def decorated(*args, **kwargs):
auth = request.authorization
if not auth or not check_auth(auth.username, auth.password):
return authenticate()
return f(*args, **kwargs)
return decorated
# endregion
@app.route('/')
@requires_auth
def hello_world():
return 'Hello World!'
@app.route('/users', methods=['GET', 'POST'])
def get_users():
if request.method == 'POST':
if (request.json['username'] is None
or request.json['email'] is None):
            abort(400)  # a required field is missing from the request body
user = User(request.json['username'],
request.json['email'])
db.session.add(user)
db.session.commit()
return jsonify({'user': user.to_dict()}), 201
elif request.method == 'GET':
users = User.query.all()
users_dto = [user.to_dict() for user in users]
return jsonify({'users': users_dto}), 200
else:
abort(405, "Method not supported")
@app.errorhandler(405)
def custom405(error):
response = jsonify({'message': error.description})
return response, 405
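# --- Illustrative sketch (editor's addition): exercising the Basic-auth route ---
# Uses the third-party "requests" package; the credentials are placeholders and the
# real password is redacted above.
def _example_call_protected_route():
    import requests
    resp = requests.get('http://localhost:5000/', auth=('admin', '<PASSWORD>'))
    return resp.status_code, resp.text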
if __name__ == '__main__':
if app.debug:
app.run()
else:
app.run(host='0.0.0.0')
|
[
"flask.ext.sqlalchemy.SQLAlchemy",
"flask.Flask",
"flask.abort",
"flask.jsonify",
"functools.wraps",
"flask.Response"
] |
[((177, 192), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (182, 192), False, 'from flask import Flask, jsonify, abort, request, Response\n'), ((300, 315), 'flask.ext.sqlalchemy.SQLAlchemy', 'SQLAlchemy', (['app'], {}), '(app)\n', (310, 315), False, 'from flask.ext.sqlalchemy import SQLAlchemy\n'), ((1055, 1230), 'flask.Response', 'Response', (['"""Could not verify your access level for that URL.\nYou have to login with proper credentials"""', '(401)', '{\'WWW-Authenticate\': \'Basic realm="Login Required"\'}'], {}), '(\n """Could not verify your access level for that URL.\nYou have to login with proper credentials"""\n , 401, {\'WWW-Authenticate\': \'Basic realm="Login Required"\'})\n', (1063, 1230), False, 'from flask import Flask, jsonify, abort, request, Response\n'), ((1263, 1271), 'functools.wraps', 'wraps', (['f'], {}), '(f)\n', (1268, 1271), False, 'from functools import wraps\n'), ((2296, 2335), 'flask.jsonify', 'jsonify', (["{'message': error.description}"], {}), "({'message': error.description})\n", (2303, 2335), False, 'from flask import Flask, jsonify, abort, request, Response\n'), ((1800, 1807), 'flask.abort', 'abort', ([], {}), '()\n', (1805, 1807), False, 'from flask import Flask, jsonify, abort, request, Response\n'), ((2198, 2232), 'flask.abort', 'abort', (['(405)', '"""Method not supported"""'], {}), "(405, 'Method not supported')\n", (2203, 2232), False, 'from flask import Flask, jsonify, abort, request, Response\n'), ((2145, 2174), 'flask.jsonify', 'jsonify', (["{'users': users_dto}"], {}), "({'users': users_dto})\n", (2152, 2174), False, 'from flask import Flask, jsonify, abort, request, Response\n')]
|
"""
Automatic 2D class selection tool.
MIT License
Copyright (c) 2019 <NAME> Institute of Molecular Physiology
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from os import path, listdir
import h5py
from PIL import Image # install it via pip install pillow
import numpy as np
import mrcfile
"""
The format of the .hdf file is the following:
['MDF']['images']['i']['image'] where i is a number representing the i-th image,
hence to get image number 5:
['MDF']['images']['5']['image'][()]
"""
def create_circular_mask(h, w, center=None, radius=None):
if center is None: # use the middle of the image
center = (int(w / 2), int(h / 2))
if radius is None: # use the smallest distance between the center and image walls
radius = min(center[0], center[1], w - center[0], h - center[1])
Y, X = np.ogrid[:h, :w]
dist_from_center = np.sqrt((X - center[0]) ** 2 + (Y - center[1]) ** 2)
mask = dist_from_center <= radius
return mask
def checkfiles(path_to_files):
"""
    Checks if the hdf files are in the correct path and returns True if all of them exist
:param path_to_files: list of paths
:return:
"""
if isinstance(path_to_files, (list, tuple)):
for p in path_to_files:
if not path.isfile(p):
return False
elif isinstance(path_to_files, str):
return path.isfile(path_to_files)
return True
def calc_2d_spectra(img):
from scipy import fftpack
import numpy as np
F1 = fftpack.fft2(img)
F2 = fftpack.fftshift(F1)
psd2D = np.abs(F2) ** 2
return psd2D
def getList_files(paths):
"""
Returns the list of the valid hdf files in the given paths. It is called recursively
:param paths: path or list of paths
:return:
"""
if isinstance(paths, str):
paths = [paths]
list_new_paths = list()
iterate = False
for p in paths:
if path.isdir(p):
iterate = True
list_new_paths += [path.join(p, f) for f in listdir(p)]
elif path.isfile(p):
list_new_paths.append(p)
else:
print(
"WARNING: The given path '"
+ str(p)
+ "' is not a folder or a file and it will be ignored"
)
if iterate is True:
return getList_files(list_new_paths)
return list_new_paths
def getList_relevant_files(path_to_files):
"""
    Checks if the given files are hdf/mrcs/st files with a valid format. Returns the list of valid files
:param path_to_files: list of all the files present in the folder (and subfolder)given from the user
:return: list of valid hdf
"""
return [
path_to_file
for path_to_file in path_to_files
if path_to_file.endswith("mrcs")
or path_to_file.endswith("mrc")
or path_to_file.endswith("st")
or h5py.is_hdf5(path_to_file)
]
""" FUNCTION TO READ THE HDF"""
def get_key_list_images(path):
"""
    Returns the list of the keys representing the images in the hdf/mrcs/st file. The keys are converted into a list of integers
:param path:
:return:
"""
print("Try to list images on", path)
import os
filename_ext = os.path.basename(path).split(".")[-1]
result_list = None
try:
if filename_ext in {"mrcs", "st"}:
with mrcfile.mmap(path, permissive=True, mode="r") as mrc:
list_candidate = [i for i in range(mrc.header.nz)]
if len(list_candidate) > 0:
result_list = list_candidate
if filename_ext == "mrc":
with mrcfile.mmap(path, permissive=True, mode="r") as mrc:
result_list = list(range(1))
except Exception as e:
print(e)
print(
"WARNING in get_list_images: the file '"
+ path
+ " is not an valid mrc file. It will be ignored"
)
if filename_ext == "hdf":
try:
with h5py.File(path, "r") as f:
list_candidate = [int(v) for v in list(f["MDF"]["images"])]
except:
print(
"WARNING in get_list_images: the file '"
+ path
+ " is not an HDF file with the following format:\n\t['MDF']['images']. It will be ignored"
)
if len(list_candidate) > 0:
result_list = list_candidate
return result_list
def getImages_fromList_key(file_index_tubles):
"""
Returns the images in the hdf file (path_to_file) listed in (list_images)
:param path_to_file: path to hdf file
    :param list_images: list of keys of the DB. It is the output (or part of it) given by 'get_list_images'
:return: Returns a list of numpy arrays
"""
# driver="core"
result_data = list()
for path_to_file, list_images in file_index_tubles:
data = list()
if path.isfile(path_to_file):
if path.basename(path_to_file).split(".")[-1] == "hdf":
try:
with h5py.File(path_to_file, 'r') as f:
if isinstance(list_images, list) or isinstance(
list_images, tuple
):
data = [
np.nan_to_num(f["MDF"]["images"][str(i)]["image"][()])
for i in list_images
] # [()] is used instead of .value
elif isinstance(list_images, int):
data = np.nan_to_num(f["MDF"]["images"][str(list_images)]["image"][()])
else:
print(
"\nERROR in getImages_fromList_key: invalid list_images, it should be a string or a list/tuple of strings:",
type(list_images),
)
print("you try to get the following images")
print(list_images)
exit()
except Exception as e:
print(e)
print(
"\nERROR in getImages_fromList_key: the file '"
+ path_to_file
+ " is not an HDF file with the following format:\n\t['MDF']['images']['0']['image']"
)
print("you try to get the following images")
print(list_images)
print("there are " + str(len(f["MDF"]["images"])))
exit()
elif path.basename(path_to_file).split(".")[-1] in ["mrc", "mrcs", "st"]:
data = []
with mrcfile.mmap(path_to_file, permissive=True, mode="r") as mrc:
if isinstance(list_images, int):
list_images = [list_images]
if isinstance(list_images, list) or isinstance(list_images, tuple):
if mrc.header.nz > 1:
if len(list_images)==1:
data = np.nan_to_num(mrc.data[list_images[0]])
else:
data = [np.nan_to_num(mrc.data[i]) for i in list_images]
elif len(list_images) == 1:
data = np.nan_to_num(mrc.data)
result_data.append(data)
return result_data
def getImages_fromList_key_old(path_to_file, list_images):
"""
Returns the images in the hdf file (path_to_file) listed in (list_images)
:param path_to_file: path to hdf file
    :param list_images: list of keys of the DB. It is the output (or part of it) given by 'get_list_images'
:return: Returns a list of numpy arrays
"""
data = list()
if path.isfile(path_to_file):
if path.basename(path_to_file).split(".")[-1] == "hdf":
try:
with h5py.File(path_to_file, driver="core") as f:
if isinstance(list_images, list) or isinstance(list_images, tuple):
data = [
f["MDF"]["images"][str(i)]["image"][()] for i in list_images
] # [()] is used instead of .value
elif isinstance(list_images, int):
data = f["MDF"]["images"][str(list_images)]["image"][()]
else:
print(
"\nERROR in getImages_fromList_key: invalid list_images, it should be a string or a list/tuple of strings:",
type(list_images),
)
print("you try to get the following images")
print(list_images)
exit()
except Exception as e:
print(e)
print(
"\nERROR in getImages_fromList_key: the file '"
+ path_to_file
+ " is not an HDF file with the following format:\n\t['MDF']['images']['0']['image']"
)
print("you try to get the following images")
print(list_images)
print("there are " + str(len(f["MDF"]["images"])))
exit()
elif path.basename(path_to_file).split(".")[-1] in ["mrc", "mrcs", "st"]:
data = []
with mrcfile.mmap(path_to_file, permissive=True, mode="r") as mrc:
if isinstance(list_images, int):
list_images = [list_images]
if isinstance(list_images, list) or isinstance(list_images, tuple):
if mrc.header.nz > 1:
data = [mrc.data[i] for i in list_images]
elif len(list_images) == 1:
data = [mrc.data]
return data
""" FUNCTION TO MANIPULATE THE IMAGES"""
def apply_mask(img, mask):
mean = np.mean(img)
    img[~mask] = mean  # fill pixels outside the mask with the image mean
return img
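# --- Illustrative sketch (editor's addition): masking a toy image ---
# Pixels outside a centered circular mask are replaced by the image mean.
def _example_apply_circular_mask():
    toy = np.arange(64, dtype=np.float32).reshape(8, 8)
    mask = create_circular_mask(8, 8, radius=3)
    return apply_mask(toy, mask)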
def resize_img(img, resize=(76, 76)):
"""
Resize the given image into the given size
    :param img: image as a numpy array
    :param resize: target size
:return: return the resized img
"""
im = Image.fromarray(img)
return np.array(im.resize(resize, resample=Image.BILINEAR))
def normalize_img(img):
"""
    Normalize the image based on its mean and standard deviation
:param img:
:return:
"""
import numpy as np
# img = img.astype(np.float64, copy=False)
mean = np.mean(img)
std = np.std(img)
img = (img - mean) / (std+0.00001)
# img = img.astype(np.float32, copy=False)
return img
def flip_img(img, t=None):
"""
    Flip the image according to the given type t
:param img:
:param t: type of the flip
1 --> flip over the row. Flipped array in up-down direction.(X)
2 --> flip over the column Flipped array in right-left direction(Y)
3 --> flip over the column and the row (X and Y)
otherwise --> no flip
:return:
"""
if t == 1:
return np.flipud(img)
elif t == 2:
return np.fliplr(img)
elif t == 3:
return np.flipud(np.fliplr(img))
return img
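# --- Illustrative sketch (editor's addition): flip_img on a toy array ---
# A 2x2 array flipped with t=3 has both axes reversed.
def _example_flip_usage():
    demo = np.arange(4).reshape(2, 2)   # [[0, 1], [2, 3]]
    return flip_img(demo, t=3)            # [[3, 2], [1, 0]]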
|
[
"h5py.File",
"numpy.abs",
"numpy.nan_to_num",
"os.path.basename",
"numpy.std",
"scipy.fftpack.fftshift",
"os.path.isdir",
"numpy.flipud",
"numpy.fliplr",
"os.path.isfile",
"numpy.mean",
"scipy.fftpack.fft2",
"PIL.Image.fromarray",
"h5py.is_hdf5",
"os.path.join",
"os.listdir",
"mrcfile.mmap",
"numpy.sqrt"
] |
[((1861, 1913), 'numpy.sqrt', 'np.sqrt', (['((X - center[0]) ** 2 + (Y - center[1]) ** 2)'], {}), '((X - center[0]) ** 2 + (Y - center[1]) ** 2)\n', (1868, 1913), True, 'import numpy as np\n'), ((2499, 2516), 'scipy.fftpack.fft2', 'fftpack.fft2', (['img'], {}), '(img)\n', (2511, 2516), False, 'from scipy import fftpack\n'), ((2526, 2546), 'scipy.fftpack.fftshift', 'fftpack.fftshift', (['F1'], {}), '(F1)\n', (2542, 2546), False, 'from scipy import fftpack\n'), ((8828, 8853), 'os.path.isfile', 'path.isfile', (['path_to_file'], {}), '(path_to_file)\n', (8839, 8853), False, 'from os import path, listdir\n'), ((10977, 10989), 'numpy.mean', 'np.mean', (['img'], {}), '(img)\n', (10984, 10989), True, 'import numpy as np\n'), ((11241, 11261), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (11256, 11261), False, 'from PIL import Image\n'), ((11537, 11549), 'numpy.mean', 'np.mean', (['img'], {}), '(img)\n', (11544, 11549), True, 'import numpy as np\n'), ((11560, 11571), 'numpy.std', 'np.std', (['img'], {}), '(img)\n', (11566, 11571), True, 'import numpy as np\n'), ((2559, 2569), 'numpy.abs', 'np.abs', (['F2'], {}), '(F2)\n', (2565, 2569), True, 'import numpy as np\n'), ((2913, 2926), 'os.path.isdir', 'path.isdir', (['p'], {}), '(p)\n', (2923, 2926), False, 'from os import path, listdir\n'), ((5883, 5908), 'os.path.isfile', 'path.isfile', (['path_to_file'], {}), '(path_to_file)\n', (5894, 5908), False, 'from os import path, listdir\n'), ((12126, 12140), 'numpy.flipud', 'np.flipud', (['img'], {}), '(img)\n', (12135, 12140), True, 'import numpy as np\n'), ((2365, 2391), 'os.path.isfile', 'path.isfile', (['path_to_files'], {}), '(path_to_files)\n', (2376, 2391), False, 'from os import path, listdir\n'), ((3036, 3050), 'os.path.isfile', 'path.isfile', (['p'], {}), '(p)\n', (3047, 3050), False, 'from os import path, listdir\n'), ((12173, 12187), 'numpy.fliplr', 'np.fliplr', (['img'], {}), '(img)\n', (12182, 12187), True, 'import numpy as np\n'), ((2264, 2278), 'os.path.isfile', 'path.isfile', (['p'], {}), '(p)\n', (2275, 2278), False, 'from os import path, listdir\n'), ((2986, 3001), 'os.path.join', 'path.join', (['p', 'f'], {}), '(p, f)\n', (2995, 3001), False, 'from os import path, listdir\n'), ((3871, 3897), 'h5py.is_hdf5', 'h5py.is_hdf5', (['path_to_file'], {}), '(path_to_file)\n', (3883, 3897), False, 'import h5py\n'), ((4212, 4234), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (4228, 4234), False, 'import os\n'), ((4342, 4387), 'mrcfile.mmap', 'mrcfile.mmap', (['path'], {'permissive': '(True)', 'mode': '"""r"""'}), "(path, permissive=True, mode='r')\n", (4354, 4387), False, 'import mrcfile\n'), ((4607, 4652), 'mrcfile.mmap', 'mrcfile.mmap', (['path'], {'permissive': '(True)', 'mode': '"""r"""'}), "(path, permissive=True, mode='r')\n", (4619, 4652), False, 'import mrcfile\n'), ((4970, 4990), 'h5py.File', 'h5py.File', (['path', '"""r"""'], {}), "(path, 'r')\n", (4979, 4990), False, 'import h5py\n'), ((3011, 3021), 'os.listdir', 'listdir', (['p'], {}), '(p)\n', (3018, 3021), False, 'from os import path, listdir\n'), ((8957, 8995), 'h5py.File', 'h5py.File', (['path_to_file'], {'driver': '"""core"""'}), "(path_to_file, driver='core')\n", (8966, 8995), False, 'import h5py\n'), ((10435, 10488), 'mrcfile.mmap', 'mrcfile.mmap', (['path_to_file'], {'permissive': '(True)', 'mode': '"""r"""'}), "(path_to_file, permissive=True, mode='r')\n", (10447, 10488), False, 'import mrcfile\n'), ((12230, 12244), 'numpy.fliplr', 'np.fliplr', (['img'], {}), '(img)\n', (12239, 
12244), True, 'import numpy as np\n'), ((6024, 6052), 'h5py.File', 'h5py.File', (['path_to_file', '"""r"""'], {}), "(path_to_file, 'r')\n", (6033, 6052), False, 'import h5py\n'), ((7720, 7773), 'mrcfile.mmap', 'mrcfile.mmap', (['path_to_file'], {'permissive': '(True)', 'mode': '"""r"""'}), "(path_to_file, permissive=True, mode='r')\n", (7732, 7773), False, 'import mrcfile\n'), ((8866, 8893), 'os.path.basename', 'path.basename', (['path_to_file'], {}), '(path_to_file)\n', (8879, 8893), False, 'from os import path, listdir\n'), ((5925, 5952), 'os.path.basename', 'path.basename', (['path_to_file'], {}), '(path_to_file)\n', (5938, 5952), False, 'from os import path, listdir\n'), ((10327, 10354), 'os.path.basename', 'path.basename', (['path_to_file'], {}), '(path_to_file)\n', (10340, 10354), False, 'from os import path, listdir\n'), ((7604, 7631), 'os.path.basename', 'path.basename', (['path_to_file'], {}), '(path_to_file)\n', (7617, 7631), False, 'from os import path, listdir\n'), ((8114, 8153), 'numpy.nan_to_num', 'np.nan_to_num', (['mrc.data[list_images[0]]'], {}), '(mrc.data[list_images[0]])\n', (8127, 8153), True, 'import numpy as np\n'), ((8364, 8387), 'numpy.nan_to_num', 'np.nan_to_num', (['mrc.data'], {}), '(mrc.data)\n', (8377, 8387), True, 'import numpy as np\n'), ((8228, 8254), 'numpy.nan_to_num', 'np.nan_to_num', (['mrc.data[i]'], {}), '(mrc.data[i])\n', (8241, 8254), True, 'import numpy as np\n')]
|
# Generated by Django 2.1.7 on 2019-02-15 07:55
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("question", "0005_merge_20190215_0616")]
operations = [
migrations.RemoveField(model_name="answer", name="is_visible"),
migrations.RemoveField(model_name="comment", name="is_visible"),
migrations.RemoveField(model_name="question", name="is_visible"),
]
|
[
"django.db.migrations.RemoveField"
] |
[((214, 276), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""answer"""', 'name': '"""is_visible"""'}), "(model_name='answer', name='is_visible')\n", (236, 276), False, 'from django.db import migrations\n'), ((286, 349), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""comment"""', 'name': '"""is_visible"""'}), "(model_name='comment', name='is_visible')\n", (308, 349), False, 'from django.db import migrations\n'), ((359, 423), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""question"""', 'name': '"""is_visible"""'}), "(model_name='question', name='is_visible')\n", (381, 423), False, 'from django.db import migrations\n')]
|
"""
Plugin for better notifications with actions.
HexChat Python Interface: http://hexchat.readthedocs.io/en/latest/script_python.html
IRC String Formatting: https://github.com/myano/jenni/wiki/IRC-String-Formatting
"""
import logging
import re
import subprocess
import sys
from os import path
import dbus
import hexchat
__module_name__ = 'highlights_notifications'
__module_description__ = 'Better notifications with actions'
__module_version__ = '1.1'
NOTIFICATION_SERVER = '/home/skontar/Repos/hexchat-plugins/notification_server.py'
LOG = '~/highlights_notifications.log'
FORMAT = '%(asctime)-24s %(levelname)-9s %(message)s'
logging.basicConfig(filename=path.expanduser(LOG), format=FORMAT, level=logging.DEBUG)
def handle_exception(exc_type, exc_value, exc_traceback):
logging.error('Uncaught exception', exc_info=(exc_type, exc_value, exc_traceback))
sys.__excepthook__(exc_type, exc_value, exc_traceback)
sys.excepthook = handle_exception
def server_start():
logging.info('Starting server')
subprocess.Popen('python3 {}'.format(NOTIFICATION_SERVER), shell=True)
def get_dbus_interface():
logging.info('Getting DBus interface for Notification Server')
try:
session_bus = dbus.SessionBus()
proxy = session_bus.get_object('com.skontar.HexChat', '/com/skontar/HexChat')
interface = dbus.Interface(proxy, dbus_interface='com.skontar.HexChat')
logging.debug('DBus interface Success')
return interface
except dbus.exceptions.DBusException:
logging.debug('DBus interface Fail')
server_start()
return None
def on_focus_tab(word, word_eol, userdata):
global active_channel
active_channel = hexchat.get_info('channel')
logging.info('Changed active tab to %s', active_channel)
def on_highlight_notification(word, word_eol, userdata):
global interface
win_status = hexchat.get_info('win_status')
network = hexchat.get_info('network')
channel = hexchat.get_info('channel')
nickname = word[0]
nickname = re.sub(r'^\x03\d+', '', nickname) # Remove color
text = word[1]
message_type = userdata
if message_type == 'HLT':
title = 'Highlighted message from: {} ({})'.format(nickname, channel)
else:
title = 'Private message from: {} ({})'.format(nickname, network)
logging.info('New notification [%s | %s | %s]', network, channel, repr(str(nickname)))
logging.debug('Application details: [%s | %s]', win_status, active_channel)
logging.debug('Message type: "%s"', message_type)
logging.debug('Message: %s', repr(text))
# Ignore notification if window is active and active channel is the one where message arrived
if win_status == 'active' and channel == active_channel:
logging.info('Not showing notifications as channel is already active')
return hexchat.EAT_NONE
if interface is None:
logging.debug('No DBus interface prepared')
interface = get_dbus_interface()
if interface is None:
logging.warning('DBus connection to Notification Server fail')
logging.warning('Notification fallback')
hexchat.command('TRAY -b "{}" {}'.format(title, text))
else:
try:
logging.info('Sending message to Notification Server through DBus')
interface.create_notification(nickname, network, channel, title, text, message_type)
except dbus.exceptions.DBusException:
logging.warning('DBus message to Notification Server fail')
logging.warning('Notification fallback')
hexchat.command('TRAY -b "{}" {}'.format(title, text))
interface = get_dbus_interface()
return hexchat.EAT_NONE
def on_unload(userdata):
global interface
logging.info('HexChat notification server ending')
hexchat.prnt('Unloading {}, version {}'.format(__module_name__, __module_version__))
logging.info('Setting common notifications to normal')
hexchat.command('set input_balloon_hilight 1')
hexchat.command('set input_balloon_priv 1')
try:
logging.info('Sending Quit message to Notification Server')
interface.quit()
except (AttributeError, dbus.exceptions.DBusException):
logging.warning('Quit message to Notification Server failed')
logging.info('Explicitly quit')
# Unfortunately, this also kills whole HexChat, so the plugin cannot be restarted.
# However, I did not find a better way, as if the plugin used DBus interface it seems to hang
# on exit. Only other workaround I have found was to raise an Exception, but that stopped to
# work when I hooked `sys.excepthook`. I have tried to unhook it just before exit, but that did
# not work either. I find the proper Exception logging more useful than ability to restart
# plugin.
exit(1)
active_channel = None
win_status = None
interface = None
logging.info('HexChat notification plugin starting ==============================')
server_start()
hexchat.prnt('{}, version {}'.format(__module_name__, __module_version__))
logging.info('Setting common notifications to suspended')
hexchat.command('set input_balloon_hilight 0')
hexchat.command('set input_balloon_priv 0')
hexchat.hook_print('Focus Tab', on_focus_tab)
hexchat.hook_unload(on_unload)
hexchat.hook_print('Channel Action Hilight', on_highlight_notification, userdata='HLT')
hexchat.hook_print('Channel Msg Hilight', on_highlight_notification, userdata='HLT')
hexchat.hook_print('Private Message', on_highlight_notification, userdata='PVT')
hexchat.hook_print('Private Message to Dialog', on_highlight_notification, userdata='PVT')
hexchat.hook_print('Private Action to Dialog', on_highlight_notification, userdata='PVT')
|
[
"os.path.expanduser",
"hexchat.hook_unload",
"logging.error",
"dbus.SessionBus",
"sys.__excepthook__",
"logging.debug",
"logging.warning",
"hexchat.hook_print",
"hexchat.command",
"hexchat.get_info",
"logging.info",
"dbus.Interface",
"re.sub"
] |
[((4898, 4986), 'logging.info', 'logging.info', (['"""HexChat notification plugin starting =============================="""'], {}), "(\n 'HexChat notification plugin starting ==============================')\n", (4910, 4986), False, 'import logging\n'), ((5074, 5131), 'logging.info', 'logging.info', (['"""Setting common notifications to suspended"""'], {}), "('Setting common notifications to suspended')\n", (5086, 5131), False, 'import logging\n'), ((5132, 5178), 'hexchat.command', 'hexchat.command', (['"""set input_balloon_hilight 0"""'], {}), "('set input_balloon_hilight 0')\n", (5147, 5178), False, 'import hexchat\n'), ((5179, 5222), 'hexchat.command', 'hexchat.command', (['"""set input_balloon_priv 0"""'], {}), "('set input_balloon_priv 0')\n", (5194, 5222), False, 'import hexchat\n'), ((5223, 5268), 'hexchat.hook_print', 'hexchat.hook_print', (['"""Focus Tab"""', 'on_focus_tab'], {}), "('Focus Tab', on_focus_tab)\n", (5241, 5268), False, 'import hexchat\n'), ((5269, 5299), 'hexchat.hook_unload', 'hexchat.hook_unload', (['on_unload'], {}), '(on_unload)\n', (5288, 5299), False, 'import hexchat\n'), ((5300, 5391), 'hexchat.hook_print', 'hexchat.hook_print', (['"""Channel Action Hilight"""', 'on_highlight_notification'], {'userdata': '"""HLT"""'}), "('Channel Action Hilight', on_highlight_notification,\n userdata='HLT')\n", (5318, 5391), False, 'import hexchat\n'), ((5388, 5476), 'hexchat.hook_print', 'hexchat.hook_print', (['"""Channel Msg Hilight"""', 'on_highlight_notification'], {'userdata': '"""HLT"""'}), "('Channel Msg Hilight', on_highlight_notification,\n userdata='HLT')\n", (5406, 5476), False, 'import hexchat\n'), ((5473, 5558), 'hexchat.hook_print', 'hexchat.hook_print', (['"""Private Message"""', 'on_highlight_notification'], {'userdata': '"""PVT"""'}), "('Private Message', on_highlight_notification, userdata='PVT'\n )\n", (5491, 5558), False, 'import hexchat\n'), ((5554, 5648), 'hexchat.hook_print', 'hexchat.hook_print', (['"""Private Message to Dialog"""', 'on_highlight_notification'], {'userdata': '"""PVT"""'}), "('Private Message to Dialog', on_highlight_notification,\n userdata='PVT')\n", (5572, 5648), False, 'import hexchat\n'), ((5645, 5738), 'hexchat.hook_print', 'hexchat.hook_print', (['"""Private Action to Dialog"""', 'on_highlight_notification'], {'userdata': '"""PVT"""'}), "('Private Action to Dialog', on_highlight_notification,\n userdata='PVT')\n", (5663, 5738), False, 'import hexchat\n'), ((787, 873), 'logging.error', 'logging.error', (['"""Uncaught exception"""'], {'exc_info': '(exc_type, exc_value, exc_traceback)'}), "('Uncaught exception', exc_info=(exc_type, exc_value,\n exc_traceback))\n", (800, 873), False, 'import logging\n'), ((874, 928), 'sys.__excepthook__', 'sys.__excepthook__', (['exc_type', 'exc_value', 'exc_traceback'], {}), '(exc_type, exc_value, exc_traceback)\n', (892, 928), False, 'import sys\n'), ((990, 1021), 'logging.info', 'logging.info', (['"""Starting server"""'], {}), "('Starting server')\n", (1002, 1021), False, 'import logging\n'), ((1129, 1191), 'logging.info', 'logging.info', (['"""Getting DBus interface for Notification Server"""'], {}), "('Getting DBus interface for Notification Server')\n", (1141, 1191), False, 'import logging\n'), ((1703, 1730), 'hexchat.get_info', 'hexchat.get_info', (['"""channel"""'], {}), "('channel')\n", (1719, 1730), False, 'import hexchat\n'), ((1735, 1791), 'logging.info', 'logging.info', (['"""Changed active tab to %s"""', 'active_channel'], {}), "('Changed active tab to %s', active_channel)\n", (1747, 
1791), False, 'import logging\n'), ((1889, 1919), 'hexchat.get_info', 'hexchat.get_info', (['"""win_status"""'], {}), "('win_status')\n", (1905, 1919), False, 'import hexchat\n'), ((1934, 1961), 'hexchat.get_info', 'hexchat.get_info', (['"""network"""'], {}), "('network')\n", (1950, 1961), False, 'import hexchat\n'), ((1976, 2003), 'hexchat.get_info', 'hexchat.get_info', (['"""channel"""'], {}), "('channel')\n", (1992, 2003), False, 'import hexchat\n'), ((2042, 2076), 're.sub', 're.sub', (['"""^\\\\x03\\\\d+"""', '""""""', 'nickname'], {}), "('^\\\\x03\\\\d+', '', nickname)\n", (2048, 2076), False, 'import re\n'), ((2428, 2503), 'logging.debug', 'logging.debug', (['"""Application details: [%s | %s]"""', 'win_status', 'active_channel'], {}), "('Application details: [%s | %s]', win_status, active_channel)\n", (2441, 2503), False, 'import logging\n'), ((2508, 2557), 'logging.debug', 'logging.debug', (['"""Message type: "%s\\""""', 'message_type'], {}), '(\'Message type: "%s"\', message_type)\n', (2521, 2557), False, 'import logging\n'), ((3768, 3818), 'logging.info', 'logging.info', (['"""HexChat notification server ending"""'], {}), "('HexChat notification server ending')\n", (3780, 3818), False, 'import logging\n'), ((3912, 3966), 'logging.info', 'logging.info', (['"""Setting common notifications to normal"""'], {}), "('Setting common notifications to normal')\n", (3924, 3966), False, 'import logging\n'), ((3971, 4017), 'hexchat.command', 'hexchat.command', (['"""set input_balloon_hilight 1"""'], {}), "('set input_balloon_hilight 1')\n", (3986, 4017), False, 'import hexchat\n'), ((4022, 4065), 'hexchat.command', 'hexchat.command', (['"""set input_balloon_priv 1"""'], {}), "('set input_balloon_priv 1')\n", (4037, 4065), False, 'import hexchat\n'), ((4303, 4334), 'logging.info', 'logging.info', (['"""Explicitly quit"""'], {}), "('Explicitly quit')\n", (4315, 4334), False, 'import logging\n'), ((665, 685), 'os.path.expanduser', 'path.expanduser', (['LOG'], {}), '(LOG)\n', (680, 685), False, 'from os import path\n'), ((1223, 1240), 'dbus.SessionBus', 'dbus.SessionBus', ([], {}), '()\n', (1238, 1240), False, 'import dbus\n'), ((1347, 1406), 'dbus.Interface', 'dbus.Interface', (['proxy'], {'dbus_interface': '"""com.skontar.HexChat"""'}), "(proxy, dbus_interface='com.skontar.HexChat')\n", (1361, 1406), False, 'import dbus\n'), ((1415, 1454), 'logging.debug', 'logging.debug', (['"""DBus interface Success"""'], {}), "('DBus interface Success')\n", (1428, 1454), False, 'import logging\n'), ((2771, 2841), 'logging.info', 'logging.info', (['"""Not showing notifications as channel is already active"""'], {}), "('Not showing notifications as channel is already active')\n", (2783, 2841), False, 'import logging\n'), ((2909, 2952), 'logging.debug', 'logging.debug', (['"""No DBus interface prepared"""'], {}), "('No DBus interface prepared')\n", (2922, 2952), False, 'import logging\n'), ((3029, 3091), 'logging.warning', 'logging.warning', (['"""DBus connection to Notification Server fail"""'], {}), "('DBus connection to Notification Server fail')\n", (3044, 3091), False, 'import logging\n'), ((3100, 3140), 'logging.warning', 'logging.warning', (['"""Notification fallback"""'], {}), "('Notification fallback')\n", (3115, 3140), False, 'import logging\n'), ((4084, 4143), 'logging.info', 'logging.info', (['"""Sending Quit message to Notification Server"""'], {}), "('Sending Quit message to Notification Server')\n", (4096, 4143), False, 'import logging\n'), ((1530, 1566), 'logging.debug', 'logging.debug', 
(['"""DBus interface Fail"""'], {}), "('DBus interface Fail')\n", (1543, 1566), False, 'import logging\n'), ((3239, 3306), 'logging.info', 'logging.info', (['"""Sending message to Notification Server through DBus"""'], {}), "('Sending message to Notification Server through DBus')\n", (3251, 3306), False, 'import logging\n'), ((4237, 4298), 'logging.warning', 'logging.warning', (['"""Quit message to Notification Server failed"""'], {}), "('Quit message to Notification Server failed')\n", (4252, 4298), False, 'import logging\n'), ((3462, 3521), 'logging.warning', 'logging.warning', (['"""DBus message to Notification Server fail"""'], {}), "('DBus message to Notification Server fail')\n", (3477, 3521), False, 'import logging\n'), ((3534, 3574), 'logging.warning', 'logging.warning', (['"""Notification fallback"""'], {}), "('Notification fallback')\n", (3549, 3574), False, 'import logging\n')]
|
import csv
from ...utils import quiet_remove
from ..delimited import cant_handle_hint
from ..processing_instructions import ProcessingInstructions
from ..records_format import DelimitedRecordsFormat
from records_mover.mover_types import _assert_never
import logging
from typing import Set, Dict
logger = logging.getLogger(__name__)
def pandas_to_csv_options(records_format: DelimitedRecordsFormat,
unhandled_hints: Set[str],
processing_instructions: ProcessingInstructions) -> Dict[str, object]:
# https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.to_csv.html
hints = records_format.\
validate(fail_if_cant_handle_hint=processing_instructions.fail_if_cant_handle_hint)
fail_if_cant_handle_hint = processing_instructions.fail_if_cant_handle_hint
pandas_options: Dict[str, object] = {}
pandas_options['encoding'] = hints.encoding
quiet_remove(unhandled_hints, 'encoding')
if hints.compression is None:
# hints['compression']=None will output an uncompressed csv,
# which is the pandas default.
pass
elif hints.compression == 'GZIP':
pandas_options['compression'] = 'gzip'
elif hints.compression == 'BZIP':
pandas_options['compression'] = 'bz2'
else:
cant_handle_hint(fail_if_cant_handle_hint, 'compression', hints)
quiet_remove(unhandled_hints, 'compression')
if hints.quoting is None:
pandas_options['quoting'] = csv.QUOTE_NONE
elif hints.quoting == 'all':
pandas_options['quoting'] = csv.QUOTE_ALL
elif hints.quoting == 'minimal':
pandas_options['quoting'] = csv.QUOTE_MINIMAL
elif hints.quoting == 'nonnumeric':
pandas_options['quoting'] = csv.QUOTE_NONNUMERIC
else:
_assert_never(hints.quoting)
quiet_remove(unhandled_hints, 'quoting')
pandas_options['doublequote'] = hints.doublequote
quiet_remove(unhandled_hints, 'doublequote')
pandas_options['quotechar'] = hints.quotechar
quiet_remove(unhandled_hints, 'quotechar')
if hints.escape is None:
pass
else:
pandas_options['escapechar'] = hints.escape
quiet_remove(unhandled_hints, 'escape')
pandas_options['header'] = hints.header_row
quiet_remove(unhandled_hints, 'header-row')
if hints.dateformat is None:
if hints.datetimeformattz == hints.datetimeformat:
# BigQuery requires that timezone offsets have a colon;
# Python (and thus Pandas) doesn't support adding the
# colon with strftime. However, we can specify things
# without a timezone delimiter just fine.
#
# Unfortunately Python/Pandas will drop the timezone info
# instead of converting the timestamp to UTC. This
# corrupts the time, as BigQuery assumes what it gets in
# is UTC format. Boo.
#
# $ python3
# >>> import pytz
# >>> us_eastern = pytz.timezone('US/Eastern')
# >>> import datetime
# >>> us_eastern.localize(datetime.datetime(2000, 1, 2, 12, 34, 56, 789012))
# .strftime('%Y-%m-%d %H:%M:%S.%f')
# '2000-01-02 12:34:56.789012'
# >>>
#
# https://github.com/bluelabsio/records-mover/issues/95
pandas_options['date_format'] = '%Y-%m-%d %H:%M:%S.%f'
else:
pandas_options['date_format'] = '%Y-%m-%d %H:%M:%S.%f%z'
elif hints.dateformat == 'YYYY-MM-DD':
if hints.datetimeformattz == hints.datetimeformat:
pandas_options['date_format'] = '%Y-%m-%d %H:%M:%S.%f'
else:
pandas_options['date_format'] = '%Y-%m-%d %H:%M:%S.%f%z'
elif hints.dateformat == 'MM-DD-YYYY':
if hints.datetimeformattz == hints.datetimeformat:
pandas_options['date_format'] = '%m-%d-%Y %H:%M:%S.%f'
else:
pandas_options['date_format'] = '%m-%d-%Y %H:%M:%S.%f%z'
elif hints.dateformat == 'DD-MM-YYYY':
if hints.datetimeformattz == hints.datetimeformat:
pandas_options['date_format'] = '%d-%m-%Y %H:%M:%S.%f'
else:
pandas_options['date_format'] = '%d-%m-%Y %H:%M:%S.%f%z'
elif hints.dateformat == 'MM/DD/YY':
if hints.datetimeformattz == hints.datetimeformat:
pandas_options['date_format'] = '%m/%d/%y %H:%M:%S.%f'
else:
pandas_options['date_format'] = '%m/%d/%y %H:%M:%S.%f%z'
else:
cant_handle_hint(fail_if_cant_handle_hint, 'dateformat', hints)
quiet_remove(unhandled_hints, 'dateformat')
# pandas can't seem to export a date and time together :(
#
# might be nice someday to only emit the errors if the actual data
# being moved is affected by whatever limitation...
if (hints.datetimeformattz not in (f"{hints.dateformat} HH24:MI:SSOF",
f"{hints.dateformat} HH:MI:SSOF",
f"{hints.dateformat} HH24:MI:SS",
f"{hints.dateformat} HH:MI:SS",
f"{hints.dateformat} HH:MIOF",
f"{hints.dateformat} HH:MI",
f"{hints.dateformat} HH24:MIOF",
f"{hints.dateformat} HH24:MI")):
cant_handle_hint(fail_if_cant_handle_hint, 'datetimeformattz', hints)
quiet_remove(unhandled_hints, 'datetimeformattz')
valid_datetimeformat = [
f"{hints.dateformat} HH24:MI:SS",
f"{hints.dateformat} HH:MI:SS",
f"{hints.dateformat} HH24:MI",
f"{hints.dateformat} HH:MI",
]
if (hints.datetimeformat not in valid_datetimeformat):
cant_handle_hint(fail_if_cant_handle_hint, 'datetimeformat', hints)
quiet_remove(unhandled_hints, 'datetimeformat')
if hints.timeonlyformat != 'HH24:MI:SS':
cant_handle_hint(fail_if_cant_handle_hint, 'timeonlyformat', hints)
quiet_remove(unhandled_hints, 'timeonlyformat')
pandas_options['sep'] = hints.field_delimiter
quiet_remove(unhandled_hints, 'field-delimiter')
pandas_options['line_terminator'] = hints.record_terminator
quiet_remove(unhandled_hints, 'record-terminator')
return pandas_options
|
[
"records_mover.mover_types._assert_never",
"logging.getLogger"
] |
[((306, 333), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (323, 333), False, 'import logging\n'), ((1811, 1839), 'records_mover.mover_types._assert_never', '_assert_never', (['hints.quoting'], {}), '(hints.quoting)\n', (1824, 1839), False, 'from records_mover.mover_types import _assert_never\n')]
|
from raptiformica.actions.prune import ensure_neighbour_removed_from_config_by_host
from tests.testcase import TestCase
class TestEnsureNeighbourRemovedFromConfigByHost(TestCase):
def setUp(self):
self._del_neighbour_by_key = self.set_up_patch(
'raptiformica.actions.prune._del_neighbour_by_key'
)
def test_ensure_neighbour_removed_from_config_by_host_deleted_neighbour_by_host(self):
ensure_neighbour_removed_from_config_by_host('1.2.3.4')
self._del_neighbour_by_key.assert_called_once_with(
'host', '1.2.3.4'
)
|
[
"raptiformica.actions.prune.ensure_neighbour_removed_from_config_by_host"
] |
[((432, 487), 'raptiformica.actions.prune.ensure_neighbour_removed_from_config_by_host', 'ensure_neighbour_removed_from_config_by_host', (['"""1.2.3.4"""'], {}), "('1.2.3.4')\n", (476, 487), False, 'from raptiformica.actions.prune import ensure_neighbour_removed_from_config_by_host\n')]
|
from math import cos, sin
import numpy as np
from ....simulator import Agent
from .quintic_polynomials_planner import quinic_polynomials_planner
class TeacherQuinticPolynomials(Agent):
def learn(self, state, action):
raise NotImplementedError()
def explore(self, state, horizon=1):
raise NotImplementedError()
def __init__(self, world, lane):
Agent.__init__(self, world)
self.lane = lane
self.navigation_plan = None
self.goal = self.lane.end_middle()
self.goal = self.goal[0], self.goal[1], 0.0 # the angle depends on the lane direction
def plan(self, horizon=10):
trajectory = quinic_polynomials_planner(sx=self.x, sy=self.y, syaw=self.theta, sv=self.v, sa=0.0,
gx=self.goal[0], gy=self.goal[1], gyaw=self.goal[2], gv=0.0, ga=0.0,
max_accel=0.0, max_jerk=0.1, dt=1)
return np.array(trajectory[3])[:horizon]
def exploit(self, state, horizon=1):
if self.navigation_plan is None:
self.navigation_plan = self.plan()
for _ in range(horizon):
self.execute()
def execute(self, action, horizon=1):
for _ in range(horizon):
self.x = self.x + self.v * cos(action)
self.y = self.y + self.v * sin(action)
|
[
"numpy.array",
"math.cos",
"math.sin"
] |
[((953, 976), 'numpy.array', 'np.array', (['trajectory[3]'], {}), '(trajectory[3])\n', (961, 976), True, 'import numpy as np\n'), ((1292, 1303), 'math.cos', 'cos', (['action'], {}), '(action)\n', (1295, 1303), False, 'from math import cos, sin\n'), ((1343, 1354), 'math.sin', 'sin', (['action'], {}), '(action)\n', (1346, 1354), False, 'from math import cos, sin\n')]
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
postprocess
"""
import os
import argparse
from functools import reduce
import numpy as np
import mindspore as ms
from mindspore import ops, Tensor, context
import src.util as util
def cal_acc(args):
"""
:return: meta-baseline eval
"""
temp = 5.
n_shots = [args.num_shots]
file_num = int(len(os.listdir(args.post_result_path)) / args.num_shots)
aves_keys = ['tl', 'ta', 'vl', 'va']
for n_shot in n_shots:
aves_keys += ['fsa-' + str(n_shot)]
aves = {k: util.Averager() for k in aves_keys}
label_list = np.load(os.path.join(args.pre_result_path, "label.npy"), allow_pickle=True)
shape_list = np.load(os.path.join(args.pre_result_path, "shape.npy"), allow_pickle=True)
x_shot_shape = shape_list[0]
x_query_shape = shape_list[1]
shot_shape = x_shot_shape[:-3]
query_shape = x_query_shape[:-3]
x_shot_len = reduce(lambda x, y: x*y, shot_shape)
x_query_len = reduce(lambda x, y: x*y, query_shape)
for i, n_shot in enumerate(n_shots):
np.random.seed(0)
label_shot = label_list[i]
for j in range(file_num):
labels = Tensor(label_shot[j])
f = os.path.join(args.post_result_path, "nshot_" + str(i) + "_" + str(j) + "_0.bin")
x_tot = Tensor(np.fromfile(f, np.float32).reshape(args.batch_size, 512))
x_shot, x_query = x_tot[:x_shot_len], x_tot[-x_query_len:]
x_shot = x_shot.view(*shot_shape, -1)
x_query = x_query.view(*query_shape, -1)
########## cross-class bias ############
bs = x_shot.shape[0]
fs = x_shot.shape[-1]
bias = x_shot.view(bs, -1, fs).mean(1) - x_query.mean(1)
x_query = x_query + ops.ExpandDims()(bias, 1)
x_shot = x_shot.mean(axis=-2)
x_shot = ops.L2Normalize(axis=-1)(x_shot)
x_query = ops.L2Normalize(axis=-1)(x_query)
logits = ops.BatchMatMul()(x_query, x_shot.transpose(0, 2, 1))
logits = logits * temp
ret = ops.Argmax()(logits) == labels.astype(ms.int32)
acc = ret.astype(ms.float32).mean()
aves['fsa-' + str(n_shot)].add(acc.asnumpy())
for k, v in aves.items():
aves[k] = v.item()
for n_shot in n_shots:
key = 'fsa-' + str(n_shot)
print("epoch {}, {}-shot, val acc {:.4f}".format(str(1), n_shot, aves[key]))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--device_target', type=str, default='CPU', choices=['Ascend', 'GPU', 'CPU'])
parser.add_argument('--dataset', default='mini-imagenet')
parser.add_argument('--post_result_path', default='./result_Files')
parser.add_argument('--pre_result_path', type=str, default='./preprocess_Result')
parser.add_argument('--batch_size', type=int, default=320)
parser.add_argument('--num_shots', type=int, default=1)
args_opt = parser.parse_args()
context.set_context(mode=context.GRAPH_MODE, device_target=args_opt.device_target, save_graphs=False)
cal_acc(args_opt)
|
[
"os.listdir",
"mindspore.context.set_context",
"mindspore.ops.Argmax",
"numpy.random.seed",
"argparse.ArgumentParser",
"mindspore.ops.ExpandDims",
"numpy.fromfile",
"mindspore.ops.L2Normalize",
"mindspore.Tensor",
"mindspore.ops.BatchMatMul",
"functools.reduce",
"os.path.join",
"src.util.Averager"
] |
[((1547, 1585), 'functools.reduce', 'reduce', (['(lambda x, y: x * y)', 'shot_shape'], {}), '(lambda x, y: x * y, shot_shape)\n', (1553, 1585), False, 'from functools import reduce\n'), ((1602, 1641), 'functools.reduce', 'reduce', (['(lambda x, y: x * y)', 'query_shape'], {}), '(lambda x, y: x * y, query_shape)\n', (1608, 1641), False, 'from functools import reduce\n'), ((3108, 3133), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3131, 3133), False, 'import argparse\n'), ((3618, 3724), 'mindspore.context.set_context', 'context.set_context', ([], {'mode': 'context.GRAPH_MODE', 'device_target': 'args_opt.device_target', 'save_graphs': '(False)'}), '(mode=context.GRAPH_MODE, device_target=args_opt.\n device_target, save_graphs=False)\n', (3637, 3724), False, 'from mindspore import ops, Tensor, context\n'), ((1168, 1183), 'src.util.Averager', 'util.Averager', ([], {}), '()\n', (1181, 1183), True, 'import src.util as util\n'), ((1230, 1277), 'os.path.join', 'os.path.join', (['args.pre_result_path', '"""label.npy"""'], {}), "(args.pre_result_path, 'label.npy')\n", (1242, 1277), False, 'import os\n'), ((1323, 1370), 'os.path.join', 'os.path.join', (['args.pre_result_path', '"""shape.npy"""'], {}), "(args.pre_result_path, 'shape.npy')\n", (1335, 1370), False, 'import os\n'), ((1690, 1707), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (1704, 1707), True, 'import numpy as np\n'), ((1798, 1819), 'mindspore.Tensor', 'Tensor', (['label_shot[j]'], {}), '(label_shot[j])\n', (1804, 1819), False, 'from mindspore import ops, Tensor, context\n'), ((987, 1020), 'os.listdir', 'os.listdir', (['args.post_result_path'], {}), '(args.post_result_path)\n', (997, 1020), False, 'import os\n'), ((2488, 2512), 'mindspore.ops.L2Normalize', 'ops.L2Normalize', ([], {'axis': '(-1)'}), '(axis=-1)\n', (2503, 2512), False, 'from mindspore import ops, Tensor, context\n'), ((2543, 2567), 'mindspore.ops.L2Normalize', 'ops.L2Normalize', ([], {'axis': '(-1)'}), '(axis=-1)\n', (2558, 2567), False, 'from mindspore import ops, Tensor, context\n'), ((2598, 2615), 'mindspore.ops.BatchMatMul', 'ops.BatchMatMul', ([], {}), '()\n', (2613, 2615), False, 'from mindspore import ops, Tensor, context\n'), ((2398, 2414), 'mindspore.ops.ExpandDims', 'ops.ExpandDims', ([], {}), '()\n', (2412, 2414), False, 'from mindspore import ops, Tensor, context\n'), ((2707, 2719), 'mindspore.ops.Argmax', 'ops.Argmax', ([], {}), '()\n', (2717, 2719), False, 'from mindspore import ops, Tensor, context\n'), ((1944, 1970), 'numpy.fromfile', 'np.fromfile', (['f', 'np.float32'], {}), '(f, np.float32)\n', (1955, 1970), True, 'import numpy as np\n')]
|
from picamera import PiCamera
from picamera.exc import PiCameraMMALError
from time import sleep
from io import StringIO
from glob import glob
from os.path import getsize
if __name__ == "__main__":
tries = 0
while tries < 5:
try:
cam = PiCamera(camera_num=0)
except PiCameraMMALError:
# Sometimes happens if something else is hogging the resource
sleep(10)
continue
cam.resolution = (512, 512)
cam.start_preview()
sleep(4)
byte_buffer = StringIO()
byte_buffer.seek(0)
cam.start_recording('/home/test.mjpeg', format='mjpeg')
cam.wait_recording(10)
cam.stop_recording()
cam.capture('/home/foo.jpeg')
cam.stop_preview()
print("Recording")
cam.close()
print(glob("/home/*"))
print(getsize('home/test.mjpeg'))
print(getsize('home/foo.jpeg'))
print(byte_buffer.read())
|
[
"io.StringIO",
"os.path.getsize",
"time.sleep",
"glob.glob",
"picamera.PiCamera"
] |
[((511, 519), 'time.sleep', 'sleep', (['(4)'], {}), '(4)\n', (516, 519), False, 'from time import sleep\n'), ((542, 552), 'io.StringIO', 'StringIO', ([], {}), '()\n', (550, 552), False, 'from io import StringIO\n'), ((265, 287), 'picamera.PiCamera', 'PiCamera', ([], {'camera_num': '(0)'}), '(camera_num=0)\n', (273, 287), False, 'from picamera import PiCamera\n'), ((833, 848), 'glob.glob', 'glob', (['"""/home/*"""'], {}), "('/home/*')\n", (837, 848), False, 'from glob import glob\n'), ((864, 890), 'os.path.getsize', 'getsize', (['"""home/test.mjpeg"""'], {}), "('home/test.mjpeg')\n", (871, 890), False, 'from os.path import getsize\n'), ((906, 930), 'os.path.getsize', 'getsize', (['"""home/foo.jpeg"""'], {}), "('home/foo.jpeg')\n", (913, 930), False, 'from os.path import getsize\n'), ((408, 417), 'time.sleep', 'sleep', (['(10)'], {}), '(10)\n', (413, 417), False, 'from time import sleep\n')]
|
from redis.client import Redis
redis = Redis()
__all__ = ['redis']
|
[
"redis.client.Redis"
] |
[((40, 47), 'redis.client.Redis', 'Redis', ([], {}), '()\n', (45, 47), False, 'from redis.client import Redis\n')]
|
"""
[2020-02-03] Modified version of the original qcodes.plots.colors
Mofied by <NAME> for Measurement Control
It modules makes available all the colors maps from the qcodes, context menu of
the color bar from pyqtgraph, the circular colormap created by me (Victo),
and the reversed version of all of them.
Feel free to add new colors
See "make_qcodes_anglemap" and "make_anglemap45_colorlist" below to get you
started.
"""
from pycqed.analysis.tools.plotting import make_anglemap45_colorlist
# default colors and colorscales, taken from plotly
color_cycle = [
"#1f77b4", # muted blue
"#ff7f0e", # safety orange
"#2ca02c", # cooked asparagus green
"#d62728", # brick red
"#9467bd", # muted purple
"#8c564b", # chestnut brown
"#e377c2", # raspberry yogurt pink
"#7f7f7f", # middle gray
"#bcbd22", # curry yellow-green
"#17becf", # blue-teal
]
colorscales_raw = {
"Greys": [[0, "rgb(0,0,0)"], [1, "rgb(255,255,255)"]],
"YlGnBu": [
[0, "rgb(8, 29, 88)"],
[0.125, "rgb(37, 52, 148)"],
[0.25, "rgb(34, 94, 168)"],
[0.375, "rgb(29, 145, 192)"],
[0.5, "rgb(65, 182, 196)"],
[0.625, "rgb(127, 205, 187)"],
[0.75, "rgb(199, 233, 180)"],
[0.875, "rgb(237, 248, 217)"],
[1, "rgb(255, 255, 217)"],
],
"Greens": [
[0, "rgb(0, 68, 27)"],
[0.125, "rgb(0, 109, 44)"],
[0.25, "rgb(35, 139, 69)"],
[0.375, "rgb(65, 171, 93)"],
[0.5, "rgb(116, 196, 118)"],
[0.625, "rgb(161, 217, 155)"],
[0.75, "rgb(199, 233, 192)"],
[0.875, "rgb(229, 245, 224)"],
[1, "rgb(247, 252, 245)"],
],
"YlOrRd": [
[0, "rgb(128, 0, 38)"],
[0.125, "rgb(189, 0, 38)"],
[0.25, "rgb(227, 26, 28)"],
[0.375, "rgb(252, 78, 42)"],
[0.5, "rgb(253, 141, 60)"],
[0.625, "rgb(254, 178, 76)"],
[0.75, "rgb(254, 217, 118)"],
[0.875, "rgb(255, 237, 160)"],
[1, "rgb(255, 255, 204)"],
],
"bluered": [[0, "rgb(0,0,255)"], [1, "rgb(255,0,0)"]],
# modified RdBu based on
# www.sandia.gov/~kmorel/documents/ColorMaps/ColorMapsExpanded.pdf
"RdBu": [
[0, "rgb(5, 10, 172)"],
[0.35, "rgb(106, 137, 247)"],
[0.5, "rgb(190,190,190)"],
[0.6, "rgb(220, 170, 132)"],
[0.7, "rgb(230, 145, 90)"],
[1, "rgb(178, 10, 28)"],
],
# Scale for non-negative numeric values
"Reds": [
[0, "rgb(220, 220, 220)"],
[0.2, "rgb(245, 195, 157)"],
[0.4, "rgb(245, 160, 105)"],
[1, "rgb(178, 10, 28)"],
],
# Scale for non-positive numeric values
"Blues": [
[0, "rgb(5, 10, 172)"],
[0.35, "rgb(40, 60, 190)"],
[0.5, "rgb(70, 100, 245)"],
[0.6, "rgb(90, 120, 245)"],
[0.7, "rgb(106, 137, 247)"],
[1, "rgb(220, 220, 220)"],
],
"picnic": [
[0, "rgb(0,0,255)"],
[0.1, "rgb(51,153,255)"],
[0.2, "rgb(102,204,255)"],
[0.3, "rgb(153,204,255)"],
[0.4, "rgb(204,204,255)"],
[0.5, "rgb(255,255,255)"],
[0.6, "rgb(255,204,255)"],
[0.7, "rgb(255,153,255)"],
[0.8, "rgb(255,102,204)"],
[0.9, "rgb(255,102,102)"],
[1, "rgb(255,0,0)"],
],
"rainbow": [
[0, "rgb(150,0,90)"],
[0.125, "rgb(0, 0, 200)"],
[0.25, "rgb(0, 25, 255)"],
[0.375, "rgb(0, 152, 255)"],
[0.5, "rgb(44, 255, 150)"],
[0.625, "rgb(151, 255, 0)"],
[0.75, "rgb(255, 234, 0)"],
[0.875, "rgb(255, 111, 0)"],
[1, "rgb(255, 0, 0)"],
],
"portland": [
[0, "rgb(12,51,131)"],
[0.25, "rgb(10,136,186)"],
[0.5, "rgb(242,211,56)"],
[0.75, "rgb(242,143,56)"],
[1, "rgb(217,30,30)"],
],
"jet": [
[0, "rgb(0,0,131)"],
[0.125, "rgb(0,60,170)"],
[0.375, "rgb(5,255,255)"],
[0.625, "rgb(255,255,0)"],
[0.875, "rgb(250,0,0)"],
[1, "rgb(128,0,0)"],
],
"hot": [
[0, "rgb(0,0,0)"],
[0.3, "rgb(230,0,0)"],
[0.6, "rgb(255,210,0)"],
[1, "rgb(255,255,255)"],
],
"blackbody": [
[0, "rgb(0,0,0)"],
[0.2, "rgb(230,0,0)"],
[0.4, "rgb(230,210,0)"],
[0.7, "rgb(255,255,255)"],
[1, "rgb(160,200,255)"],
],
"earth": [
[0, "rgb(0,0,130)"],
[0.1, "rgb(0,180,180)"],
[0.2, "rgb(40,210,40)"],
[0.4, "rgb(230,230,50)"],
[0.6, "rgb(120,70,20)"],
[1, "rgb(255,255,255)"],
],
"electric": [
[0, "rgb(0,0,0)"],
[0.15, "rgb(30,0,100)"],
[0.4, "rgb(120,0,100)"],
[0.6, "rgb(160,90,0)"],
[0.8, "rgb(230,200,0)"],
[1, "rgb(255,250,220)"],
],
"viridis": [
[0, "#440154"],
[0.06274509803921569, "#48186a"],
[0.12549019607843137, "#472d7b"],
[0.18823529411764706, "#424086"],
[0.25098039215686274, "#3b528b"],
[0.3137254901960784, "#33638d"],
[0.3764705882352941, "#2c728e"],
[0.4392156862745098, "#26828e"],
[0.5019607843137255, "#21918c"],
[0.5647058823529412, "#1fa088"],
[0.6274509803921569, "#28ae80"],
[0.6901960784313725, "#3fbc73"],
[0.7529411764705882, "#5ec962"],
[0.8156862745098039, "#84d44b"],
[0.8784313725490196, "#addc30"],
[0.9411764705882353, "#d8e219"],
[1, "#fde725"],
],
}
# Extracted https://github.com/pyqtgraph/pyqtgraph/blob/develop/pyqtgraph/graphicsItems/GradientEditorItem.py
Gradients = {
"thermal": [
(0.3333, (185, 0, 0, 255)),
(0.6666, (255, 220, 0, 255)),
(1, (255, 255, 255, 255)),
(0, (0, 0, 0, 255)),
],
"flame": [
(0.2, (7, 0, 220, 255)),
(0.5, (236, 0, 134, 255)),
(0.8, (246, 246, 0, 255)),
(1.0, (255, 255, 255, 255)),
(0.0, (0, 0, 0, 255)),
],
"yellowy": [
(0.0, (0, 0, 0, 255)),
(0.2328863796753704, (32, 0, 129, 255)),
(0.8362738179251941, (255, 255, 0, 255)),
(0.5257586450247, (115, 15, 255, 255)),
(1.0, (255, 255, 255, 255)),
],
"bipolar": [
(0.0, (0, 255, 255, 255)),
(1.0, (255, 255, 0, 255)),
(0.5, (0, 0, 0, 255)),
(0.25, (0, 0, 255, 255)),
(0.75, (255, 0, 0, 255)),
],
"spectrum": [
(1.0, (255, 0, 255, 255)),
(0.0, (255, 0, 0, 255)),
], # this is a hsv, didn't patch qcodes to allow the specification of that part...
"cyclic": [
(0.0, (255, 0, 4, 255)),
(1.0, (255, 0, 0, 255)),
], # this is a hsv, didn't patch qcodes to allow the specification of that part...
# "greyclip": [
# (0.0, (0, 0, 0, 255)),
# (0.99, (255, 255, 255, 255)),
# (1.0, (255, 0, 0, 255)),
# ],
"grey": [(0.0, (0, 0, 0, 255)), (1.0, (255, 255, 255, 255))],
# Perceptually uniform sequential colormaps from Matplotlib 2.0
"viridis": [
(0.0, (68, 1, 84, 255)),
(0.25, (58, 82, 139, 255)),
(0.5, (32, 144, 140, 255)),
(0.75, (94, 201, 97, 255)),
(1.0, (253, 231, 36, 255)),
],
"inferno": [
(0.0, (0, 0, 3, 255)),
(0.25, (87, 15, 109, 255)),
(0.5, (187, 55, 84, 255)),
(0.75, (249, 142, 8, 255)),
(1.0, (252, 254, 164, 255)),
],
"plasma": [
(0.0, (12, 7, 134, 255)),
(0.25, (126, 3, 167, 255)),
(0.5, (203, 71, 119, 255)),
(0.75, (248, 149, 64, 255)),
(1.0, (239, 248, 33, 255)),
],
"magma": [
(0.0, (0, 0, 3, 255)),
(0.25, (80, 18, 123, 255)),
(0.5, (182, 54, 121, 255)),
(0.75, (251, 136, 97, 255)),
(1.0, (251, 252, 191, 255)),
],
}
def make_qcodes_anglemap45():
anglemap_colorlist = make_anglemap45_colorlist(N=9, use_hpl=False)
len_colorlist = len(anglemap_colorlist)
color_scale = [
[i / (len_colorlist - 1), "rgb" + repr(tuple((int(x * 255) for x in col)))]
for i, col in enumerate(anglemap_colorlist)
]
return color_scale
qcodes_anglemap45 = make_qcodes_anglemap45()
colorscales_raw["anglemap45"] = qcodes_anglemap45
def make_rgba(colorscale):
return [(v, one_rgba(c)) for v, c in colorscale]
def one_rgba(c):
"""
convert a single color value to (r, g, b, a)
input can be an rgb string 'rgb(r,g,b)', '#rrggbb'
if we decide we want more we can make more, but for now this is just
to convert plotly colorscales to pyqtgraph tuples
"""
if c[0] == "#" and len(c) == 7:
return (int(c[1:3], 16), int(c[3:5], 16), int(c[5:7], 16), 255)
if c[:4] == "rgb(":
return tuple(map(int, c[4:-1].split(","))) + (255,)
raise ValueError("one_rgba only supports rgb(r,g,b) and #rrggbb colors")
colorscales = {}
for scale_name, scale in colorscales_raw.items():
colorscales[scale_name] = make_rgba(scale)
for scale_name, scale in Gradients.items():
colorscales[scale_name] = scale
for name, scale in list(colorscales.items()):
last_idx = len(scale) - 1
reversed_scale = [
(scale[last_idx - i][0], color[1]) for i, color in enumerate(scale)
]
colorscales[name + "_reversed"] = reversed_scale
# Generate also all scales with cliping at green
for name, scale in list(colorscales.items()):
clip_percent = 0.03
clip_color = (0, 255, 0, 255)
scale_low = list(scale)
scale_low.insert(1, scale[0])
scale_low[0] = (0.0, clip_color)
if scale[1][0] < clip_percent:
scale_low[1] = ((scale[1][0] + scale[0][0]) / 2, scale_low[1][1])
else:
scale_low[1] = (clip_percent, scale_low[1][1])
colorscales[name + "_clip_low"] = scale_low
scale_high = list(scale)
scale_high.insert(-1, scale[-1])
scale_high[-1] = (1.0, clip_color)
if scale[-2][0] > 1 - clip_percent:
scale_high[-2] = ((scale[-1][0] + scale[-2][0]) / 2, scale_high[-2][1])
else:
scale_high[-2] = (1 - clip_percent, scale_high[-2][1])
colorscales[name + "_clip_high"] = scale_high
|
[
"pycqed.analysis.tools.plotting.make_anglemap45_colorlist"
] |
[((7886, 7931), 'pycqed.analysis.tools.plotting.make_anglemap45_colorlist', 'make_anglemap45_colorlist', ([], {'N': '(9)', 'use_hpl': '(False)'}), '(N=9, use_hpl=False)\n', (7911, 7931), False, 'from pycqed.analysis.tools.plotting import make_anglemap45_colorlist\n')]
|
import django
from channels.routing import ProtocolTypeRouter
from baserow.ws.routers import websocket_router
from django.core.asgi import get_asgi_application
django.setup()
django_asgi_app = get_asgi_application()
application = ProtocolTypeRouter(
{"http": django_asgi_app, "websocket": websocket_router}
)
|
[
"django.setup",
"django.core.asgi.get_asgi_application",
"channels.routing.ProtocolTypeRouter"
] |
[((164, 178), 'django.setup', 'django.setup', ([], {}), '()\n', (176, 178), False, 'import django\n'), ((198, 220), 'django.core.asgi.get_asgi_application', 'get_asgi_application', ([], {}), '()\n', (218, 220), False, 'from django.core.asgi import get_asgi_application\n'), ((236, 312), 'channels.routing.ProtocolTypeRouter', 'ProtocolTypeRouter', (["{'http': django_asgi_app, 'websocket': websocket_router}"], {}), "({'http': django_asgi_app, 'websocket': websocket_router})\n", (254, 312), False, 'from channels.routing import ProtocolTypeRouter\n')]
|
######################################################################################################################
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://www.apache.org/licenses/ #
# #
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES #
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions #
# and limitations under the License. #
######################################################################################################################
import re,boto3,logging,os
from datetime import date
from botocore.exceptions import ClientError
class AWSTrustedAdvisorExplorerGenericException(Exception): pass
logger = logging.getLogger()
if "LOG_LEVEL" in os.environ:
numeric_level = getattr(logging, os.environ['LOG_LEVEL'].upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % loglevel)
logger.setLevel(level=numeric_level)
def sanitize_json(x):
d = x.copy()
if os.environ['MASK_PII'].lower() == 'true':
for k, v in d.items():
if 'AccountId' in k:
d[k] = sanitize_string(v)
if 'AccountName' in k:
d[k] = v[:3]+'-MASKED-'+v[-3:]
if 'AccountEmail' in k:
d[k] = v[:3]+'-MASKED-'+v[-3:]
return d
def sanitize_string(x):
y = str(x)
if os.environ['MASK_PII'].lower() == 'true':
pattern=re.compile('\d{12}')
y = re.sub(pattern,lambda match: ((match.group()[1])+'XXXXXXX'+(match.group()[-4:])), y)
return y
def refresh_trusted_advisor_checks(supportClient,checkId):
logger.info('Refreshing Trusted Advisor Check:'+checkId)
response = supportClient.refresh_trusted_advisor_check(
checkId=checkId
)
logger.info(sanitize_json(response))
return response
def checkAssumeRoleFailure(error):
if "(AccessDenied) when calling the AssumeRole operation" in error:
pattern=re.compile('.*iam::(\d{12}):.*$')
match=pattern.match(error)
logger.info('Assume Role Error for Account:'+match.group(1))
if match != None:
key_name='Logs/AssumeRoleFailure/'+ str(date.today().year)+ '/'+str(date.today().month)+'/'+str(date.today().day)+'/'+str(match.group(1))+'.log'
client = boto3.client('s3')
client.put_object(ACL='bucket-owner-full-control',StorageClass='STANDARD',Body=error, Bucket=os.environ['S3BucketName'],Key=key_name)
return
#Assume Role in Child Account
def assumeRole(accountId):
logger.info('Variables passed to assumeRole(): '+sanitize_string(accountId))
roleArn="arn:aws:iam::"+str(accountId)+":role/"+os.environ['IAMRoleName']
#STS assume role call
stsClient = boto3.client('sts')
roleCredentials = stsClient.assume_role(RoleArn=roleArn, RoleSessionName="AWSTrustedAdvisorExplorerAssumeRole")
return roleCredentials
def lambda_handler(event, context):
try:
logger.info(sanitize_json(event))
logger.info("Assume Role in child account")
roleCredentials=assumeRole(event['AccountId'])
logger.info("Create boto3 support client using the temporary credentials")
supportClient=boto3.client("support",region_name="us-east-1",
aws_access_key_id = roleCredentials['Credentials']['AccessKeyId'],
aws_secret_access_key =
roleCredentials['Credentials']['SecretAccessKey'],
aws_session_token=roleCredentials['Credentials']['SessionToken'])
response = refresh_trusted_advisor_checks(
supportClient, event['CheckId'])
logger.info("Append the Refresh Status '"+response['status']['status']+"' to response." +
" This will be consumed by downstream Lambda")
event["RefreshStatus"] = response['status']['status']
return event
except ClientError as e:
checkAssumeRoleFailure(str(e))
e=sanitize_string(e)
logger.error("Unexpected client error %s" % e)
raise AWSTrustedAdvisorExplorerGenericException(e)
except Exception as f:
checkAssumeRoleFailure(str(f))
f=sanitize_string(f)
logger.error("Unexpected exception: %s" % f)
raise AWSTrustedAdvisorExplorerGenericException(f)
|
[
"re.compile",
"datetime.date.today",
"logging.getLogger",
"boto3.client"
] |
[((1621, 1640), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1638, 1640), False, 'import re, boto3, logging, os\n'), ((3740, 3759), 'boto3.client', 'boto3.client', (['"""sts"""'], {}), "('sts')\n", (3752, 3759), False, 'import re, boto3, logging, os\n'), ((2393, 2414), 're.compile', 're.compile', (['"""\\\\d{12}"""'], {}), "('\\\\d{12}')\n", (2403, 2414), False, 'import re, boto3, logging, os\n'), ((2934, 2968), 're.compile', 're.compile', (['""".*iam::(\\\\d{12}):.*$"""'], {}), "('.*iam::(\\\\d{12}):.*$')\n", (2944, 2968), False, 'import re, boto3, logging, os\n'), ((4228, 4495), 'boto3.client', 'boto3.client', (['"""support"""'], {'region_name': '"""us-east-1"""', 'aws_access_key_id': "roleCredentials['Credentials']['AccessKeyId']", 'aws_secret_access_key': "roleCredentials['Credentials']['SecretAccessKey']", 'aws_session_token': "roleCredentials['Credentials']['SessionToken']"}), "('support', region_name='us-east-1', aws_access_key_id=\n roleCredentials['Credentials']['AccessKeyId'], aws_secret_access_key=\n roleCredentials['Credentials']['SecretAccessKey'], aws_session_token=\n roleCredentials['Credentials']['SessionToken'])\n", (4240, 4495), False, 'import re, boto3, logging, os\n'), ((3288, 3306), 'boto3.client', 'boto3.client', (['"""s3"""'], {}), "('s3')\n", (3300, 3306), False, 'import re, boto3, logging, os\n'), ((3217, 3229), 'datetime.date.today', 'date.today', ([], {}), '()\n', (3227, 3229), False, 'from datetime import date\n'), ((3189, 3201), 'datetime.date.today', 'date.today', ([], {}), '()\n', (3199, 3201), False, 'from datetime import date\n'), ((3161, 3173), 'datetime.date.today', 'date.today', ([], {}), '()\n', (3171, 3173), False, 'from datetime import date\n')]
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils.box_utils import match
class MultiBoxLoss_combined(nn.Module):
"""SSD Weighted Loss Function
Compute Targets:
1) Produce Confidence Target Indices by matching ground truth boxes
with (default) 'priorboxes' that have jaccard index > threshold parameter
(default threshold: 0.5).
2) Produce localization target by 'encoding' variance into offsets of ground
truth boxes and their matched 'priorboxes'.
3) Hard negative mining to filter the excessive number of negative examples
that comes with using a large number of default bounding boxes.
(default negative:positive ratio 3:1)
Objective Loss:
L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N
Where, Lconf is the CrossEntropy Loss and Lloc is the SmoothL1 Loss
weighted by α which is set to 1 by cross val.
Args:
c: class confidences,
l: predicted boxes,
g: ground truth boxes
N: number of matched default boxes
See: https://arxiv.org/pdf/1512.02325.pdf for more details.
"""
def __init__(self, num_classes, overlap_thresh, prior_for_matching, bkg_label, neg_mining, neg_pos, neg_overlap, encode_target):
super(MultiBoxLoss_combined, self).__init__()
self.num_classes = num_classes
self.threshold = overlap_thresh
self.background_label = bkg_label
self.encode_target = encode_target
self.use_prior_for_matching = prior_for_matching
self.do_neg_mining = neg_mining
self.negpos_ratio = neg_pos
self.neg_overlap = neg_overlap
self.variance = [0.1, 0.2]
def forward(self, predictions, priors, targets):
"""Multibox Loss
Args:
predictions (tuple): A tuple containing loc preds, conf preds,
and prior boxes from SSD net.
conf shape: torch.size(batch_size,num_priors,num_classes)
loc shape: torch.size(batch_size,num_priors,4)
priors shape: torch.size(num_priors,4)
ground_truth (tensor): Ground truth boxes and labels for a batch,
shape: [batch_size,num_objs,5] (last idx is the label).
"""
# loc_data[batch_size, num_priors, 4]
# conf_data[batch_size, num_priors, num_classes]
# obj_data[batch_size, num_priors, 2]
loc_data, conf_data, obj_data = predictions
device = loc_data.device
targets = [anno.to(device) for anno in targets]
num = loc_data.size(0)
num_priors = priors.size(0)
# match priors (default boxes) and ground truth boxes
loc_t = torch.Tensor(num, num_priors, 4).to(device)
conf_t = torch.Tensor(num, num_priors, 2).to(device)
obj_t = torch.BoolTensor(num, num_priors).to(device)
# match priors with gt
for idx in range(num): # batch_size
truths = targets[idx][:, :-2].data # [obj_num, 4]
labels = targets[idx][:, -2:].data # [obj_num]
defaults = priors.data # [num_priors,4]
match(self.threshold, truths, defaults, self.variance, labels, loc_t, conf_t, obj_t, idx)
pos = (conf_t[:, :, 0] > 0).bool() # [num, num_priors]
num_pos = (conf_t[:, :, 1] * pos.float()).sum(1, keepdim=True).long()
# Localization Loss (Smooth L1)
# Shape: [batch,num_priors,4]
loc_p = loc_data[pos]
loc_t = loc_t[pos]
loss_l = F.smooth_l1_loss(loc_p, loc_t, reduction='none')
weight_pos = conf_t[pos][:, 1]
loss_l = torch.sum(torch.sum(loss_l, dim=1) * weight_pos)
# Compute object loss across batch for hard negative mining
with torch.no_grad():
loss_obj = F.cross_entropy(obj_data.view(-1, 2), obj_t.long().view(-1), reduction='none')
# Hard Negative Mining
loss_obj[obj_t.view(-1)] = 0 # filter out pos boxes (label>0) and ignored boxes (label=-1) for now
loss_obj = loss_obj.view(num, -1)
_, loss_idx = loss_obj.sort(1, descending=True)
_, idx_rank = loss_idx.sort(1)
num_neg = torch.clamp(self.negpos_ratio * num_pos, max=num_priors - 1)
neg = idx_rank < num_neg.expand_as(idx_rank) # [num, num_priors]
# Object Loss Including Positive and Negative Examples
mask = pos | neg
weight = conf_t[mask][:, 1]
loss_obj = torch.sum(F.cross_entropy(obj_data[mask], obj_t[mask].long(), reduction='none') * weight)
# Confidence Loss (cosine distance to classes center)
# pos [num, num_priors]
# conf_data [num, num_priors, feature_dim]
batch_conf = conf_data.view(-1, self.num_classes-1)
# Compute max conf across batch for hard negative mining (logit-combined)
batch_obj = obj_data.view(-1, 2) # [num*num_priors, 2]
logit_0 = batch_obj[:, 0].unsqueeze(1) + torch.log(
torch.exp(batch_conf).sum(dim=1, keepdim=True))
logit_k = batch_obj[:, 1].unsqueeze(1).expand_as(batch_conf) + batch_conf
logit = torch.cat((logit_0, logit_k), 1)
# Confidence Loss Including Positive and Negative Examples
logit = logit.view(num, -1, self.num_classes)
loss_c = torch.sum(F.cross_entropy(logit[mask], conf_t[mask][:, 0].long(), reduction='none') * weight)
N = num_pos.sum()
loss_l /= N
loss_c /= N
loss_obj /= N
return {'loss_box_reg': loss_l, 'loss_cls': loss_c, 'loss_obj': loss_obj}
|
[
"torch.sum",
"torch.cat",
"torch.exp",
"torch.clamp",
"torch.Tensor",
"torch.no_grad",
"torch.nn.functional.smooth_l1_loss",
"torch.BoolTensor",
"utils.box_utils.match"
] |
[((3576, 3624), 'torch.nn.functional.smooth_l1_loss', 'F.smooth_l1_loss', (['loc_p', 'loc_t'], {'reduction': '"""none"""'}), "(loc_p, loc_t, reduction='none')\n", (3592, 3624), True, 'import torch.nn.functional as F\n'), ((5193, 5225), 'torch.cat', 'torch.cat', (['(logit_0, logit_k)', '(1)'], {}), '((logit_0, logit_k), 1)\n', (5202, 5225), False, 'import torch\n'), ((3190, 3283), 'utils.box_utils.match', 'match', (['self.threshold', 'truths', 'defaults', 'self.variance', 'labels', 'loc_t', 'conf_t', 'obj_t', 'idx'], {}), '(self.threshold, truths, defaults, self.variance, labels, loc_t,\n conf_t, obj_t, idx)\n', (3195, 3283), False, 'from utils.box_utils import match\n'), ((3812, 3827), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3825, 3827), False, 'import torch\n'), ((4249, 4309), 'torch.clamp', 'torch.clamp', (['(self.negpos_ratio * num_pos)'], {'max': '(num_priors - 1)'}), '(self.negpos_ratio * num_pos, max=num_priors - 1)\n', (4260, 4309), False, 'import torch\n'), ((2748, 2780), 'torch.Tensor', 'torch.Tensor', (['num', 'num_priors', '(4)'], {}), '(num, num_priors, 4)\n', (2760, 2780), False, 'import torch\n'), ((2809, 2841), 'torch.Tensor', 'torch.Tensor', (['num', 'num_priors', '(2)'], {}), '(num, num_priors, 2)\n', (2821, 2841), False, 'import torch\n'), ((2869, 2902), 'torch.BoolTensor', 'torch.BoolTensor', (['num', 'num_priors'], {}), '(num, num_priors)\n', (2885, 2902), False, 'import torch\n'), ((3691, 3715), 'torch.sum', 'torch.sum', (['loss_l'], {'dim': '(1)'}), '(loss_l, dim=1)\n', (3700, 3715), False, 'import torch\n'), ((5047, 5068), 'torch.exp', 'torch.exp', (['batch_conf'], {}), '(batch_conf)\n', (5056, 5068), False, 'import torch\n')]
|
import logging
import os
import re
import uuid
from typing import List, Optional, FrozenSet
import pytest
import argclass
class TestBasics:
class Parser(argclass.Parser):
integers: List[int] = argclass.Argument(
"integers", type=int,
nargs=argclass.Nargs.ONE_OR_MORE, metavar="N",
help="an integer for the accumulator",
)
accumulate = argclass.Argument(
"--sum", action=argclass.Actions.STORE_CONST, const=sum,
default=max, help="sum the integers (default: find the max)",
)
def test_simple(self):
parser = self.Parser()
parser.parse_args(["1", "2", "3"])
assert parser.integers
assert parser.integers == [1, 2, 3]
class HostPortGroup(argclass.Group):
host: str
port: int
class TestFoo:
class Parser(argclass.Parser):
foo: str = argclass.Argument(help="foo")
http: HostPortGroup = HostPortGroup(
title="HTTP host and port", prefix="api", defaults={
"port": 80, "host": "0.0.0.0",
},
)
grpc: HostPortGroup = HostPortGroup(
title="GRPC host and port",
defaults={"port": 6000, "host": "::"},
)
def test_simple(self):
parser = self.Parser()
parser.parse_args(["--foo", "bar"])
assert parser.foo == "bar"
parser.parse_args(["--foo=bar"])
assert parser.foo == "bar"
def test_group(self):
parser = self.Parser()
parser.parse_args(["--foo", "bar"])
assert parser.foo == "bar"
parser.parse_args([
"--foo=bar",
"--api-host=127.0.0.1",
"--api-port=8080",
"--grpc-host=127.0.0.2",
"--grpc-port=9000",
])
assert parser.foo == "bar"
assert parser.http.host == "127.0.0.1"
assert parser.http.port == 8080
assert parser.grpc.host == "127.0.0.2"
assert parser.grpc.port == 9000
def test_group_defaults(self):
parser = self.Parser()
parser.parse_args(["--foo=bar"])
assert parser.foo == "bar"
assert parser.http.host == "0.0.0.0"
assert parser.http.port == 80
assert parser.grpc.host == "::"
assert parser.grpc.port == 6000
def test_parser_repr(self):
parser = self.Parser()
r = repr(parser)
assert r == "<Parser: 1 arguments, 2 groups, 0 subparsers>"
def test_access_to_not_parsed_attrs(self):
parser = self.Parser()
with pytest.raises(AttributeError):
_ = parser.foo
def test_environment(self, request: pytest.FixtureRequest):
prefix = re.sub(r"\d+", "", uuid.uuid4().hex + uuid.uuid4().hex).upper()
expected = uuid.uuid4().hex
os.environ[f"{prefix}_FOO"] = expected
request.addfinalizer(lambda: os.environ.pop(f"{prefix}_FOO"))
parser = self.Parser(auto_env_var_prefix=f"{prefix}_")
parser.parse_args([])
assert parser.foo == expected
def test_env_var(request: pytest.FixtureRequest):
env_var = re.sub(r"\d+", "", uuid.uuid4().hex + uuid.uuid4().hex).upper()
class Parser(argclass.Parser):
foo: str = argclass.Argument(env_var=env_var)
expected = uuid.uuid4().hex
os.environ[env_var] = expected
request.addfinalizer(lambda: os.environ.pop(env_var))
parser = Parser()
parser.parse_args([])
assert parser.foo == expected
def test_nargs():
class Parser(argclass.Parser):
foo: List[int] = argclass.Argument(
nargs=argclass.Nargs.ZERO_OR_MORE, type=int,
)
bar: int = argclass.Argument(nargs="*")
spam: int = argclass.Argument(nargs=1)
parser = Parser()
parser.parse_args(["--foo", "1", "2", "--bar=3", "--spam=4"])
assert parser.foo == [1, 2]
assert parser.bar == [3]
assert parser.spam == [4]
def test_group_aliases():
class Group(argclass.Group):
foo: str = argclass.Argument("-F")
class Parser(argclass.Parser):
group = Group()
parser = Parser()
parser.parse_args(["-F", "egg"])
assert parser.group.foo == "egg"
def test_short_parser_definition():
class Parser(argclass.Parser):
foo: str
bar: int
parser = Parser()
parser.parse_args(["--foo=spam", "--bar=1"])
assert parser.foo == "spam"
assert parser.bar == 1
def test_print_help(capsys: pytest.CaptureFixture):
class Parser(argclass.Parser):
foo: str
bar: int = 0
parser = Parser()
parser.print_help()
captured = capsys.readouterr()
assert "--foo" in captured.out
assert "--bar" in captured.out
assert "--help" in captured.out
assert "--foo FOO" in captured.out
assert "[--bar BAR]" in captured.out
def test_print_log_level(capsys: pytest.CaptureFixture):
class Parser(argclass.Parser):
log_level: int = argclass.LogLevel
parser = Parser()
parser.parse_args(["--log-level", "info"])
assert parser.log_level == logging.INFO
parser.parse_args(["--log-level=warning"])
assert parser.log_level == logging.WARNING
def test_optional_type():
class Parser(argclass.Parser):
flag: bool
optional: Optional[bool]
parser = Parser()
parser.parse_args([])
assert parser.optional is None
assert not parser.flag
parser.parse_args(["--flag"])
assert parser.flag
for variant in ("yes", "Y", "yeS", "enable", "ENABLED", "1"):
parser.parse_args([f"--optional={variant}"])
assert parser.optional is True
for variant in ("no", "crap", "false", "disabled", "MY_HANDS_TYPING_WORDS"):
parser.parse_args([f"--optional={variant}"])
assert parser.optional is False
def test_argument_defaults():
class Parser(argclass.Parser):
debug: bool = False
confused_default: bool = True
pool_size: int = 4
forks: int = 2
parser = Parser()
parser.parse_args([])
assert parser.debug is False
assert parser.confused_default is True
assert parser.pool_size == 4
assert parser.forks == 2
parser.parse_args([
"--debug", "--forks=8", "--pool-size=2", "--confused-default",
])
assert parser.debug is True
assert parser.confused_default is False
assert parser.pool_size == 2
assert parser.forks == 8
def test_inheritance():
class AddressPort(argclass.Group):
address: str
port: int
class Parser(argclass.Parser, AddressPort):
pass
parser = Parser()
parser.parse_args(["--address=0.0.0.0", "--port=9876"])
assert parser.address == "0.0.0.0"
assert parser.port == 9876
def test_config_for_required(tmp_path):
class Parser(argclass.Parser):
required: int = argclass.Argument(required=True)
config_path = tmp_path / "config.ini"
with open(config_path, "w") as fp:
fp.write("[DEFAULT]\n")
fp.write("required = 10\n")
fp.write("\n")
parser = Parser(config_files=[config_path])
parser.parse_args([])
assert parser.required == 10
parser = Parser(config_files=[])
with pytest.raises(SystemExit):
parser.parse_args([])
def test_minimal_optional(tmp_path):
class Parser(argclass.Parser):
optional: Optional[int]
parser = Parser()
parser.parse_args([])
assert parser.optional is None
parser.parse_args(["--optional=10"])
assert parser.optional == 10
def test_optional_is_not_required(tmp_path):
class Parser(argclass.Parser):
optional: Optional[int] = argclass.Argument(required=False)
parser = Parser()
parser.parse_args([])
assert parser.optional is None
parser.parse_args(["--optional=20"])
assert parser.optional == 20
def test_minimal_required(tmp_path):
class Parser(argclass.Parser):
required: int
parser = Parser()
with pytest.raises(SystemExit):
parser.parse_args([])
parser.parse_args(["--required=20"])
assert parser.required == 20
def test_log_group():
class LogGroup(argclass.Group):
level: int = argclass.LogLevel
format = argclass.Argument(
choices=("json", "stream"), default="stream"
)
class Parser(argclass.Parser):
log = LogGroup()
parser = Parser()
parser.parse_args([])
assert parser.log.level == logging.INFO
assert parser.log.format == "stream"
parser.parse_args(["--log-level=debug", "--log-format=json"])
assert parser.log.level == logging.DEBUG
assert parser.log.format == "json"
def test_log_group_defaults():
class LogGroup(argclass.Group):
level: int = argclass.LogLevel
format: str = argclass.Argument(
choices=("json", "stream")
)
class Parser(argclass.Parser):
log = LogGroup(defaults=dict(format="json", level="error"))
parser = Parser()
parser.parse_args([])
assert parser.log.level == logging.ERROR
assert parser.log.format == "json"
def test_environment_required():
class Parser(argclass.Parser):
required: int
parser = Parser(auto_env_var_prefix="TEST_")
os.environ['TEST_REQUIRED'] = "100"
parser.parse_args([])
assert parser.required == 100
os.environ.pop('TEST_REQUIRED')
with pytest.raises(SystemExit):
parser.parse_args([])
def test_nargs_and_converter():
class Parser(argclass.Parser):
args_set: FrozenSet[int] = argclass.Argument(
type=int, nargs="+", converter=frozenset
)
parser = Parser()
parser.parse_args(["--args-set", "1", "2", "3", "4", "5"])
assert isinstance(parser.args_set, frozenset)
assert parser.args_set == frozenset([1, 2, 3, 4, 5])
def test_nargs_and_converter_not_required():
class Parser(argclass.Parser):
args_set: FrozenSet[int] = argclass.Argument(
type=int, nargs="*", converter=frozenset
)
parser = Parser()
parser.parse_args([])
assert isinstance(parser.args_set, frozenset)
assert parser.args_set == frozenset([])
parser.parse_args(["--args-set", "1", "2", "3", "4", "5"])
assert isinstance(parser.args_set, frozenset)
assert parser.args_set == frozenset([1, 2, 3, 4, 5])
def test_nargs_1():
class Parser(argclass.Parser):
args_set: FrozenSet[int] = argclass.Argument(
type=int, nargs=1, converter=frozenset
)
parser = Parser()
parser.parse_args([])
assert isinstance(parser.args_set, frozenset)
assert parser.args_set == frozenset([])
parser.parse_args(["--args-set", "1"])
assert isinstance(parser.args_set, frozenset)
assert parser.args_set == frozenset([1])
def test_nargs_env_var():
class Parser(argclass.Parser):
nargs: FrozenSet[int] = argclass.Argument(
type=int, nargs="*", converter=frozenset, env_var="NARGS"
)
os.environ['NARGS'] = "[1, 2, 3]"
try:
parser = Parser()
parser.parse_args([])
finally:
del os.environ['NARGS']
assert parser.nargs == frozenset({1, 2, 3})
def test_nargs_config_list(tmp_path):
class Parser(argclass.Parser):
nargs: FrozenSet[int] = argclass.Argument(
type=int, nargs="*", converter=frozenset, env_var="NARGS"
)
conf_file = tmp_path / "config.ini"
with open(conf_file, "w") as fp:
fp.write("[DEFAULT]\n")
fp.write("nargs = [1, 2, 3, 4]\n")
parser = Parser(config_files=[conf_file])
parser.parse_args([])
assert parser.nargs == frozenset({1, 2, 3, 4})
def test_nargs_config_set(tmp_path):
class Parser(argclass.Parser):
nargs: FrozenSet[int] = argclass.Argument(
type=int, nargs="*", converter=frozenset, env_var="NARGS"
)
conf_file = tmp_path / "config.ini"
with open(conf_file, "w") as fp:
fp.write("[DEFAULT]\n")
fp.write("nargs = {1, 2, 3, 4}\n")
parser = Parser(config_files=[conf_file])
parser.parse_args([])
assert parser.nargs == frozenset({1, 2, 3, 4})
|
[
"uuid.uuid4",
"argclass.Argument",
"os.environ.pop",
"pytest.raises"
] |
[((9310, 9341), 'os.environ.pop', 'os.environ.pop', (['"""TEST_REQUIRED"""'], {}), "('TEST_REQUIRED')\n", (9324, 9341), False, 'import os\n'), ((209, 338), 'argclass.Argument', 'argclass.Argument', (['"""integers"""'], {'type': 'int', 'nargs': 'argclass.Nargs.ONE_OR_MORE', 'metavar': '"""N"""', 'help': '"""an integer for the accumulator"""'}), "('integers', type=int, nargs=argclass.Nargs.ONE_OR_MORE,\n metavar='N', help='an integer for the accumulator')\n", (226, 338), False, 'import argclass\n'), ((403, 543), 'argclass.Argument', 'argclass.Argument', (['"""--sum"""'], {'action': 'argclass.Actions.STORE_CONST', 'const': 'sum', 'default': 'max', 'help': '"""sum the integers (default: find the max)"""'}), "('--sum', action=argclass.Actions.STORE_CONST, const=sum,\n default=max, help='sum the integers (default: find the max)')\n", (420, 543), False, 'import argclass\n'), ((891, 920), 'argclass.Argument', 'argclass.Argument', ([], {'help': '"""foo"""'}), "(help='foo')\n", (908, 920), False, 'import argclass\n'), ((3240, 3274), 'argclass.Argument', 'argclass.Argument', ([], {'env_var': 'env_var'}), '(env_var=env_var)\n', (3257, 3274), False, 'import argclass\n'), ((3291, 3303), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3301, 3303), False, 'import uuid\n'), ((3564, 3626), 'argclass.Argument', 'argclass.Argument', ([], {'nargs': 'argclass.Nargs.ZERO_OR_MORE', 'type': 'int'}), '(nargs=argclass.Nargs.ZERO_OR_MORE, type=int)\n', (3581, 3626), False, 'import argclass\n'), ((3669, 3697), 'argclass.Argument', 'argclass.Argument', ([], {'nargs': '"""*"""'}), "(nargs='*')\n", (3686, 3697), False, 'import argclass\n'), ((3718, 3744), 'argclass.Argument', 'argclass.Argument', ([], {'nargs': '(1)'}), '(nargs=1)\n', (3735, 3744), False, 'import argclass\n'), ((4005, 4028), 'argclass.Argument', 'argclass.Argument', (['"""-F"""'], {}), "('-F')\n", (4022, 4028), False, 'import argclass\n'), ((6815, 6847), 'argclass.Argument', 'argclass.Argument', ([], {'required': '(True)'}), '(required=True)\n', (6832, 6847), False, 'import argclass\n'), ((7179, 7204), 'pytest.raises', 'pytest.raises', (['SystemExit'], {}), '(SystemExit)\n', (7192, 7204), False, 'import pytest\n'), ((7619, 7652), 'argclass.Argument', 'argclass.Argument', ([], {'required': '(False)'}), '(required=False)\n', (7636, 7652), False, 'import argclass\n'), ((7942, 7967), 'pytest.raises', 'pytest.raises', (['SystemExit'], {}), '(SystemExit)\n', (7955, 7967), False, 'import pytest\n'), ((8191, 8254), 'argclass.Argument', 'argclass.Argument', ([], {'choices': "('json', 'stream')", 'default': '"""stream"""'}), "(choices=('json', 'stream'), default='stream')\n", (8208, 8254), False, 'import argclass\n'), ((8755, 8800), 'argclass.Argument', 'argclass.Argument', ([], {'choices': "('json', 'stream')"}), "(choices=('json', 'stream'))\n", (8772, 8800), False, 'import argclass\n'), ((9352, 9377), 'pytest.raises', 'pytest.raises', (['SystemExit'], {}), '(SystemExit)\n', (9365, 9377), False, 'import pytest\n'), ((9513, 9572), 'argclass.Argument', 'argclass.Argument', ([], {'type': 'int', 'nargs': '"""+"""', 'converter': 'frozenset'}), "(type=int, nargs='+', converter=frozenset)\n", (9530, 9572), False, 'import argclass\n'), ((9905, 9964), 'argclass.Argument', 'argclass.Argument', ([], {'type': 'int', 'nargs': '"""*"""', 'converter': 'frozenset'}), "(type=int, nargs='*', converter=frozenset)\n", (9922, 9964), False, 'import argclass\n'), ((10393, 10450), 'argclass.Argument', 'argclass.Argument', ([], {'type': 'int', 'nargs': '(1)', 'converter': 'frozenset'}), '(type=int, nargs=1, converter=frozenset)\n', (10410, 10450), False, 'import argclass\n'), ((10850, 10926), 'argclass.Argument', 'argclass.Argument', ([], {'type': 'int', 'nargs': '"""*"""', 'converter': 'frozenset', 'env_var': '"""NARGS"""'}), "(type=int, nargs='*', converter=frozenset, env_var='NARGS')\n", (10867, 10926), False, 'import argclass\n'), ((11254, 11330), 'argclass.Argument', 'argclass.Argument', ([], {'type': 'int', 'nargs': '"""*"""', 'converter': 'frozenset', 'env_var': '"""NARGS"""'}), "(type=int, nargs='*', converter=frozenset, env_var='NARGS')\n", (11271, 11330), False, 'import argclass\n'), ((11738, 11814), 'argclass.Argument', 'argclass.Argument', ([], {'type': 'int', 'nargs': '"""*"""', 'converter': 'frozenset', 'env_var': '"""NARGS"""'}), "(type=int, nargs='*', converter=frozenset, env_var='NARGS')\n", (11755, 11814), False, 'import argclass\n'), ((2566, 2595), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (2579, 2595), False, 'import pytest\n'), ((2789, 2801), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2799, 2801), False, 'import uuid\n'), ((3376, 3399), 'os.environ.pop', 'os.environ.pop', (['env_var'], {}), '(env_var)\n', (3390, 3399), False, 'import os\n'), ((2890, 2921), 'os.environ.pop', 'os.environ.pop', (['f"""{prefix}_FOO"""'], {}), "(f'{prefix}_FOO')\n", (2904, 2921), False, 'import os\n'), ((3140, 3152), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3150, 3152), False, 'import uuid\n'), ((3159, 3171), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3169, 3171), False, 'import uuid\n'), ((2725, 2737), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2735, 2737), False, 'import uuid\n'), ((2744, 2756), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2754, 2756), False, 'import uuid\n')]
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from mmcv.utils import Registry, build_from_cfg
PRIOR_GENERATORS = Registry('Generator for anchors and points')
ANCHOR_GENERATORS = PRIOR_GENERATORS
def build_prior_generator(cfg, default_args=None):
return build_from_cfg(cfg, PRIOR_GENERATORS, default_args)
def build_anchor_generator(cfg, default_args=None):
warnings.warn(
'``build_anchor_generator`` would be deprecated soon, please use '
'``build_prior_generator`` ')
return build_prior_generator(cfg, default_args=default_args)
|
[
"warnings.warn",
"mmcv.utils.build_from_cfg",
"mmcv.utils.Registry"
] |
[((138, 182), 'mmcv.utils.Registry', 'Registry', (['"""Generator for anchors and points"""'], {}), "('Generator for anchors and points')\n", (146, 182), False, 'from mmcv.utils import Registry, build_from_cfg\n'), ((291, 342), 'mmcv.utils.build_from_cfg', 'build_from_cfg', (['cfg', 'PRIOR_GENERATORS', 'default_args'], {}), '(cfg, PRIOR_GENERATORS, default_args)\n', (305, 342), False, 'from mmcv.utils import Registry, build_from_cfg\n'), ((405, 522), 'warnings.warn', 'warnings.warn', (['"""``build_anchor_generator`` would be deprecated soon, please use ``build_prior_generator`` """'], {}), "(\n '``build_anchor_generator`` would be deprecated soon, please use ``build_prior_generator`` '\n )\n", (418, 522), False, 'import warnings\n')]
|
"""
System tests.
<NAME> <<EMAIL>>
"""
import os
import re
import sh
from . import utils
def test_stdout():
"""Verify stdout and stderr.
pytest docs on capturing stdout and stderr
https://pytest.readthedocs.io/en/2.7.3/capture.html
"""
mailmerge_cmd = sh.Command("mailmerge")
output = mailmerge_cmd(
"--template", os.path.join(utils.TESTDATA, "simple_template.txt"),
"--database", os.path.join(utils.TESTDATA, "simple_database.csv"),
"--config", os.path.join(utils.TESTDATA, "server_open.conf"),
"--no-limit",
"--dry-run",
)
# Verify mailmerge output. We'll filter out the Date header because it
# won't match exactly.
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
assert stderr == ""
assert "Date:" in stdout
stdout = re.sub(r"Date.*\n", "", stdout)
assert stdout == """>>> message 0
TO: <EMAIL>
SUBJECT: Testing mailmerge
FROM: My Self <<EMAIL>>
MIME-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
Content-Transfer-Encoding: 7bit
Hi, Myself,
Your number is 17.
>>> sent message 0
>>> message 1
TO: <EMAIL>
SUBJECT: Testing mailmerge
FROM: My Self <<EMAIL>>
MIME-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
Content-Transfer-Encoding: 7bit
Hi, Bob,
Your number is 42.
>>> sent message 1
>>> This was a dry run. To send messages, use the --no-dry-run option.
"""
|
[
"re.sub",
"os.path.join",
"sh.Command"
] |
[((276, 299), 'sh.Command', 'sh.Command', (['"""mailmerge"""'], {}), "('mailmerge')\n", (286, 299), False, 'import sh\n'), ((853, 884), 're.sub', 're.sub', (['"""Date.*\\\\n"""', '""""""', 'stdout'], {}), "('Date.*\\\\n', '', stdout)\n", (859, 884), False, 'import re\n'), ((350, 401), 'os.path.join', 'os.path.join', (['utils.TESTDATA', '"""simple_template.txt"""'], {}), "(utils.TESTDATA, 'simple_template.txt')\n", (362, 401), False, 'import os\n'), ((425, 476), 'os.path.join', 'os.path.join', (['utils.TESTDATA', '"""simple_database.csv"""'], {}), "(utils.TESTDATA, 'simple_database.csv')\n", (437, 476), False, 'import os\n'), ((498, 546), 'os.path.join', 'os.path.join', (['utils.TESTDATA', '"""server_open.conf"""'], {}), "(utils.TESTDATA, 'server_open.conf')\n", (510, 546), False, 'import os\n')]
|
import argparse
import torch
import cv2
import os
import torch.nn.parallel
import modules, net, resnet, densenet, senet
import numpy as np
import loaddata_demo as loaddata
import pdb
import argparse
from volume import get_volume
from mask import get_mask
import matplotlib.image
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser(description='KD-network')
parser.add_argument('--img', metavar='DIR',default="./input/test.jpg",
help='img to input')
parser.add_argument('--json', metavar='DIR',default="./input/test.json",
help='json file to input')
parser.add_argument('--output', metavar='DIR',default="./output",
help='dir to output')
args=parser.parse_args()
def define_model(is_resnet, is_densenet, is_senet):
if is_resnet:
original_model = resnet.resnet50(pretrained = True)
Encoder = modules.E_resnet(original_model)
model = net.model(Encoder, num_features=2048, block_channel = [256, 512, 1024, 2048])
if is_densenet:
original_model = densenet.densenet161(pretrained=True)
Encoder = modules.E_densenet(original_model)
model = net.model(Encoder, num_features=2208, block_channel = [192, 384, 1056, 2208])
if is_senet:
original_model = senet.senet154(pretrained='imagenet')
Encoder = modules.E_senet(original_model)
model = net.model(Encoder, num_features=2048, block_channel = [256, 512, 1024, 2048])
return model
def main():
if (not os.path.exists(args.output)):
print("Output directory doesn't exist! Creating...")
os.makedirs(args.output)
model = define_model(is_resnet=False, is_densenet=False, is_senet=True)
model = torch.nn.DataParallel(model).cuda()
model.load_state_dict(torch.load('./pretrained_model/model_senet'))
model.eval()
print
img = cv2.imread(args.img)
nyu2_loader = loaddata.readNyu2(args.img)
test(nyu2_loader, model, img.shape[1], img.shape[0])
def test(nyu2_loader, model, width, height):
for i, image in enumerate(nyu2_loader):
image = torch.autograd.Variable(image, volatile=True).cuda()
out = model(image)
out = out.view(out.size(2),out.size(3)).data.cpu().numpy()
max_pix = out.max()
min_pix = out.min()
out = (out-min_pix)/(max_pix-min_pix)*255
out = cv2.resize(out,(width,height),interpolation=cv2.INTER_CUBIC)
cv2.imwrite(os.path.join(args.output, "out_grey.png"),out)
out_grey = cv2.imread(os.path.join(args.output, "out_grey.png"),0)
out_color = cv2.applyColorMap(out_grey, cv2.COLORMAP_JET)
cv2.imwrite(os.path.join(args.output, "out_color.png"),out_color)
vol = get_volume(out_grey, args.json)
print("Volume:")
print(vol)
print("unit: cm^3")
out_file = open(os.path.join(args.output, "out.txt"), "w")
out_file.write("Volume:\n")
out_file.write(str(vol))
out_file.write("\n")
out_file.write("unit: cm^3")
out_file.close()
get_mask(out_grey, args.json, args.output)
if __name__ == '__main__':
main()
|
[
"argparse.ArgumentParser",
"net.model",
"os.path.join",
"loaddata_demo.readNyu2",
"torch.load",
"os.path.exists",
"senet.senet154",
"mask.get_mask",
"cv2.resize",
"densenet.densenet161",
"modules.E_senet",
"torch.autograd.Variable",
"cv2.applyColorMap",
"os.makedirs",
"modules.E_resnet",
"volume.get_volume",
"modules.E_densenet",
"cv2.imread",
"torch.nn.DataParallel",
"resnet.resnet50"
] |
[((322, 371), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""KD-network"""'}), "(description='KD-network')\n", (345, 371), False, 'import argparse\n'), ((1874, 1894), 'cv2.imread', 'cv2.imread', (['args.img'], {}), '(args.img)\n', (1884, 1894), False, 'import cv2\n'), ((1914, 1941), 'loaddata_demo.readNyu2', 'loaddata.readNyu2', (['args.img'], {}), '(args.img)\n', (1931, 1941), True, 'import loaddata_demo as loaddata\n'), ((834, 866), 'resnet.resnet50', 'resnet.resnet50', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (849, 866), False, 'import modules, net, resnet, densenet, senet\n'), ((887, 919), 'modules.E_resnet', 'modules.E_resnet', (['original_model'], {}), '(original_model)\n', (903, 919), False, 'import modules, net, resnet, densenet, senet\n'), ((937, 1012), 'net.model', 'net.model', (['Encoder'], {'num_features': '(2048)', 'block_channel': '[256, 512, 1024, 2048]'}), '(Encoder, num_features=2048, block_channel=[256, 512, 1024, 2048])\n', (946, 1012), False, 'import modules, net, resnet, densenet, senet\n'), ((1060, 1097), 'densenet.densenet161', 'densenet.densenet161', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (1080, 1097), False, 'import modules, net, resnet, densenet, senet\n'), ((1116, 1150), 'modules.E_densenet', 'modules.E_densenet', (['original_model'], {}), '(original_model)\n', (1134, 1150), False, 'import modules, net, resnet, densenet, senet\n'), ((1167, 1242), 'net.model', 'net.model', (['Encoder'], {'num_features': '(2208)', 'block_channel': '[192, 384, 1056, 2208]'}), '(Encoder, num_features=2208, block_channel=[192, 384, 1056, 2208])\n', (1176, 1242), False, 'import modules, net, resnet, densenet, senet\n'), ((1287, 1324), 'senet.senet154', 'senet.senet154', ([], {'pretrained': '"""imagenet"""'}), "(pretrained='imagenet')\n", (1301, 1324), False, 'import modules, net, resnet, densenet, senet\n'), ((1343, 1374), 'modules.E_senet', 'modules.E_senet', (['original_model'], {}), '(original_model)\n', (1358, 1374), False, 'import modules, net, resnet, densenet, senet\n'), ((1391, 1466), 'net.model', 'net.model', (['Encoder'], {'num_features': '(2048)', 'block_channel': '[256, 512, 1024, 2048]'}), '(Encoder, num_features=2048, block_channel=[256, 512, 1024, 2048])\n', (1400, 1466), False, 'import modules, net, resnet, densenet, senet\n'), ((1516, 1543), 'os.path.exists', 'os.path.exists', (['args.output'], {}), '(args.output)\n', (1530, 1543), False, 'import os\n'), ((1615, 1639), 'os.makedirs', 'os.makedirs', (['args.output'], {}), '(args.output)\n', (1626, 1639), False, 'import os\n'), ((1791, 1835), 'torch.load', 'torch.load', (['"""./pretrained_model/model_senet"""'], {}), "('./pretrained_model/model_senet')\n", (1801, 1835), False, 'import torch\n'), ((2381, 2444), 'cv2.resize', 'cv2.resize', (['out', '(width, height)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(out, (width, height), interpolation=cv2.INTER_CUBIC)\n', (2391, 2444), False, 'import cv2\n'), ((2604, 2649), 'cv2.applyColorMap', 'cv2.applyColorMap', (['out_grey', 'cv2.COLORMAP_JET'], {}), '(out_grey, cv2.COLORMAP_JET)\n', (2621, 2649), False, 'import cv2\n'), ((2738, 2769), 'volume.get_volume', 'get_volume', (['out_grey', 'args.json'], {}), '(out_grey, args.json)\n', (2748, 2769), False, 'from volume import get_volume\n'), ((3077, 3119), 'mask.get_mask', 'get_mask', (['out_grey', 'args.json', 'args.output'], {}), '(out_grey, args.json, args.output)\n', (3085, 3119), False, 'from mask import get_mask\n'), ((1729, 1757), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (1750, 1757), False, 'import torch\n'), ((2462, 2503), 'os.path.join', 'os.path.join', (['args.output', '"""out_grey.png"""'], {}), "(args.output, 'out_grey.png')\n", (2474, 2503), False, 'import os\n'), ((2539, 2580), 'os.path.join', 'os.path.join', (['args.output', '"""out_grey.png"""'], {}), "(args.output, 'out_grey.png')\n", (2551, 2580), False, 'import os\n'), ((2670, 2712), 'os.path.join', 'os.path.join', (['args.output', '"""out_color.png"""'], {}), "(args.output, 'out_color.png')\n", (2682, 2712), False, 'import os\n'), ((2866, 2902), 'os.path.join', 'os.path.join', (['args.output', '"""out.txt"""'], {}), "(args.output, 'out.txt')\n", (2878, 2902), False, 'import os\n'), ((2114, 2159), 'torch.autograd.Variable', 'torch.autograd.Variable', (['image'], {'volatile': '(True)'}), '(image, volatile=True)\n', (2137, 2159), False, 'import torch\n')]
|
# Generated by Django 2.1.5 on 2019-09-11 07:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('features', '0016_auto_20190605_1830'),
]
operations = [
migrations.AlterField(
model_name='feature',
name='apps',
field=models.ManyToManyField(related_name='app_features', to='applications.Application', verbose_name='关联应用'),
),
]
|
[
"django.db.models.ManyToManyField"
] |
[((336, 444), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'related_name': '"""app_features"""', 'to': '"""applications.Application"""', 'verbose_name': '"""关联应用"""'}), "(related_name='app_features', to=\n 'applications.Application', verbose_name='关联应用')\n", (358, 444), False, 'from django.db import migrations, models\n')]
|
from django.db import models
from django.utils import timezone
from django.utils.text import slugify
from django.utils.translation import gettext_lazy as _
from modelcluster.fields import ParentalKey
from modelcluster.models import ClusterableModel
from wagtail.core.fields import RichTextField
from hextech_core.core.models.base_model import BaseModel, MetadataModel
from hextech_core.core.utils import no_accent_vietnamese
from hextech_core.core.utils.id import RandomID
from hextech_core.users.models import User
class BlogCategory(MetadataModel):
parent = models.ForeignKey(
"self",
on_delete=models.PROTECT,
related_name="child_categories",
null=True,
blank=True,
)
name = models.CharField(max_length=100)
slug = models.SlugField(blank=True, unique=True, db_index=True)
def save(self, *args, **kwargs):
self.slug = slugify(self.name)
super().save(*args, **kwargs)
def __str__(self):
return self.name
class BlogTag(BaseModel):
tag = models.CharField(max_length=50, unique=True)
def __str__(self):
return self.tag
@classmethod
def tagger(cls, tag: str) -> str:
tag = no_accent_vietnamese(tag)
tag = "".join([ele.title() for ele in tag.split(" ")])
return tag
def save(self, *args, **kwargs):
if not self.pk:
self.tag = self.tagger(self.tag)
return super().save()
class Blog(ClusterableModel, MetadataModel):
id = models.BigIntegerField(
_("Random id"), default=RandomID("blog.Blog"), primary_key=True
)
author = models.ForeignKey(
"users.User", on_delete=models.CASCADE, related_name="blogs", db_index=True
)
category = models.ForeignKey(
BlogCategory,
on_delete=models.SET_NULL,
related_name="blogs",
db_index=True,
null=True,
)
title = models.CharField(max_length=400)
content = RichTextField()
slug = models.SlugField(blank=True, unique=True, db_index=True, max_length=450)
tags = models.ManyToManyField(BlogTag, blank=True)
published = models.BooleanField(default=True)
published_at = models.DateTimeField(null=True, blank=True)
class Meta:
unique_together = ("author", "title")
def save(self, *args, **kwargs):
print(self.__dict__)
if self.published and not self.published_at:
self.published_at = timezone.now()
self.slug = f"{slugify(self.title)}-{self.author.id}"
super().save(*args, **kwargs)
def __str__(self):
return self.title
class BlogComment(BaseModel):
blog = ParentalKey(Blog, on_delete=models.CASCADE, related_name="comments")
content = RichTextField()
title = models.CharField(max_length=255)
created_by = models.ForeignKey(
"users.User", on_delete=models.CASCADE, related_name="+", null=True, blank=True
)
def __str__(self):
return f"#{self.blog.id} - {self.title if self.title else 'Untitled'}"
class BlogLike(BaseModel):
blog = models.ForeignKey(Blog, on_delete=models.CASCADE, related_name="likes")
user = models.ForeignKey(User, on_delete=models.CASCADE, related_name="liked")
is_like = models.BooleanField(default=True)
class Meta:
unique_together = ("blog", "user")
|
[
"django.db.models.ManyToManyField",
"modelcluster.fields.ParentalKey",
"django.utils.translation.gettext_lazy",
"django.db.models.ForeignKey",
"django.db.models.CharField",
"django.utils.timezone.now",
"django.db.models.SlugField",
"django.db.models.BooleanField",
"django.utils.text.slugify",
"hextech_core.core.utils.no_accent_vietnamese",
"hextech_core.core.utils.id.RandomID",
"django.db.models.DateTimeField",
"wagtail.core.fields.RichTextField"
] |
[((567, 679), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""self"""'], {'on_delete': 'models.PROTECT', 'related_name': '"""child_categories"""', 'null': '(True)', 'blank': '(True)'}), "('self', on_delete=models.PROTECT, related_name=\n 'child_categories', null=True, blank=True)\n", (584, 679), False, 'from django.db import models\n'), ((733, 765), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (749, 765), False, 'from django.db import models\n'), ((777, 833), 'django.db.models.SlugField', 'models.SlugField', ([], {'blank': '(True)', 'unique': '(True)', 'db_index': '(True)'}), '(blank=True, unique=True, db_index=True)\n', (793, 833), False, 'from django.db import models\n'), ((1036, 1080), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'unique': '(True)'}), '(max_length=50, unique=True)\n', (1052, 1080), False, 'from django.db import models\n'), ((1615, 1714), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""users.User"""'], {'on_delete': 'models.CASCADE', 'related_name': '"""blogs"""', 'db_index': '(True)'}), "('users.User', on_delete=models.CASCADE, related_name=\n 'blogs', db_index=True)\n", (1632, 1714), False, 'from django.db import models\n'), ((1739, 1850), 'django.db.models.ForeignKey', 'models.ForeignKey', (['BlogCategory'], {'on_delete': 'models.SET_NULL', 'related_name': '"""blogs"""', 'db_index': '(True)', 'null': '(True)'}), "(BlogCategory, on_delete=models.SET_NULL, related_name=\n 'blogs', db_index=True, null=True)\n", (1756, 1850), False, 'from django.db import models\n'), ((1905, 1937), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(400)'}), '(max_length=400)\n', (1921, 1937), False, 'from django.db import models\n'), ((1952, 1967), 'wagtail.core.fields.RichTextField', 'RichTextField', ([], {}), '()\n', (1965, 1967), False, 'from wagtail.core.fields import RichTextField\n'), ((1979, 2051), 'django.db.models.SlugField', 'models.SlugField', ([], {'blank': '(True)', 'unique': '(True)', 'db_index': '(True)', 'max_length': '(450)'}), '(blank=True, unique=True, db_index=True, max_length=450)\n', (1995, 2051), False, 'from django.db import models\n'), ((2063, 2106), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['BlogTag'], {'blank': '(True)'}), '(BlogTag, blank=True)\n', (2085, 2106), False, 'from django.db import models\n'), ((2123, 2156), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (2142, 2156), False, 'from django.db import models\n'), ((2176, 2219), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (2196, 2219), False, 'from django.db import models\n'), ((2643, 2711), 'modelcluster.fields.ParentalKey', 'ParentalKey', (['Blog'], {'on_delete': 'models.CASCADE', 'related_name': '"""comments"""'}), "(Blog, on_delete=models.CASCADE, related_name='comments')\n", (2654, 2711), False, 'from modelcluster.fields import ParentalKey\n'), ((2726, 2741), 'wagtail.core.fields.RichTextField', 'RichTextField', ([], {}), '()\n', (2739, 2741), False, 'from wagtail.core.fields import RichTextField\n'), ((2754, 2786), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (2770, 2786), False, 'from django.db import models\n'), ((2804, 2906), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""users.User"""'], {'on_delete': 'models.CASCADE', 'related_name': 
'"""+"""', 'null': '(True)', 'blank': '(True)'}), "('users.User', on_delete=models.CASCADE, related_name='+',\n null=True, blank=True)\n", (2821, 2906), False, 'from django.db import models\n'), ((3060, 3131), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Blog'], {'on_delete': 'models.CASCADE', 'related_name': '"""likes"""'}), "(Blog, on_delete=models.CASCADE, related_name='likes')\n", (3077, 3131), False, 'from django.db import models\n'), ((3143, 3214), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.CASCADE', 'related_name': '"""liked"""'}), "(User, on_delete=models.CASCADE, related_name='liked')\n", (3160, 3214), False, 'from django.db import models\n'), ((3229, 3262), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (3248, 3262), False, 'from django.db import models\n'), ((892, 910), 'django.utils.text.slugify', 'slugify', (['self.name'], {}), '(self.name)\n', (899, 910), False, 'from django.utils.text import slugify\n'), ((1199, 1224), 'hextech_core.core.utils.no_accent_vietnamese', 'no_accent_vietnamese', (['tag'], {}), '(tag)\n', (1219, 1224), False, 'from hextech_core.core.utils import no_accent_vietnamese\n'), ((1532, 1546), 'django.utils.translation.gettext_lazy', '_', (['"""Random id"""'], {}), "('Random id')\n", (1533, 1546), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1556, 1577), 'hextech_core.core.utils.id.RandomID', 'RandomID', (['"""blog.Blog"""'], {}), "('blog.Blog')\n", (1564, 1577), False, 'from hextech_core.core.utils.id import RandomID\n'), ((2435, 2449), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (2447, 2449), False, 'from django.utils import timezone\n'), ((2473, 2492), 'django.utils.text.slugify', 'slugify', (['self.title'], {}), '(self.title)\n', (2480, 2492), False, 'from django.utils.text import slugify\n')]
|
import torch
import torch.nn.functional as F
import torch.utils.data
import torch.utils.data
from models.base_models import OCModel, PUModelRandomBatch
from models.classifiers import Net
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# code DROCC is borrowed from https://github.com/microsoft/EdgeML
class DROCC(OCModel):
def __init__(self,
model=Net,
lam=0.5,
radius=8,
gamma=2,
warmup_epochs=6,
ascent_step_size=0.001,
ascent_num_steps=50,
half=True):
super().__init__(model, 0)
self.lam = lam
self.radius = radius
self.gamma = gamma
self.warmup_epochs = warmup_epochs
self.ascent_step_size = ascent_step_size
self.ascent_num_steps = ascent_num_steps
self.half = half
def batch_loss(self, batch):
data, target = batch[0], batch[2]
data, target = data.to(device), target.to(device)
# Data Processing
data = data.to(torch.float)
target = target.to(torch.float)
target = torch.squeeze(target)
# Extract the logits for cross entropy loss
logits_start = self.model.forward_start(data)
logits = self.model.forward_end(logits_start)
logits = torch.squeeze(logits, dim=1)
ce_loss = F.binary_cross_entropy_with_logits(logits, target)
# Add to the epoch variable for printing average CE Loss
'''
Adversarial Loss is calculated only for the positive data points (label==1).
'''
if self.epoch >= self.warmup_epochs:
logits_start = logits_start[target == 1]
# AdvLoss
if not self.half:
adv_loss = self.one_class_adv_loss(data[target == 1].detach(), self.half)
else:
adv_loss = self.one_class_adv_loss(logits_start.detach(), self.half)
loss = ce_loss + adv_loss * self.lam
else:
# If only CE based training has to be done
loss = ce_loss
return loss
def one_class_adv_loss(self, x_train_data, half=True):
"""Computes the adversarial loss:
1) Sample points initially at random around the positive training
data points
2) Gradient ascent to find the most optimal point in set N_i(r)
classified as +ve (label=0). This is done by maximizing
the CE loss wrt label 0
3) Project the points between spheres of radius R and gamma * R
(set N_i(r))
4) Pass the calculated adversarial points through the model,
and calculate the CE loss wrt target class 0
Parameters
----------
x_train_data: Batch of data to compute loss on.
"""
batch_size = len(x_train_data)
# Randomly sample points around the training data
# We will perform SGD on these to find the adversarial points
x_adv = torch.randn(x_train_data.shape).to(device).detach().requires_grad_()
x_adv_sampled = x_adv + x_train_data
for step in range(self.ascent_num_steps):
with torch.enable_grad():
new_targets = torch.zeros(batch_size, 1).to(device)
# new_targets = (1 - targets).to(self.device)
new_targets = torch.squeeze(new_targets)
new_targets = new_targets.to(torch.float)
if half:
logits = self.model.forward_end(x_adv_sampled)
else:
logits = self.model(x_adv_sampled)
logits = torch.squeeze(logits, dim=1)
new_loss = F.binary_cross_entropy_with_logits(logits, new_targets)
grad = torch.autograd.grad(new_loss, [x_adv_sampled])[0]
grad_norm = torch.norm(grad, p=2, dim=tuple(range(1, grad.dim())))
grad_norm = grad_norm.view(-1, *[1] * (grad.dim() - 1))
grad_normalized = grad / grad_norm
with torch.no_grad():
x_adv_sampled.add_(self.ascent_step_size * grad_normalized)
if (step + 1) % 10 == 0:
# Project the normal points to the set N_i(r)
h = x_adv_sampled - x_train_data
norm_h = torch.sqrt(torch.sum(h ** 2,
dim=tuple(range(1, h.dim()))))
alpha = torch.clamp(norm_h, self.radius,
self.gamma * self.radius).to(device)
# Make use of broadcast to project h
proj = (alpha / norm_h).view(-1, *[1] * (h.dim() - 1))
h = proj * h
x_adv_sampled = x_train_data + h # These adv_points are now on the surface of hyper-sphere
if half:
adv_pred = self.model.forward_end(x_adv_sampled)
else:
adv_pred = self.model(x_adv_sampled)
adv_pred = torch.squeeze(adv_pred, dim=1)
adv_loss = F.binary_cross_entropy_with_logits(adv_pred, new_targets)
return adv_loss
# class DROCC(nn.Module):
# def __init__(self, ):
# super().__init__()
#
# self.model = CIFAR10_LeNet()
#
# def run_train(self,
# train_data,
# test_data,
# lamda=0.5,
# radius=8,
# gamma=2,
# verbose=False,
# learning_rate=1e-3,
# total_epochs=30,
# only_ce_epochs=6,
# ascent_step_size=0.001,
# ascent_num_steps=50,
# gamma_lr=1,
# batch_size=128,
# half=True):
#
# self.best_score = -np.inf
# best_model = None
# self.ascent_num_steps = ascent_num_steps
# self.ascent_step_size = ascent_step_size
# self.lamda = lamda
# self.radius = radius
# self.gamma = gamma
#
# self.optimizer = optim.Adam(self.model.parameters(), lr=learning_rate)
# lr_scheduler = optim.lr_scheduler.ExponentialLR(self.optimizer, gamma=gamma_lr)
#
# train_loader = torch.utils.data.DataLoader(train_data,
# batch_size=batch_size,
# shuffle=True)
#
# test_loader = torch.utils.data.DataLoader(test_data,
# batch_size=batch_size,
# shuffle=True)
#
# for epoch in range(total_epochs):
# # Make the weights trainable
# self.model.train()
#
# # Placeholder for the respective 2 loss values
# epoch_adv_loss = torch.tensor([0]).type(torch.float32).to(device) # AdvLoss
# epoch_ce_loss = 0 # Cross entropy Loss
#
# batch_idx = -1
# for data, target, _ in train_loader:
# batch_idx += 1
# data, target = data.to(device), target.to(device)
# # Data Processing
# data = data.to(torch.float)
# target = target.to(torch.float)
# target = torch.squeeze(target)
#
# self.optimizer.zero_grad()
#
# # Extract the logits for cross entropy loss
# logits_start = self.model.half_forward_start(data)
# logits = self.model.half_forward_end(logits_start)
#
# logits = torch.squeeze(logits, dim=1)
# ce_loss = F.binary_cross_entropy_with_logits(logits, target)
# # Add to the epoch variable for printing average CE Loss
# epoch_ce_loss += ce_loss
#
# '''
# Adversarial Loss is calculated only for the positive data points (label==1).
# '''
# if epoch >= only_ce_epochs:
# logits_start = logits_start[target == 1]
# # AdvLoss
# if not half:
# adv_loss = self.one_class_adv_loss(data[target == 1].detach(), target[target == 1], half)
# else:
# adv_loss = self.one_class_adv_loss(logits_start.detach(), target[target == 1], half)
# epoch_adv_loss += adv_loss
#
# loss = ce_loss + adv_loss * self.lamda
# else:
# # If only CE based training has to be done
# loss = ce_loss
#
# # Backprop
# loss.backward()
# self.optimizer.step()
#
# epoch_ce_loss = epoch_ce_loss / (batch_idx + 1) # Average CE Loss
# epoch_adv_loss = epoch_adv_loss / (batch_idx + 1) # Average AdvLoss
#
# if verbose:
# test_score = self.test(test_loader)
# if test_score > self.best_score:
# self.best_score = test_score
# best_model = copy.deepcopy(self.model)
#
# print('Epoch: {}, CE Loss: {}, AdvLoss: {}, {}: {}'.format(
# epoch, epoch_ce_loss.item(), epoch_adv_loss.item(),
# 'AUC', test_score))
# lr_scheduler.step()
# if verbose:
# self.model = copy.deepcopy(best_model)
# print('\nBest test {}: {}'.format(
# 'AUC', self.best_score
# ))
#
# def test(self, test_loader, metric='AUC'):
# """Evaluate the model on the given test dataset.
# Parameters
# ----------
# test_loader: Dataloader object for the test dataset.
# metric: Metric used for evaluation (AUC / F1).
# """
# self.model.eval()
# label_score = []
# batch_idx = -1
# for data, target, _ in test_loader:
# batch_idx += 1
# data, target = data.to(device), target.to(device)
# data = data.to(torch.float)
# target = target.to(torch.float)
# target = torch.squeeze(target)
#
# logits = self.model(data)
# logits = torch.squeeze(logits, dim=1)
# sigmoid_logits = torch.sigmoid(logits)
# scores = logits
# label_score += list(zip(target.cpu().data.numpy().tolist(),
# scores.cpu().data.numpy().tolist()))
# # Compute test score
# labels, scores = zip(*label_score)
# labels = np.array(labels)
# scores = np.array(scores)
# if metric == 'AUC':
# test_metric = roc_auc_score(labels, scores)
# if metric == 'alpha':
# test_metric = (scores > 0.5).mean()
# return test_metric
#
# def one_class_adv_loss(self, x_train_data, targets, half=True):
# """Computes the adversarial loss:
# 1) Sample points initially at random around the positive training
# data points
# 2) Gradient ascent to find the most optimal point in set N_i(r)
# classified as +ve (label=0). This is done by maximizing
# the CE loss wrt label 0
# 3) Project the points between spheres of radius R and gamma * R
# (set N_i(r))
# 4) Pass the calculated adversarial points through the model,
# and calculate the CE loss wrt target class 0
#
# Parameters
# ----------
# x_train_data: Batch of data to compute loss on.
# """
# batch_size = len(x_train_data)
# # Randomly sample points around the training data
# # We will perform SGD on these to find the adversarial points
# x_adv = torch.randn(x_train_data.shape).to(device).detach().requires_grad_()
# x_adv_sampled = x_adv + x_train_data
#
# for step in range(self.ascent_num_steps):
# with torch.enable_grad():
#
# new_targets = torch.zeros(batch_size, 1).to(device)
# # new_targets = (1 - targets).to(self.device)
# new_targets = torch.squeeze(new_targets)
# new_targets = new_targets.to(torch.float)
#
# if half:
# logits = self.model.half_forward_end(x_adv_sampled)
# else:
# logits = self.model(x_adv_sampled)
#
# logits = torch.squeeze(logits, dim=1)
# new_loss = F.binary_cross_entropy_with_logits(logits, new_targets)
#
# grad = torch.autograd.grad(new_loss, [x_adv_sampled])[0]
# grad_norm = torch.norm(grad, p=2, dim=tuple(range(1, grad.dim())))
# grad_norm = grad_norm.view(-1, *[1] * (grad.dim() - 1))
# grad_normalized = grad / grad_norm
# with torch.no_grad():
# x_adv_sampled.add_(self.ascent_step_size * grad_normalized)
#
# if (step + 1) % 10 == 0:
# # Project the normal points to the set N_i(r)
# h = x_adv_sampled - x_train_data
# norm_h = torch.sqrt(torch.sum(h ** 2,
# dim=tuple(range(1, h.dim()))))
# alpha = torch.clamp(norm_h, self.radius,
# self.gamma * self.radius).to(device)
# # Make use of broadcast to project h
# proj = (alpha / norm_h).view(-1, *[1] * (h.dim() - 1))
# h = proj * h
# x_adv_sampled = x_train_data + h # These adv_points are now on the surface of hyper-sphere
#
# if half:
# adv_pred = self.model.half_forward_end(x_adv_sampled)
# else:
# adv_pred = self.model(x_adv_sampled)
#
# adv_pred = torch.squeeze(adv_pred, dim=1)
# adv_loss = F.binary_cross_entropy_with_logits(adv_pred, (new_targets))
#
# return adv_loss
#
# def save(self, path):
# torch.save(self.model.state_dict(), os.path.join(path, 'model.pt'))
#
# def load(self, path):
# self.model.load_state_dict(torch.load(os.path.join(path, 'model.pt')))
class PU_DROCC(PUModelRandomBatch):
def __init__(self,
model=Net,
lam=0.5,
radius=8,
gamma=2,
warmup_epochs=6,
ascent_step_size=0.001,
ascent_num_steps=50,
half=True):
super().__init__(model, 0)
self.lam = lam
self.radius = radius
self.gamma = gamma
self.warmup_epochs = warmup_epochs
self.ascent_step_size = ascent_step_size
self.ascent_num_steps = ascent_num_steps
self.half = half
def batch_loss(self, batch):
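        # PU variant of the DROCC objective: binary cross-entropy is computed only on the
        # labelled positives (target == 1), while the adversarial DROCC term is built from
        # the unlabelled samples (target == 0) once the warm-up epochs have passed.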
data, target = batch[0], batch[2]
data, target = data.to(device), target.to(device)
lab_ind = target == 1
unl_ind = target == 0
# lab_cnt = max(lab_ind.sum(), 1)
unl_cnt = max(unl_ind.sum(), 1)
# Extract the logits for cross entropy loss
logits_start = self.model.forward_start(data)
logits = self.model.forward_end(logits_start[lab_ind])
logits = torch.squeeze(logits, dim=1)
ce_loss = F.binary_cross_entropy_with_logits(logits, target[lab_ind])
# Add to the epoch variable for printing average CE Loss
'''
Adversarial Loss is calculated only for the positive data points (label==1).
'''
if self.epoch >= self.warmup_epochs and unl_cnt > 1:
logits_start = logits_start[unl_ind]
# AdvLoss
if not self.half:
adv_loss = self.one_class_adv_loss(data[unl_ind].detach(), self.half)
else:
                adv_loss = self.one_class_adv_loss(logits_start.detach(), self.half)
loss = ce_loss + adv_loss * self.lam
else:
# If only CE based training has to be done
loss = ce_loss
return loss
def one_class_adv_loss(self, x_train_data, half=True):
"""Computes the adversarial loss:
1) Sample points initially at random around the positive training
data points
2) Gradient ascent to find the most optimal point in set N_i(r)
classified as +ve (label=0). This is done by maximizing
the CE loss wrt label 0
3) Project the points between spheres of radius R and gamma * R
(set N_i(r))
4) Pass the calculated adversarial points through the model,
and calculate the CE loss wrt target class 0
Parameters
----------
x_train_data: Batch of data to compute loss on.
"""
batch_size = len(x_train_data)
# Randomly sample points around the training data
# We will perform SGD on these to find the adversarial points
x_adv = torch.randn(x_train_data.shape).to(device).detach().requires_grad_()
x_adv_sampled = x_adv + x_train_data
for step in range(self.ascent_num_steps):
with torch.enable_grad():
new_targets = torch.zeros(batch_size, 1).to(device)
# new_targets = (1 - targets).to(self.device)
new_targets = torch.squeeze(new_targets)
new_targets = new_targets.to(torch.float)
if half:
logits = self.model.forward_end(x_adv_sampled)
else:
logits = self.model(x_adv_sampled)
logits = torch.squeeze(logits, dim=1)
new_loss = F.binary_cross_entropy_with_logits(logits, new_targets)
grad = torch.autograd.grad(new_loss, [x_adv_sampled])[0]
grad_norm = torch.norm(grad, p=2, dim=tuple(range(1, grad.dim())))
grad_norm = grad_norm.view(-1, *[1] * (grad.dim() - 1))
grad_normalized = grad / grad_norm
with torch.no_grad():
x_adv_sampled.add_(self.ascent_step_size * grad_normalized)
if (step + 1) % 10 == 0:
# Project the normal points to the set N_i(r)
h = x_adv_sampled - x_train_data
norm_h = torch.sqrt(torch.sum(h ** 2,
dim=tuple(range(1, h.dim()))))
alpha = torch.clamp(norm_h, self.radius,
self.gamma * self.radius).to(device)
# Make use of broadcast to project h
proj = (alpha / norm_h).view(-1, *[1] * (h.dim() - 1))
h = proj * h
x_adv_sampled = x_train_data + h # These adv_points are now on the surface of hyper-sphere
if half:
adv_pred = self.model.forward_end(x_adv_sampled)
else:
adv_pred = self.model(x_adv_sampled)
adv_pred = torch.squeeze(adv_pred, dim=1)
adv_loss = F.binary_cross_entropy_with_logits(adv_pred, new_targets)
return adv_loss
# class PU_DROCC(nn.Module):
# def __init__(self, ):
# super().__init__()
#
# self.model = CIFAR10_LeNet()
#
# def run_train(self,
# train_data,
# test_data,
# lamda=0.5,
# radius=1,
# gamma=2,
# verbose=False,
# learning_rate=5e-4,
# total_epochs=20,
# only_ce_epochs=2,
# ascent_step_size=5e-6,
# ascent_num_steps=10,
# gamma_lr=0.96,
# batch_size=512,
# half=True):
#
# self.best_score = -np.inf
# best_model = None
# self.ascent_num_steps = ascent_num_steps
# self.ascent_step_size = ascent_step_size
# self.lamda = lamda
# self.radius = radius
# self.gamma = gamma
#
# self.optimizer = optim.Adam(self.model.parameters(), lr=learning_rate)
# lr_scheduler = optim.lr_scheduler.ExponentialLR(self.optimizer, gamma=gamma_lr)
#
# train_loader = torch.utils.data.DataLoader(train_data,
# batch_size=batch_size,
# shuffle=True)
#
# test_loader = torch.utils.data.DataLoader(test_data,
# batch_size=batch_size,
# shuffle=True)
#
# for epoch in range(total_epochs):
# # Make the weights trainable
# self.model.train()
#
# # Placeholder for the respective 2 loss values
# epoch_adv_loss = torch.tensor([0]).type(torch.float32).to(device) # AdvLoss
# epoch_ce_loss = 0 # Cross entropy Loss
#
# batch_idx = -1
# for data, _, target in train_loader:
# batch_idx += 1
# data, target = data.to(device), target.to(device)
# # Data Processing
# data = data.to(torch.float)
# target = target.to(torch.float)
# target = torch.squeeze(target)
#
# self.optimizer.zero_grad()
#
# lab_ind = target == 1
# unl_ind = target == 0
#
# # lab_cnt = max(lab_ind.sum(), 1)
# unl_cnt = max(unl_ind.sum(), 1)
#
# # Extract the logits for cross entropy loss
# logits_start = self.model.half_forward_start(data)
# logits = self.model.half_forward_end(logits_start[lab_ind])
#
# logits = torch.squeeze(logits, dim=1)
# ce_loss = F.binary_cross_entropy_with_logits(logits, target[lab_ind])
# # Add to the epoch variable for printing average CE Loss
# epoch_ce_loss += ce_loss
#
# '''
# Adversarial Loss is calculated only for the positive data points (label==1).
# '''
# if epoch >= only_ce_epochs and unl_cnt > 1:
# logits_start = logits_start[unl_ind]
# # AdvLoss
# if not half:
# adv_loss = self.one_class_adv_loss(data[unl_ind].detach(), target[unl_ind], half)
# else:
# adv_loss = self.one_class_adv_loss(logits_start.detach(), target[unl_ind], half)
# epoch_adv_loss += adv_loss
#
# loss = ce_loss + adv_loss * self.lamda
# else:
# # If only CE based training has to be done
# loss = ce_loss
#
# # Backprop
# loss.backward()
# self.optimizer.step()
#
# epoch_ce_loss = epoch_ce_loss / (batch_idx + 1) # Average CE Loss
# epoch_adv_loss = epoch_adv_loss / (batch_idx + 1) # Average AdvLoss
#
# if verbose:
# test_score = self.test(test_loader)
# if test_score > self.best_score:
# self.best_score = test_score
# best_model = copy.deepcopy(self.model)
#
# print('Epoch: {}, CE Loss: {}, AdvLoss: {}, {}: {}'.format(
# epoch, epoch_ce_loss.item(), epoch_adv_loss.item(),
# 'AUC', test_score))
# lr_scheduler.step()
# if verbose:
# self.model = copy.deepcopy(best_model)
# print('\nBest test {}: {}'.format(
# 'AUC', self.best_score
# ))
#
# def test(self, test_loader, metric='AUC'):
# """Evaluate the model on the given test dataset.
# Parameters
# ----------
# test_loader: Dataloader object for the test dataset.
# metric: Metric used for evaluation (AUC / F1).
# """
# self.model.eval()
# label_score = []
# batch_idx = -1
# for data, target, _ in test_loader:
# batch_idx += 1
# data, target = data.to(device), target.to(device)
# data = data.to(torch.float)
# target = target.to(torch.float)
# target = torch.squeeze(target)
#
# logits = self.model(data)
# logits = torch.squeeze(logits, dim=1)
# sigmoid_logits = torch.sigmoid(logits)
# scores = logits
# label_score += list(zip(target.cpu().data.numpy().tolist(),
# scores.cpu().data.numpy().tolist()))
# # Compute test score
# labels, scores = zip(*label_score)
# labels = np.array(labels)
# scores = np.array(scores)
# if metric == 'AUC':
# test_metric = roc_auc_score(labels, scores)
# if metric == 'alpha':
# test_metric = (scores > 0.5).mean()
# return test_metric
#
# def one_class_adv_loss(self, x_train_data, targets, half=True):
# """Computes the adversarial loss:
# 1) Sample points initially at random around the positive training
# data points
# 2) Gradient ascent to find the most optimal point in set N_i(r)
# classified as +ve (label=0). This is done by maximizing
# the CE loss wrt label 0
# 3) Project the points between spheres of radius R and gamma * R
# (set N_i(r))
# 4) Pass the calculated adversarial points through the model,
# and calculate the CE loss wrt target class 0
#
# Parameters
# ----------
# x_train_data: Batch of data to compute loss on.
# """
# batch_size = len(x_train_data)
# # Randomly sample points around the training data
# # We will perform SGD on these to find the adversarial points
# x_adv = torch.randn(x_train_data.shape).to(device).detach().requires_grad_()
# x_adv_sampled = x_adv + x_train_data
#
# for step in range(self.ascent_num_steps):
# with torch.enable_grad():
#
# new_targets = torch.zeros(batch_size, 1).to(device)
# # new_targets = (1 - targets).to(self.device)
# new_targets = torch.squeeze(new_targets)
# new_targets = new_targets.to(torch.float)
#
# if half:
# logits = self.model.half_forward_end(x_adv_sampled)
# else:
# logits = self.model(x_adv_sampled)
#
# logits = torch.squeeze(logits, dim=1)
# new_loss = F.binary_cross_entropy_with_logits(logits, new_targets)
#
# grad = torch.autograd.grad(new_loss, [x_adv_sampled])[0]
# grad_norm = torch.norm(grad, p=2, dim=tuple(range(1, grad.dim())))
# grad_norm = grad_norm.view(-1, *[1] * (grad.dim() - 1))
# grad_normalized = grad / grad_norm
# with torch.no_grad():
# x_adv_sampled.add_(self.ascent_step_size * grad_normalized)
#
# if (step + 1) % 10 == 0:
# # Project the normal points to the set N_i(r)
# h = x_adv_sampled - x_train_data
# norm_h = torch.sqrt(torch.sum(h ** 2,
# dim=tuple(range(1, h.dim()))))
# alpha = torch.clamp(norm_h, self.radius,
# self.gamma * self.radius).to(device)
# # Make use of broadcast to project h
# proj = (alpha / norm_h).view(-1, *[1] * (h.dim() - 1))
# h = proj * h
# x_adv_sampled = x_train_data + h # These adv_points are now on the surface of hyper-sphere
#
# if half:
# adv_pred = self.model.half_forward_end(x_adv_sampled)
# else:
# adv_pred = self.model(x_adv_sampled)
#
# adv_pred = torch.squeeze(adv_pred, dim=1)
# adv_loss = F.binary_cross_entropy_with_logits(adv_pred, (new_targets))
#
# return adv_loss
#
# def save(self, path):
# torch.save(self.model.state_dict(), os.path.join(path, 'model.pt'))
#
# def load(self, path):
# self.model.load_state_dict(torch.load(os.path.join(path, 'model.pt')))
|
[
"torch.autograd.grad",
"torch.nn.functional.binary_cross_entropy_with_logits",
"torch.randn",
"torch.squeeze",
"torch.clamp",
"torch.cuda.is_available",
"torch.enable_grad",
"torch.zeros",
"torch.no_grad"
] |
[((221, 246), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (244, 246), False, 'import torch\n'), ((1157, 1178), 'torch.squeeze', 'torch.squeeze', (['target'], {}), '(target)\n', (1170, 1178), False, 'import torch\n'), ((1358, 1386), 'torch.squeeze', 'torch.squeeze', (['logits'], {'dim': '(1)'}), '(logits, dim=1)\n', (1371, 1386), False, 'import torch\n'), ((1405, 1455), 'torch.nn.functional.binary_cross_entropy_with_logits', 'F.binary_cross_entropy_with_logits', (['logits', 'target'], {}), '(logits, target)\n', (1439, 1455), True, 'import torch.nn.functional as F\n'), ((5010, 5040), 'torch.squeeze', 'torch.squeeze', (['adv_pred'], {'dim': '(1)'}), '(adv_pred, dim=1)\n', (5023, 5040), False, 'import torch\n'), ((5060, 5117), 'torch.nn.functional.binary_cross_entropy_with_logits', 'F.binary_cross_entropy_with_logits', (['adv_pred', 'new_targets'], {}), '(adv_pred, new_targets)\n', (5094, 5117), True, 'import torch.nn.functional as F\n'), ((15298, 15326), 'torch.squeeze', 'torch.squeeze', (['logits'], {'dim': '(1)'}), '(logits, dim=1)\n', (15311, 15326), False, 'import torch\n'), ((15345, 15404), 'torch.nn.functional.binary_cross_entropy_with_logits', 'F.binary_cross_entropy_with_logits', (['logits', 'target[lab_ind]'], {}), '(logits, target[lab_ind])\n', (15379, 15404), True, 'import torch.nn.functional as F\n'), ((18975, 19005), 'torch.squeeze', 'torch.squeeze', (['adv_pred'], {'dim': '(1)'}), '(adv_pred, dim=1)\n', (18988, 19005), False, 'import torch\n'), ((19025, 19082), 'torch.nn.functional.binary_cross_entropy_with_logits', 'F.binary_cross_entropy_with_logits', (['adv_pred', 'new_targets'], {}), '(adv_pred, new_targets)\n', (19059, 19082), True, 'import torch.nn.functional as F\n'), ((3212, 3231), 'torch.enable_grad', 'torch.enable_grad', ([], {}), '()\n', (3229, 3231), False, 'import torch\n'), ((3394, 3420), 'torch.squeeze', 'torch.squeeze', (['new_targets'], {}), '(new_targets)\n', (3407, 3420), False, 'import torch\n'), ((3675, 3703), 'torch.squeeze', 'torch.squeeze', (['logits'], {'dim': '(1)'}), '(logits, dim=1)\n', (3688, 3703), False, 'import torch\n'), ((3731, 3786), 'torch.nn.functional.binary_cross_entropy_with_logits', 'F.binary_cross_entropy_with_logits', (['logits', 'new_targets'], {}), '(logits, new_targets)\n', (3765, 3786), True, 'import torch.nn.functional as F\n'), ((4084, 4099), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4097, 4099), False, 'import torch\n'), ((17177, 17196), 'torch.enable_grad', 'torch.enable_grad', ([], {}), '()\n', (17194, 17196), False, 'import torch\n'), ((17359, 17385), 'torch.squeeze', 'torch.squeeze', (['new_targets'], {}), '(new_targets)\n', (17372, 17385), False, 'import torch\n'), ((17640, 17668), 'torch.squeeze', 'torch.squeeze', (['logits'], {'dim': '(1)'}), '(logits, dim=1)\n', (17653, 17668), False, 'import torch\n'), ((17696, 17751), 'torch.nn.functional.binary_cross_entropy_with_logits', 'F.binary_cross_entropy_with_logits', (['logits', 'new_targets'], {}), '(logits, new_targets)\n', (17730, 17751), True, 'import torch.nn.functional as F\n'), ((18049, 18064), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (18062, 18064), False, 'import torch\n'), ((3811, 3857), 'torch.autograd.grad', 'torch.autograd.grad', (['new_loss', '[x_adv_sampled]'], {}), '(new_loss, [x_adv_sampled])\n', (3830, 3857), False, 'import torch\n'), ((17776, 17822), 'torch.autograd.grad', 'torch.autograd.grad', (['new_loss', '[x_adv_sampled]'], {}), '(new_loss, [x_adv_sampled])\n', (17795, 17822), False, 'import torch\n'), 
((3264, 3290), 'torch.zeros', 'torch.zeros', (['batch_size', '(1)'], {}), '(batch_size, 1)\n', (3275, 3290), False, 'import torch\n'), ((4481, 4539), 'torch.clamp', 'torch.clamp', (['norm_h', 'self.radius', '(self.gamma * self.radius)'], {}), '(norm_h, self.radius, self.gamma * self.radius)\n', (4492, 4539), False, 'import torch\n'), ((17229, 17255), 'torch.zeros', 'torch.zeros', (['batch_size', '(1)'], {}), '(batch_size, 1)\n', (17240, 17255), False, 'import torch\n'), ((18446, 18504), 'torch.clamp', 'torch.clamp', (['norm_h', 'self.radius', '(self.gamma * self.radius)'], {}), '(norm_h, self.radius, self.gamma * self.radius)\n', (18457, 18504), False, 'import torch\n'), ((3030, 3061), 'torch.randn', 'torch.randn', (['x_train_data.shape'], {}), '(x_train_data.shape)\n', (3041, 3061), False, 'import torch\n'), ((16995, 17026), 'torch.randn', 'torch.randn', (['x_train_data.shape'], {}), '(x_train_data.shape)\n', (17006, 17026), False, 'import torch\n')]
|
import argparse
import asyncio
import logging
import datetime
import sys
import json
from aiofile import AIOFile
async def read_from_socket(host, port):
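    # Reads chat lines from the server, prefixes each with a timestamp and appends it to
    # text.txt, reconnecting with exponential backoff on connection errors.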
timer = 0
reader, writer = None, None
async with AIOFile("text.txt", 'a') as _file:
while True:
try:
if not reader or not writer:
reader, writer = await asyncio.open_connection(host=host, port=port)
text = await reader.readline()
time_now = datetime.datetime.now().strftime("%y.%m.%d %H.%M")
await _file.write('[{}] {}'.format(time_now, text.decode("utf-8")))
print(text.decode("utf-8"))
except (ConnectionRefusedError, ConnectionResetError):
logging.warning('sleep %s seconds', 2 ** timer)
await asyncio.sleep(2 ** timer)
reader, writer = None, None
timer += 1
except asyncio.CancelledError:
                if writer:
                    writer.close()
raise
async def submit_message(host, port, args):
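    # Sends args.text to the chat server: authorises with the token stored in
    # args.token_file when it exists, otherwise registers a new user first.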
timer = 0
try:
async with AIOFile(args.token_file, 'r') as _file:
token = await _file.read()
except FileNotFoundError:
token = None
while True:
try:
reader, writer = await asyncio.open_connection(host=host, port=port)
temp = await reader.readline()
logging.debug(temp.decode("utf-8"))
if not token:
writer.write('\n'.encode())
await register(reader, writer, args)
else:
await authorise(reader, writer, args, token)
writer.write('{}\n\n'.format(args.text.replace('\n', ' ')).encode())
logging.info('text has been successfully sent')
return
except (ConnectionRefusedError, ConnectionResetError):
logging.warning('sleep %s seconds', 2 ** timer)
await asyncio.sleep(2 ** timer)
timer += 1
except asyncio.CancelledError:
writer.close()
raise
async def register(reader, writer, args):
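    # Registers a new chat user and stores the returned account_hash token in args.token_file.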
if not args.user:
logging.error("It's obligated to specidy login if you do not have the correct token file")
logging.error('exiting')
sys.exit()
temp = await reader.readline()
logging.debug(temp.decode("utf-8"))
user = '{}\n'.format(args.user.replace('\n', ' '))
writer.write(user.encode())
answer = await reader.readline()
logging.debug(answer.decode("utf-8"))
answer_dict = json.loads(answer)
token = answer_dict['account_hash']
logging.debug(token)
async with AIOFile(args.token_file, 'w') as _file:
await _file.write(token)
async def authorise(reader, writer, args, token):
writer.write('{}\n'.format(token.replace('\n', '')).encode())
answer = await reader.readline()
logging.debug(answer.decode("utf-8"))
if answer.decode("utf-8") == 'null\n':
logging.warning("Wrong token, let's get another one")
await register(reader, writer, args)
async def main():
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser(description='connect to secret chat')
parser.add_argument('--host', default='minechat.dvmn.org', help='Host to connect')
parser.add_argument('--rport', default=5000, type=int, help='Specify port to receive msg')
parser.add_argument('--sport', default=5050, type=int, help='Specify port to send msg')
parser.add_argument('--user', help="set a username, it's oblicated for first run")
parser.add_argument('--token_file', default="token.txt", help="set a file with token")
parser.add_argument('--text', help="set a text to send")
parser.add_argument('--send_only', action='store_true', help="set a send only mode")
args = parser.parse_args()
tasks = []
if not args.send_only:
tasks.append(asyncio.create_task(read_from_socket(args.host, args.rport)))
if args.text:
tasks.append(asyncio.create_task(submit_message(args.host, args.sport, args)))
for task in tasks:
await task
if __name__ == "__main__":
try:
asyncio.run(main())
except KeyboardInterrupt:
pass
|
[
"logging.error",
"logging.debug",
"argparse.ArgumentParser",
"json.loads",
"logging.basicConfig",
"logging.warning",
"asyncio.open_connection",
"asyncio.sleep",
"logging.info",
"aiofile.AIOFile",
"datetime.datetime.now",
"sys.exit"
] |
[((2552, 2570), 'json.loads', 'json.loads', (['answer'], {}), '(answer)\n', (2562, 2570), False, 'import json\n'), ((2615, 2635), 'logging.debug', 'logging.debug', (['token'], {}), '(token)\n', (2628, 2635), False, 'import logging\n'), ((3093, 3132), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (3112, 3132), False, 'import logging\n'), ((3146, 3207), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""connect to secret chat"""'}), "(description='connect to secret chat')\n", (3169, 3207), False, 'import argparse\n'), ((217, 241), 'aiofile.AIOFile', 'AIOFile', (['"""text.txt"""', '"""a"""'], {}), "('text.txt', 'a')\n", (224, 241), False, 'from aiofile import AIOFile\n'), ((2150, 2250), 'logging.error', 'logging.error', (['"""It\'s obligated to specidy login if you do not have the correct token file"""'], {}), '(\n "It\'s obligated to specidy login if you do not have the correct token file"\n )\n', (2163, 2250), False, 'import logging\n'), ((2249, 2273), 'logging.error', 'logging.error', (['"""exiting"""'], {}), "('exiting')\n", (2262, 2273), False, 'import logging\n'), ((2282, 2292), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2290, 2292), False, 'import sys\n'), ((2651, 2680), 'aiofile.AIOFile', 'AIOFile', (['args.token_file', '"""w"""'], {}), "(args.token_file, 'w')\n", (2658, 2680), False, 'from aiofile import AIOFile\n'), ((2971, 3024), 'logging.warning', 'logging.warning', (['"""Wrong token, let\'s get another one"""'], {}), '("Wrong token, let\'s get another one")\n', (2986, 3024), False, 'import logging\n'), ((1110, 1139), 'aiofile.AIOFile', 'AIOFile', (['args.token_file', '"""r"""'], {}), "(args.token_file, 'r')\n", (1117, 1139), False, 'from aiofile import AIOFile\n'), ((1736, 1783), 'logging.info', 'logging.info', (['"""text has been successfully sent"""'], {}), "('text has been successfully sent')\n", (1748, 1783), False, 'import logging\n'), ((1304, 1349), 'asyncio.open_connection', 'asyncio.open_connection', ([], {'host': 'host', 'port': 'port'}), '(host=host, port=port)\n', (1327, 1349), False, 'import asyncio\n'), ((1878, 1925), 'logging.warning', 'logging.warning', (['"""sleep %s seconds"""', '(2 ** timer)'], {}), "('sleep %s seconds', 2 ** timer)\n", (1893, 1925), False, 'import logging\n'), ((760, 807), 'logging.warning', 'logging.warning', (['"""sleep %s seconds"""', '(2 ** timer)'], {}), "('sleep %s seconds', 2 ** timer)\n", (775, 807), False, 'import logging\n'), ((1944, 1969), 'asyncio.sleep', 'asyncio.sleep', (['(2 ** timer)'], {}), '(2 ** timer)\n', (1957, 1969), False, 'import asyncio\n'), ((378, 423), 'asyncio.open_connection', 'asyncio.open_connection', ([], {'host': 'host', 'port': 'port'}), '(host=host, port=port)\n', (401, 423), False, 'import asyncio\n'), ((498, 521), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (519, 521), False, 'import datetime\n'), ((830, 855), 'asyncio.sleep', 'asyncio.sleep', (['(2 ** timer)'], {}), '(2 ** timer)\n', (843, 855), False, 'import asyncio\n')]
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetGroupResult',
'AwaitableGetGroupResult',
'get_group',
]
@pulumi.output_type
class GetGroupResult:
"""
A collection of values returned by getGroup.
"""
def __init__(__self__, compartment_id=None, defined_tags=None, description=None, freeform_tags=None, group_id=None, id=None, inactive_state=None, name=None, state=None, time_created=None):
if compartment_id and not isinstance(compartment_id, str):
raise TypeError("Expected argument 'compartment_id' to be a str")
pulumi.set(__self__, "compartment_id", compartment_id)
if defined_tags and not isinstance(defined_tags, dict):
raise TypeError("Expected argument 'defined_tags' to be a dict")
pulumi.set(__self__, "defined_tags", defined_tags)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if freeform_tags and not isinstance(freeform_tags, dict):
raise TypeError("Expected argument 'freeform_tags' to be a dict")
pulumi.set(__self__, "freeform_tags", freeform_tags)
if group_id and not isinstance(group_id, str):
raise TypeError("Expected argument 'group_id' to be a str")
pulumi.set(__self__, "group_id", group_id)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if inactive_state and not isinstance(inactive_state, str):
raise TypeError("Expected argument 'inactive_state' to be a str")
pulumi.set(__self__, "inactive_state", inactive_state)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if state and not isinstance(state, str):
raise TypeError("Expected argument 'state' to be a str")
pulumi.set(__self__, "state", state)
if time_created and not isinstance(time_created, str):
raise TypeError("Expected argument 'time_created' to be a str")
pulumi.set(__self__, "time_created", time_created)
@property
@pulumi.getter(name="compartmentId")
def compartment_id(self) -> str:
"""
The OCID of the tenancy containing the group.
"""
return pulumi.get(self, "compartment_id")
@property
@pulumi.getter(name="definedTags")
def defined_tags(self) -> Mapping[str, Any]:
"""
Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Operations.CostCenter": "42"}`
"""
return pulumi.get(self, "defined_tags")
@property
@pulumi.getter
def description(self) -> str:
"""
The description you assign to the group. Does not have to be unique, and it's changeable.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="freeformTags")
def freeform_tags(self) -> Mapping[str, Any]:
"""
Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Department": "Finance"}`
"""
return pulumi.get(self, "freeform_tags")
@property
@pulumi.getter(name="groupId")
def group_id(self) -> str:
return pulumi.get(self, "group_id")
@property
@pulumi.getter
def id(self) -> str:
"""
The OCID of the group.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="inactiveState")
def inactive_state(self) -> str:
"""
The detailed status of INACTIVE lifecycleState.
"""
return pulumi.get(self, "inactive_state")
@property
@pulumi.getter
def name(self) -> str:
"""
The name you assign to the group during creation. The name must be unique across all groups in the tenancy and cannot be changed.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def state(self) -> str:
"""
The group's current state.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="timeCreated")
def time_created(self) -> str:
"""
Date and time the group was created, in the format defined by RFC3339. Example: `2016-08-25T21:10:29.600Z`
"""
return pulumi.get(self, "time_created")
class AwaitableGetGroupResult(GetGroupResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetGroupResult(
compartment_id=self.compartment_id,
defined_tags=self.defined_tags,
description=self.description,
freeform_tags=self.freeform_tags,
group_id=self.group_id,
id=self.id,
inactive_state=self.inactive_state,
name=self.name,
state=self.state,
time_created=self.time_created)
def get_group(group_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetGroupResult:
"""
This data source provides details about a specific Group resource in Oracle Cloud Infrastructure Identity service.
Gets the specified group's information.
This operation does not return a list of all the users in the group. To do that, use
[ListUserGroupMemberships](https://docs.cloud.oracle.com/iaas/api/#/en/identity/20160918/UserGroupMembership/ListUserGroupMemberships) and
provide the group's OCID as a query parameter in the request.
## Example Usage
```python
import pulumi
import pulumi_oci as oci
test_group = oci.identity.get_group(group_id=oci_identity_group["test_group"]["id"])
```
:param str group_id: The OCID of the group.
"""
__args__ = dict()
__args__['groupId'] = group_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('oci:identity/getGroup:getGroup', __args__, opts=opts, typ=GetGroupResult).value
return AwaitableGetGroupResult(
compartment_id=__ret__.compartment_id,
defined_tags=__ret__.defined_tags,
description=__ret__.description,
freeform_tags=__ret__.freeform_tags,
group_id=__ret__.group_id,
id=__ret__.id,
inactive_state=__ret__.inactive_state,
name=__ret__.name,
state=__ret__.state,
time_created=__ret__.time_created)
|
[
"pulumi.get",
"pulumi.getter",
"pulumi.set",
"pulumi.InvokeOptions",
"pulumi.runtime.invoke"
] |
[((2597, 2632), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""compartmentId"""'}), "(name='compartmentId')\n", (2610, 2632), False, 'import pulumi\n'), ((2818, 2851), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""definedTags"""'}), "(name='definedTags')\n", (2831, 2851), False, 'import pulumi\n'), ((3484, 3518), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""freeformTags"""'}), "(name='freeformTags')\n", (3497, 3518), False, 'import pulumi\n'), ((3943, 3972), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""groupId"""'}), "(name='groupId')\n", (3956, 3972), False, 'import pulumi\n'), ((4220, 4255), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""inactiveState"""'}), "(name='inactiveState')\n", (4233, 4255), False, 'import pulumi\n'), ((4868, 4901), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""timeCreated"""'}), "(name='timeCreated')\n", (4881, 4901), False, 'import pulumi\n'), ((871, 925), 'pulumi.set', 'pulumi.set', (['__self__', '"""compartment_id"""', 'compartment_id'], {}), "(__self__, 'compartment_id', compartment_id)\n", (881, 925), False, 'import pulumi\n'), ((1075, 1125), 'pulumi.set', 'pulumi.set', (['__self__', '"""defined_tags"""', 'defined_tags'], {}), "(__self__, 'defined_tags', defined_tags)\n", (1085, 1125), False, 'import pulumi\n'), ((1270, 1318), 'pulumi.set', 'pulumi.set', (['__self__', '"""description"""', 'description'], {}), "(__self__, 'description', description)\n", (1280, 1318), False, 'import pulumi\n'), ((1471, 1523), 'pulumi.set', 'pulumi.set', (['__self__', '"""freeform_tags"""', 'freeform_tags'], {}), "(__self__, 'freeform_tags', freeform_tags)\n", (1481, 1523), False, 'import pulumi\n'), ((1659, 1701), 'pulumi.set', 'pulumi.set', (['__self__', '"""group_id"""', 'group_id'], {}), "(__self__, 'group_id', group_id)\n", (1669, 1701), False, 'import pulumi\n'), ((1819, 1849), 'pulumi.set', 'pulumi.set', (['__self__', '"""id"""', 'id'], {}), "(__self__, 'id', id)\n", (1829, 1849), False, 'import pulumi\n'), ((2003, 2057), 'pulumi.set', 'pulumi.set', (['__self__', '"""inactive_state"""', 'inactive_state'], {}), "(__self__, 'inactive_state', inactive_state)\n", (2013, 2057), False, 'import pulumi\n'), ((2181, 2215), 'pulumi.set', 'pulumi.set', (['__self__', '"""name"""', 'name'], {}), "(__self__, 'name', name)\n", (2191, 2215), False, 'import pulumi\n'), ((2342, 2378), 'pulumi.set', 'pulumi.set', (['__self__', '"""state"""', 'state'], {}), "(__self__, 'state', state)\n", (2352, 2378), False, 'import pulumi\n'), ((2526, 2576), 'pulumi.set', 'pulumi.set', (['__self__', '"""time_created"""', 'time_created'], {}), "(__self__, 'time_created', time_created)\n", (2536, 2576), False, 'import pulumi\n'), ((2763, 2797), 'pulumi.get', 'pulumi.get', (['self', '"""compartment_id"""'], {}), "(self, 'compartment_id')\n", (2773, 2797), False, 'import pulumi\n'), ((3194, 3226), 'pulumi.get', 'pulumi.get', (['self', '"""defined_tags"""'], {}), "(self, 'defined_tags')\n", (3204, 3226), False, 'import pulumi\n'), ((3432, 3463), 'pulumi.get', 'pulumi.get', (['self', '"""description"""'], {}), "(self, 'description')\n", (3442, 3463), False, 'import pulumi\n'), ((3889, 3922), 'pulumi.get', 'pulumi.get', (['self', '"""freeform_tags"""'], {}), "(self, 'freeform_tags')\n", (3899, 3922), False, 'import pulumi\n'), ((4019, 4047), 'pulumi.get', 'pulumi.get', (['self', '"""group_id"""'], {}), "(self, 'group_id')\n", (4029, 4047), False, 'import pulumi\n'), ((4177, 4199), 'pulumi.get', 'pulumi.get', (['self', '"""id"""'], {}), "(self, 'id')\n", (4187, 4199), False, 
'import pulumi\n'), ((4388, 4422), 'pulumi.get', 'pulumi.get', (['self', '"""inactive_state"""'], {}), "(self, 'inactive_state')\n", (4398, 4422), False, 'import pulumi\n'), ((4661, 4685), 'pulumi.get', 'pulumi.get', (['self', '"""name"""'], {}), "(self, 'name')\n", (4671, 4685), False, 'import pulumi\n'), ((4822, 4847), 'pulumi.get', 'pulumi.get', (['self', '"""state"""'], {}), "(self, 'state')\n", (4832, 4847), False, 'import pulumi\n'), ((5092, 5124), 'pulumi.get', 'pulumi.get', (['self', '"""time_created"""'], {}), "(self, 'time_created')\n", (5102, 5124), False, 'import pulumi\n'), ((6642, 6664), 'pulumi.InvokeOptions', 'pulumi.InvokeOptions', ([], {}), '()\n', (6662, 6664), False, 'import pulumi\n'), ((6756, 6856), 'pulumi.runtime.invoke', 'pulumi.runtime.invoke', (['"""oci:identity/getGroup:getGroup"""', '__args__'], {'opts': 'opts', 'typ': 'GetGroupResult'}), "('oci:identity/getGroup:getGroup', __args__, opts=opts,\n typ=GetGroupResult)\n", (6777, 6856), False, 'import pulumi\n')]
|
import unittest
from flask.ext.imagine.filters.interface import ImagineFilterInterface
class TestImagineFilterInterface(unittest.TestCase):
interface = None
def setUp(self):
self.interface = ImagineFilterInterface()
def test_not_implemented_apply_method(self):
with self.assertRaises(NotImplementedError):
self.interface.apply('')
|
[
"flask.ext.imagine.filters.interface.ImagineFilterInterface"
] |
[((210, 234), 'flask.ext.imagine.filters.interface.ImagineFilterInterface', 'ImagineFilterInterface', ([], {}), '()\n', (232, 234), False, 'from flask.ext.imagine.filters.interface import ImagineFilterInterface\n')]
|
#!/usr/bin/env python3
"""Separates Altera's junky concatenated CSV files into unique files
We do this in two steps:
1. Move all existing *.txt files to *.tmp files
2. Go through and break up at the start of each CSV file into a new file
"""
import os
import sys
from pathlib import Path
def main(root: str):
root_path = Path(root)
# Stash old files
tmp_files = [f for f in root_path.glob('*.txt')]
for tmp_file in tmp_files:
os.rename(tmp_file, tmp_file.with_suffix('.tmp'))
# Loop through and separate the new files
orig_files = [f for f in root_path.glob('*.tmp')]
for orig_file in orig_files:
idx = 0
new_file_name = orig_file.parent.joinpath(orig_file.stem + '-' +
str(idx) + '.txt')
new_file = open(new_file_name, 'w')
with open(orig_file, 'r', encoding='cp1252') as orig:
lines = orig.readlines()
for line in lines:
if line.startswith('Bank'):
new_file.close()
idx = idx + 1
new_file_name = orig_file.parent.joinpath(
orig_file.stem + '-' + str(idx) + '.txt')
new_file = open(new_file_name, 'w')
new_file.write(line)
else:
new_file.write(line)
new_file.close()
if __name__ == '__main__':
if len(sys.argv) == 1:
main(os.getcwd())
elif len(sys.argv) == 2:
main(sys.argv[1])
else:
print("Wrong number of args.")
|
[
"os.getcwd",
"pathlib.Path"
] |
[((330, 340), 'pathlib.Path', 'Path', (['root'], {}), '(root)\n', (334, 340), False, 'from pathlib import Path\n'), ((1473, 1484), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1482, 1484), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.contrib import admin
from .. import models
from .bind import BindAdmin
admin.site.register(models.Bind, BindAdmin)
|
[
"django.contrib.admin.site.register"
] |
[((165, 208), 'django.contrib.admin.site.register', 'admin.site.register', (['models.Bind', 'BindAdmin'], {}), '(models.Bind, BindAdmin)\n', (184, 208), False, 'from django.contrib import admin\n')]
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import utils
import math
class conv5x5(nn.Module):
def __init__(self, in_channels, out_channels, stride=1, dilation=1):
super(conv5x5, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=5, stride=stride,
padding=2*dilation, dilation=dilation, bias=False)
self.conv = utils.spectral_norm(self.conv)
def forward(self, x):
return self.conv(x)
class conv3x3(nn.Module):
def __init__(self, in_channels, out_channels, stride=1, dilation=1):
super(conv3x3, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride,
padding=dilation, dilation=dilation, padding_mode='reflect', bias=False)
self.conv = utils.spectral_norm(self.conv)
def forward(self, x):
return self.conv(x)
class conv1x1(nn.Module):
def __init__(self, in_channels, out_channels, stride=1):
super(conv1x1, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
self.conv = utils.spectral_norm(self.conv)
def forward(self, x):
return self.conv(x)
class conv_zeros(nn.Module):
def __init__(self, in_channels, out_channels):
super(conv_zeros, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False)
nn.init.constant_(self.conv.weight, 0)
def forward(self, x):
return self.conv(x)
class PAKA3x3(nn.Module):
def __init__(self, in_channels, out_channels, stride=1, dilation=1):
super(PAKA3x3, self).__init__()
self.conv = PAKA2d(in_channels, out_channels, kernel_size=3, stride=stride,
padding=dilation, dilation=dilation, bias=False)
self.conv = utils.spectral_norm(self.conv)
def forward(self, x):
return self.conv(x)
class PAKA2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=0, dilation=1, bias=False):
super(PAKA2d, self).__init__()
        self.kernel_size = kernel_size
        self.stride = stride
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.padding = (padding, padding)
self.weight = torch.nn.Parameter(torch.Tensor(out_channels, in_channels, kernel_size ** 2, 1, 1))
if bias:
self.bias = torch.nn.Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.conv_c = nn.Sequential(conv1x1(in_channels, in_channels, stride),
nn.ReLU(True),
conv_zeros(in_channels, in_channels),
)
self.conv_d = nn.Sequential(conv3x3(in_channels, in_channels, stride, dilation=dilation),
nn.ReLU(True),
conv_zeros(in_channels, kernel_size ** 2),
)
self.unfold = nn.Unfold(kernel_size, padding=padding, stride=stride, dilation=dilation)
self.reset_parameters()
def reset_parameters(self):
nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(self.bias, -bound, bound)
def extra_repr(self):
s = ('{in_channels}, {out_channels}, kernel_size={kernel_size}'
', stride={stride}')
if self.padding != (0,) * len(self.padding):
s += ', padding={padding}'
if self.bias is None:
s += ', bias=False'
return s.format(**self.__dict__)
def forward(self, x):
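        # Unfold k*k patches to (b, n, k^2, H', W'), modulate each element by
        # 1 + tanh(spatial branch conv_d + channel branch conv_c), then apply a
        # (k^2 x 1 x 1) conv3d with self.weight so channels and kernel positions are
        # summed like an ordinary convolution whose kernel is adapted per pixel.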
b, n, h, w = x.shape
return F.conv3d(self.unfold(x).view(b, n, self.kernel_size ** 2, h//self.stride, w//self.stride) * (1 + torch.tanh(self.conv_d(x).unsqueeze(1)+self.conv_c(x).unsqueeze(2))),
self.weight, self.bias).squeeze(2)
class downsample(nn.Module):
def __init__(self, in_channels, hidden_channels, out_channels):
super(downsample, self).__init__()
self.conv1 = conv3x3(in_channels, hidden_channels)
self.conv2 = conv3x3(hidden_channels, out_channels, stride=2)
def forward(self, x):
h = self.conv1(x)
h = F.elu(h)
h = self.conv2(h)
h = F.elu(h)
return h
class upsample(nn.Module):
def __init__(self, in_channels, out_channels):
super(upsample, self).__init__()
self.conv1 = conv3x3(in_channels, out_channels*4)
self.conv2 = conv3x3(out_channels, out_channels)
def forward(self, x):
h = self.conv1(x)
h = F.pixel_shuffle(h, 2)
h = F.elu(h)
h = self.conv2(h)
h = F.elu(h)
return h
|
[
"torch.nn.init._calculate_fan_in_and_fan_out",
"torch.nn.ReLU",
"math.sqrt",
"torch.nn.utils.spectral_norm",
"torch.nn.Conv2d",
"torch.nn.Unfold",
"torch.nn.init.uniform_",
"torch.nn.init.constant_",
"torch.Tensor",
"torch.nn.functional.pixel_shuffle",
"torch.nn.functional.elu"
] |
[((267, 391), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels'], {'kernel_size': '(5)', 'stride': 'stride', 'padding': '(2 * dilation)', 'dilation': 'dilation', 'bias': '(False)'}), '(in_channels, out_channels, kernel_size=5, stride=stride, padding=\n 2 * dilation, dilation=dilation, bias=False)\n', (276, 391), True, 'import torch.nn as nn\n'), ((435, 465), 'torch.nn.utils.spectral_norm', 'utils.spectral_norm', (['self.conv'], {}), '(self.conv)\n', (454, 465), False, 'from torch.nn import utils\n'), ((681, 825), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels'], {'kernel_size': '(3)', 'stride': 'stride', 'padding': 'dilation', 'dilation': 'dilation', 'padding_mode': '"""reflect"""', 'bias': '(False)'}), "(in_channels, out_channels, kernel_size=3, stride=stride, padding=\n dilation, dilation=dilation, padding_mode='reflect', bias=False)\n", (690, 825), True, 'import torch.nn as nn\n'), ((871, 901), 'torch.nn.utils.spectral_norm', 'utils.spectral_norm', (['self.conv'], {}), '(self.conv)\n', (890, 901), False, 'from torch.nn import utils\n'), ((1104, 1182), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels'], {'kernel_size': '(1)', 'stride': 'stride', 'bias': '(False)'}), '(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)\n', (1113, 1182), True, 'import torch.nn as nn\n'), ((1203, 1233), 'torch.nn.utils.spectral_norm', 'utils.spectral_norm', (['self.conv'], {}), '(self.conv)\n', (1222, 1233), False, 'from torch.nn import utils\n'), ((1433, 1496), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels'], {'kernel_size': '(1)', 'bias': '(False)'}), '(in_channels, out_channels, kernel_size=1, bias=False)\n', (1442, 1496), True, 'import torch.nn as nn\n'), ((1505, 1543), 'torch.nn.init.constant_', 'nn.init.constant_', (['self.conv.weight', '(0)'], {}), '(self.conv.weight, 0)\n', (1522, 1543), True, 'import torch.nn as nn\n'), ((1922, 1952), 'torch.nn.utils.spectral_norm', 'utils.spectral_norm', (['self.conv'], {}), '(self.conv)\n', (1941, 1952), False, 'from torch.nn import utils\n'), ((3043, 3116), 'torch.nn.Unfold', 'nn.Unfold', (['kernel_size'], {'padding': 'padding', 'stride': 'stride', 'dilation': 'dilation'}), '(kernel_size, padding=padding, stride=stride, dilation=dilation)\n', (3052, 3116), True, 'import torch.nn as nn\n'), ((4412, 4420), 'torch.nn.functional.elu', 'F.elu', (['h'], {}), '(h)\n', (4417, 4420), True, 'import torch.nn.functional as F\n'), ((4459, 4467), 'torch.nn.functional.elu', 'F.elu', (['h'], {}), '(h)\n', (4464, 4467), True, 'import torch.nn.functional as F\n'), ((4786, 4807), 'torch.nn.functional.pixel_shuffle', 'F.pixel_shuffle', (['h', '(2)'], {}), '(h, 2)\n', (4801, 4807), True, 'import torch.nn.functional as F\n'), ((4820, 4828), 'torch.nn.functional.elu', 'F.elu', (['h'], {}), '(h)\n', (4825, 4828), True, 'import torch.nn.functional as F\n'), ((4867, 4875), 'torch.nn.functional.elu', 'F.elu', (['h'], {}), '(h)\n', (4872, 4875), True, 'import torch.nn.functional as F\n'), ((2293, 2356), 'torch.Tensor', 'torch.Tensor', (['out_channels', 'in_channels', '(kernel_size ** 2)', '(1)', '(1)'], {}), '(out_channels, in_channels, kernel_size ** 2, 1, 1)\n', (2305, 2356), False, 'import torch\n'), ((2626, 2639), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (2633, 2639), True, 'import torch.nn as nn\n'), ((2888, 2901), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (2895, 2901), True, 'import torch.nn as nn\n'), ((3303, 3353), 'torch.nn.init._calculate_fan_in_and_fan_out', 
'nn.init._calculate_fan_in_and_fan_out', (['self.weight'], {}), '(self.weight)\n', (3340, 3353), True, 'import torch.nn as nn\n'), ((3408, 3450), 'torch.nn.init.uniform_', 'nn.init.uniform_', (['self.bias', '(-bound)', 'bound'], {}), '(self.bias, -bound, bound)\n', (3424, 3450), True, 'import torch.nn as nn\n'), ((2419, 2445), 'torch.Tensor', 'torch.Tensor', (['out_channels'], {}), '(out_channels)\n', (2431, 2445), False, 'import torch\n'), ((3231, 3243), 'math.sqrt', 'math.sqrt', (['(5)'], {}), '(5)\n', (3240, 3243), False, 'import math\n'), ((3378, 3395), 'math.sqrt', 'math.sqrt', (['fan_in'], {}), '(fan_in)\n', (3387, 3395), False, 'import math\n')]
|
import pytest
from dvc.dvcfile import Lockfile, LockfileCorruptedError
from dvc.stage import PipelineStage
from dvc.utils.serialize import dump_yaml
def test_stage_dump_no_outs_deps(tmp_dir, dvc):
stage = PipelineStage(name="s1", repo=dvc, path="path", cmd="command")
lockfile = Lockfile(dvc, "path.lock")
lockfile.dump(stage)
assert lockfile.load() == {"s1": {"cmd": "command"}}
def test_stage_dump_when_already_exists(tmp_dir, dvc):
data = {"s1": {"cmd": "command", "deps": [], "outs": []}}
dump_yaml("path.lock", data)
stage = PipelineStage(name="s2", repo=dvc, path="path", cmd="command2")
lockfile = Lockfile(dvc, "path.lock")
lockfile.dump(stage)
assert lockfile.load() == {
**data,
"s2": {"cmd": "command2"},
}
def test_stage_dump_with_deps_and_outs(tmp_dir, dvc):
data = {
"s1": {
"cmd": "command",
"deps": [{"md5": "1.txt", "path": "checksum"}],
"outs": [{"md5": "2.txt", "path": "checksum"}],
}
}
dump_yaml("path.lock", data)
lockfile = Lockfile(dvc, "path.lock")
stage = PipelineStage(name="s2", repo=dvc, path="path", cmd="command2")
lockfile.dump(stage)
assert lockfile.load() == {
**data,
"s2": {"cmd": "command2"},
}
def test_stage_overwrites_if_already_exists(tmp_dir, dvc):
lockfile = Lockfile(dvc, "path.lock",)
stage = PipelineStage(name="s2", repo=dvc, path="path", cmd="command2")
lockfile.dump(stage)
stage = PipelineStage(name="s2", repo=dvc, path="path", cmd="command3")
lockfile.dump(stage)
assert lockfile.load() == {
"s2": {"cmd": "command3"},
}
def test_load_when_lockfile_does_not_exist(tmp_dir, dvc):
assert {} == Lockfile(dvc, "pipelines.lock").load()
@pytest.mark.parametrize(
"corrupt_data",
[
{"s1": {"outs": []}},
{"s1": {}},
{
"s1": {
"cmd": "command",
"outs": [
{"md5": "checksum", "path": "path", "random": "value"}
],
}
},
{"s1": {"cmd": "command", "deps": [{"md5": "checksum"}]}},
],
)
def test_load_when_lockfile_is_corrupted(tmp_dir, dvc, corrupt_data):
dump_yaml("Dvcfile.lock", corrupt_data)
lockfile = Lockfile(dvc, "Dvcfile.lock")
with pytest.raises(LockfileCorruptedError) as exc_info:
lockfile.load()
assert "Dvcfile.lock" in str(exc_info.value)
|
[
"dvc.stage.PipelineStage",
"dvc.utils.serialize.dump_yaml",
"pytest.raises",
"dvc.dvcfile.Lockfile",
"pytest.mark.parametrize"
] |
[((1797, 2037), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""corrupt_data"""', "[{'s1': {'outs': []}}, {'s1': {}}, {'s1': {'cmd': 'command', 'outs': [{\n 'md5': 'checksum', 'path': 'path', 'random': 'value'}]}}, {'s1': {'cmd':\n 'command', 'deps': [{'md5': 'checksum'}]}}]"], {}), "('corrupt_data', [{'s1': {'outs': []}}, {'s1': {}},\n {'s1': {'cmd': 'command', 'outs': [{'md5': 'checksum', 'path': 'path',\n 'random': 'value'}]}}, {'s1': {'cmd': 'command', 'deps': [{'md5':\n 'checksum'}]}}])\n", (1820, 2037), False, 'import pytest\n'), ((212, 274), 'dvc.stage.PipelineStage', 'PipelineStage', ([], {'name': '"""s1"""', 'repo': 'dvc', 'path': '"""path"""', 'cmd': '"""command"""'}), "(name='s1', repo=dvc, path='path', cmd='command')\n", (225, 274), False, 'from dvc.stage import PipelineStage\n'), ((290, 316), 'dvc.dvcfile.Lockfile', 'Lockfile', (['dvc', '"""path.lock"""'], {}), "(dvc, 'path.lock')\n", (298, 316), False, 'from dvc.dvcfile import Lockfile, LockfileCorruptedError\n'), ((522, 550), 'dvc.utils.serialize.dump_yaml', 'dump_yaml', (['"""path.lock"""', 'data'], {}), "('path.lock', data)\n", (531, 550), False, 'from dvc.utils.serialize import dump_yaml\n'), ((563, 626), 'dvc.stage.PipelineStage', 'PipelineStage', ([], {'name': '"""s2"""', 'repo': 'dvc', 'path': '"""path"""', 'cmd': '"""command2"""'}), "(name='s2', repo=dvc, path='path', cmd='command2')\n", (576, 626), False, 'from dvc.stage import PipelineStage\n'), ((642, 668), 'dvc.dvcfile.Lockfile', 'Lockfile', (['dvc', '"""path.lock"""'], {}), "(dvc, 'path.lock')\n", (650, 668), False, 'from dvc.dvcfile import Lockfile, LockfileCorruptedError\n'), ((1038, 1066), 'dvc.utils.serialize.dump_yaml', 'dump_yaml', (['"""path.lock"""', 'data'], {}), "('path.lock', data)\n", (1047, 1066), False, 'from dvc.utils.serialize import dump_yaml\n'), ((1082, 1108), 'dvc.dvcfile.Lockfile', 'Lockfile', (['dvc', '"""path.lock"""'], {}), "(dvc, 'path.lock')\n", (1090, 1108), False, 'from dvc.dvcfile import Lockfile, LockfileCorruptedError\n'), ((1121, 1184), 'dvc.stage.PipelineStage', 'PipelineStage', ([], {'name': '"""s2"""', 'repo': 'dvc', 'path': '"""path"""', 'cmd': '"""command2"""'}), "(name='s2', repo=dvc, path='path', cmd='command2')\n", (1134, 1184), False, 'from dvc.stage import PipelineStage\n'), ((1375, 1401), 'dvc.dvcfile.Lockfile', 'Lockfile', (['dvc', '"""path.lock"""'], {}), "(dvc, 'path.lock')\n", (1383, 1401), False, 'from dvc.dvcfile import Lockfile, LockfileCorruptedError\n'), ((1415, 1478), 'dvc.stage.PipelineStage', 'PipelineStage', ([], {'name': '"""s2"""', 'repo': 'dvc', 'path': '"""path"""', 'cmd': '"""command2"""'}), "(name='s2', repo=dvc, path='path', cmd='command2')\n", (1428, 1478), False, 'from dvc.stage import PipelineStage\n'), ((1516, 1579), 'dvc.stage.PipelineStage', 'PipelineStage', ([], {'name': '"""s2"""', 'repo': 'dvc', 'path': '"""path"""', 'cmd': '"""command3"""'}), "(name='s2', repo=dvc, path='path', cmd='command3')\n", (1529, 1579), False, 'from dvc.stage import PipelineStage\n'), ((2257, 2296), 'dvc.utils.serialize.dump_yaml', 'dump_yaml', (['"""Dvcfile.lock"""', 'corrupt_data'], {}), "('Dvcfile.lock', corrupt_data)\n", (2266, 2296), False, 'from dvc.utils.serialize import dump_yaml\n'), ((2312, 2341), 'dvc.dvcfile.Lockfile', 'Lockfile', (['dvc', '"""Dvcfile.lock"""'], {}), "(dvc, 'Dvcfile.lock')\n", (2320, 2341), False, 'from dvc.dvcfile import Lockfile, LockfileCorruptedError\n'), ((2351, 2388), 'pytest.raises', 'pytest.raises', (['LockfileCorruptedError'], {}), '(LockfileCorruptedError)\n', (2364, 
2388), False, 'import pytest\n'), ((1755, 1786), 'dvc.dvcfile.Lockfile', 'Lockfile', (['dvc', '"""pipelines.lock"""'], {}), "(dvc, 'pipelines.lock')\n", (1763, 1786), False, 'from dvc.dvcfile import Lockfile, LockfileCorruptedError\n')]
|
# Import modules
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
rc('text', usetex=True)
# Computational domain
xmin = -1.0
xmax = 1.0
xn = 101 # Number of sampling points of the function "f" on the interval "[xmin, xmax]"
ymin = xmin
ymax = xmax
yn = xn # Number of sampling points of the function "f" on the interval "[ymin, ymax]"
xngrad = 10 # Every "xngrad"-th sampling point is displayed along the "x" axis
yngrad = xngrad # Every "yngrad"-th sampling point is displayed along the "y" axis
# Build the grid
x, y = np.meshgrid(np.linspace(xmin, xmax, xn), np.linspace(ymin, ymax, yn))
# Evaluate the function
f = np.sin(2.0 * x) + np.cos(2.0 * y)
# Compute the derivatives of "f" with respect to "x" and "y"
fx = 2.0 * np.cos(2.0 * x)
fy = -2.0 * np.sin(2.0 * y)
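# Sanity check added for illustration (not in the original script): compare the
# analytic derivatives above with numerical ones from np.gradient. Axis 0 of "f"
# varies with "y" and axis 1 with "x", so the gradients come back in that order.
fy_num, fx_num = np.gradient(f, y[:, 0], x[0, :])
print("max |fx - fx_num| =", np.max(np.abs(fx - fx_num)))
print("max |fy - fy_num| =", np.max(np.abs(fy - fy_num)))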
# Plotting
fig, ax = plt.subplots(figsize=(12.0 / 2.54, 8.0 / 2.54))
im = ax.imshow(f, extent=(xmin, xmax, ymin, ymax), cmap="bwr",
vmin=-np.abs(f).max(), vmax=np.abs(f).max())
ax.quiver( x[::xngrad, ::xngrad], y[::yngrad, ::yngrad],
fx[::xngrad, ::xngrad], fy[::yngrad, ::yngrad])
ax.set_xlabel("$x$")
ax.set_ylabel("$y$")
ax.set_xticks(np.linspace(xmin, xmax, 6))
ax.set_yticks(np.linspace(ymin, ymax, 6))
fig.colorbar(im)
plt.show()
fig.savefig("../latex/fig-f-gradf.pdf")
|
[
"matplotlib.rc",
"matplotlib.pyplot.show",
"numpy.abs",
"numpy.sin",
"numpy.linspace",
"numpy.cos",
"matplotlib.pyplot.subplots"
] |
[((94, 117), 'matplotlib.rc', 'rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (96, 117), False, 'from matplotlib import rc\n'), ((783, 830), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12.0 / 2.54, 8.0 / 2.54)'}), '(figsize=(12.0 / 2.54, 8.0 / 2.54))\n', (795, 830), True, 'import matplotlib.pyplot as plt\n'), ((1213, 1223), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1221, 1223), True, 'import matplotlib.pyplot as plt\n'), ((546, 573), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', 'xn'], {}), '(xmin, xmax, xn)\n', (557, 573), True, 'import numpy as np\n'), ((575, 602), 'numpy.linspace', 'np.linspace', (['ymin', 'ymax', 'yn'], {}), '(ymin, ymax, yn)\n', (586, 602), True, 'import numpy as np\n'), ((627, 642), 'numpy.sin', 'np.sin', (['(2.0 * x)'], {}), '(2.0 * x)\n', (633, 642), True, 'import numpy as np\n'), ((645, 660), 'numpy.cos', 'np.cos', (['(2.0 * y)'], {}), '(2.0 * y)\n', (651, 660), True, 'import numpy as np\n'), ((714, 729), 'numpy.cos', 'np.cos', (['(2.0 * x)'], {}), '(2.0 * x)\n', (720, 729), True, 'import numpy as np\n'), ((742, 757), 'numpy.sin', 'np.sin', (['(2.0 * y)'], {}), '(2.0 * y)\n', (748, 757), True, 'import numpy as np\n'), ((1126, 1152), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', '(6)'], {}), '(xmin, xmax, 6)\n', (1137, 1152), True, 'import numpy as np\n'), ((1168, 1194), 'numpy.linspace', 'np.linspace', (['ymin', 'ymax', '(6)'], {}), '(ymin, ymax, 6)\n', (1179, 1194), True, 'import numpy as np\n'), ((937, 946), 'numpy.abs', 'np.abs', (['f'], {}), '(f)\n', (943, 946), True, 'import numpy as np\n'), ((915, 924), 'numpy.abs', 'np.abs', (['f'], {}), '(f)\n', (921, 924), True, 'import numpy as np\n')]
|
import requests
from bs4 import BeautifulSoup
from selenium import webdriver
# url = 'https://fontawesome.com/cheatsheet/pro'
# req = requests.get(url)
# markup = req.text
# print(markup)
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
browser = webdriver.Chrome()
browser.get("https://fontawesome.com/cheatsheet/pro")
delay = 15 # seconds
icon_list = '<select>\n\t<option value="">No icon</option>\n'
def make_icon_format_string(font_awesome_icon):
return "\t<option>" + font_awesome_icon + '</option>'
# Please enter blacklist items in the following format
blacklist = ['far fa-reply', 'fal fa-reply', 'fas fa-reply', 'far fa-republican', 'fal fa-republican',
'fas fa-republican', 'fab fa-youtube-square', 'fas fa-angle-up',
'fas fa-hand-middle-finger', 'far fa-hand-middle-finger', 'fal fa-hand-middle-finger',
'fas fa-bong', 'fal fa-bong', 'far fa-bong', 'fas fa-cannabis', 'fal fa-cannabis', 'far fa-cannabis',
'fas fa-mosque', 'far fa-mosque', 'fal fa-mosque', 'fal fa-church', 'far fa-church', 'fas fa-church',
'far fa-clipboard', 'far fa-democrat', 'fas fa-democrat', 'fal fa-democrat']
blacklist = [make_icon_format_string(string) for string in blacklist]
try:
myElem = WebDriverWait(browser, delay).until(EC.presence_of_element_located((By.ID, 'reply')))
soup = BeautifulSoup(browser.page_source, features='html.parser')
solid_icons = soup.find("section", {'id': 'solid'}).find_all('article')
solid_icon_values = ['\t<option>fas fa-' + x.attrs['id'] + '</option>' for x in solid_icons
if '\t<option>fas fa-' + x.attrs['id'] + '</option>' not in blacklist]
icon_list += '\n'.join(solid_icon_values)
regular_icons = soup.find("section", {'id': 'regular'}).find_all('article')
regular_icon_values = ['\t<option>far fa-' + x.attrs['id'] + '</option>' for x in regular_icons
if '\t<option>far fa-' + x.attrs['id'] + '</option>' not in blacklist]
icon_list += '\n'.join(regular_icon_values)
light_icons = soup.find("section", {'id': 'light'}).find_all('article')
light_icon_values = ['\t<option>fal fa-' + x.attrs['id'] + '</option>' for x in light_icons
if '\t<option>fal fa-' + x.attrs['id'] + '</option>' not in blacklist]
icon_list += '\n'.join(light_icon_values)
brand_icons = soup.find("section", {'id': 'brands'}).find_all('article')
brand_icon_values = ['\t<option>fab fa-' + x.attrs['id'] + '</option>' for x in brand_icons
if '\t<option>fab fa-' + x.attrs['id'] + '</option>' not in blacklist]
icon_list += '\n'.join(brand_icon_values)
except TimeoutException:
print('timeout exception')
icon_list += '\n</select>'
with open('fa-icons.txt', 'w+') as file:
file.write(icon_list)
|
[
"bs4.BeautifulSoup",
"selenium.webdriver.support.expected_conditions.presence_of_element_located",
"selenium.webdriver.support.ui.WebDriverWait",
"selenium.webdriver.Chrome"
] |
[((453, 471), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {}), '()\n', (469, 471), False, 'from selenium import webdriver\n'), ((1565, 1623), 'bs4.BeautifulSoup', 'BeautifulSoup', (['browser.page_source'], {'features': '"""html.parser"""'}), "(browser.page_source, features='html.parser')\n", (1578, 1623), False, 'from bs4 import BeautifulSoup\n'), ((1504, 1552), 'selenium.webdriver.support.expected_conditions.presence_of_element_located', 'EC.presence_of_element_located', (["(By.ID, 'reply')"], {}), "((By.ID, 'reply'))\n", (1534, 1552), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((1468, 1497), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['browser', 'delay'], {}), '(browser, delay)\n', (1481, 1497), False, 'from selenium.webdriver.support.ui import WebDriverWait\n')]
|
import requests
result = requests.post(
"https://asia-northeast1-mlops-331003.cloudfunctions.net/function-1",
json={"msg": "Hello from cloud functions"},
)
print(result.json())
|
[
"requests.post"
] |
[((28, 164), 'requests.post', 'requests.post', (['"""https://asia-northeast1-mlops-331003.cloudfunctions.net/function-1"""'], {'json': "{'msg': 'Hello from cloud functions'}"}), "(\n 'https://asia-northeast1-mlops-331003.cloudfunctions.net/function-1',\n json={'msg': 'Hello from cloud functions'})\n", (41, 164), False, 'import requests\n')]
|
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
SECRET_KEY = os.environ.get('SECRET_KEY')
ALLOWED_HOSTS = ['.herokuapp.com', 'localhost', '127.0.0.1']
ENVIRONMENT = os.environ.get('ENVIRONMENT', default='development')
SECRET_KEY = os.environ.get('SECRET_KEY')
DEBUG = int(os.environ.get('DEBUG', default=0))
USE_S3 = int(os.environ.get('USE_S3', default=1))
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'whitenoise.runserver_nostatic', # whitenoise
'django.contrib.staticfiles',
'django.contrib.sites',
# Third party
'crispy_forms',
'allauth',
'allauth.account',
#'storages',
'ckeditor',
'ckeditor_uploader',
'debug_toolbar',
# Local
'users.apps.UsersConfig',
'pages.apps.PagesConfig',
'articles.apps.ArticlesConfig',
'payments.apps.PaymentsConfig',
]
MIDDLEWARE = [
#'django.middleware.cache.UpdateCacheMiddleware', # caching
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware', # whitenoise
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
#'django.middleware.cache.FetchFromCacheMiddleware', # caching
]
ROOT_URLCONF = 'news_outlet.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'news_outlet.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'postgres',
'USER': 'postgres',
'PASSWORD': '<PASSWORD>',
'HOST': 'db',
'PORT': 5432,
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
# Django all-auth
AUTH_USER_MODEL = 'users.CustomUser'
LOGIN_REDIRECT_URL = 'home'
LOGOUT_REDIRECT_URL = 'home'
SITE_ID = 1
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
ACCOUNT_SESSION_REMEMBER = True
ACCOUNT_SIGNUP_PASSWORD_ENTER_TWICE = False
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_UNIQUE_EMAIL = True
# Crispy forms
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# Static files storage
if USE_S3:
# AWS settings
AWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = os.environ.get('AWS_STORAGE_BUCKET_NAME')
AWS_S3_CUSTOM_DOMAIN = f'{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com'
AWS_S3_FILE_OVERWRITE = False
AWS_DEFAULT_ACL = 'public-read'
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
STATICFILES_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
# s3 static settings
AWS_LOCATION = 'static'
STATIC_URL = f'https://{AWS_S3_CUSTOM_DOMAIN}/{AWS_LOCATION}/'
STATICFILES_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
else:
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
# STATICFILES_FINDERS = [
# "django.contrib.staticfiles.finders.FileSystemFinder",
# "django.contrib.staticfiles.finders.AppDirectoriesFinder",
# ]
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'),]
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# emails
if int(os.environ.get('EMAIL')):
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = os.environ.get('EMAIL_HOST')
EMAIL_USE_TLS = int(os.environ.get('EMAIL_USE_TLS'))
EMAIL_PORT = int(os.environ.get('EMAIL_PORT'))
EMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD')
else:
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
DEFAULT_FROM_EMAIL = '<EMAIL>'
#production
if ENVIRONMENT == 'production':
SECURE_BROWSER_XSS_FILTER = True
X_FRAME_OPTIONS = 'DENY'
SECURE_SSL_REDIRECT = True
SECURE_HSTS_SECONDS = 3600
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
SECURE_HSTS_PRELOAD = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
#ckEditor
X_FRAME_OPTIONS = 'SAMEORIGIN'
CKEDITOR_UPLOAD_PATH = 'uploads/'
CKEDITOR_IMAGE_BACKEND = 'pillow'
CKEDITOR_BROWSE_SHOW_DIRS = True
CKEDITOR_CONFIGS = {
'default': {
'toolbar': None,
'extraPlugins': 'codesnippet',
},
}
# Stripe
STRIPE_TEST_PUBLISHABLE_KEY=os.environ.get('STRIPE_TEST_PUBLISHABLE_KEY')
STRIPE_TEST_SECRET_KEY=os.environ.get('STRIPE_TEST_SECRET_KEY')
# Caching
# CACHE_MIDDLEWARE_ALIAS = 'default'
# CACHE_MIDDLEWARE_SECONDS = 604800
# CACHE_MIDDLEWARE_KEY_PREFIX = ''
# django-debug-toolbar
import socket
hostname, _, ips = socket.gethostbyname_ex(socket.gethostname())
INTERNAL_IPS = [ip[:-1] + "1" for ip in ips]
# Heroku
import dj_database_url
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
|
[
"os.environ.get",
"socket.gethostname",
"pathlib.Path",
"os.path.join",
"dj_database_url.config"
] |
[((165, 193), 'os.environ.get', 'os.environ.get', (['"""SECRET_KEY"""'], {}), "('SECRET_KEY')\n", (179, 193), False, 'import os\n'), ((270, 322), 'os.environ.get', 'os.environ.get', (['"""ENVIRONMENT"""'], {'default': '"""development"""'}), "('ENVIRONMENT', default='development')\n", (284, 322), False, 'import os\n'), ((336, 364), 'os.environ.get', 'os.environ.get', (['"""SECRET_KEY"""'], {}), "('SECRET_KEY')\n", (350, 364), False, 'import os\n'), ((5085, 5116), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""media"""'], {}), "(BASE_DIR, 'media')\n", (5097, 5116), False, 'import os\n'), ((6323, 6368), 'os.environ.get', 'os.environ.get', (['"""STRIPE_TEST_PUBLISHABLE_KEY"""'], {}), "('STRIPE_TEST_PUBLISHABLE_KEY')\n", (6337, 6368), False, 'import os\n'), ((6392, 6432), 'os.environ.get', 'os.environ.get', (['"""STRIPE_TEST_SECRET_KEY"""'], {}), "('STRIPE_TEST_SECRET_KEY')\n", (6406, 6432), False, 'import os\n'), ((6751, 6791), 'dj_database_url.config', 'dj_database_url.config', ([], {'conn_max_age': '(500)'}), '(conn_max_age=500)\n', (6773, 6791), False, 'import dj_database_url\n'), ((377, 411), 'os.environ.get', 'os.environ.get', (['"""DEBUG"""'], {'default': '(0)'}), "('DEBUG', default=0)\n", (391, 411), False, 'import os\n'), ((426, 461), 'os.environ.get', 'os.environ.get', (['"""USE_S3"""'], {'default': '(1)'}), "('USE_S3', default=1)\n", (440, 461), False, 'import os\n'), ((4077, 4112), 'os.environ.get', 'os.environ.get', (['"""AWS_ACCESS_KEY_ID"""'], {}), "('AWS_ACCESS_KEY_ID')\n", (4091, 4112), False, 'import os\n'), ((4141, 4180), 'os.environ.get', 'os.environ.get', (['"""AWS_SECRET_ACCESS_KEY"""'], {}), "('AWS_SECRET_ACCESS_KEY')\n", (4155, 4180), False, 'import os\n'), ((4211, 4252), 'os.environ.get', 'os.environ.get', (['"""AWS_STORAGE_BUCKET_NAME"""'], {}), "('AWS_STORAGE_BUCKET_NAME')\n", (4225, 4252), False, 'import os\n'), ((4778, 4815), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""staticfiles"""'], {}), "(BASE_DIR, 'staticfiles')\n", (4790, 4815), False, 'import os\n'), ((5012, 5044), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""static"""'], {}), "(BASE_DIR, 'static')\n", (5024, 5044), False, 'import os\n'), ((5136, 5159), 'os.environ.get', 'os.environ.get', (['"""EMAIL"""'], {}), "('EMAIL')\n", (5150, 5159), False, 'import os\n'), ((5245, 5273), 'os.environ.get', 'os.environ.get', (['"""EMAIL_HOST"""'], {}), "('EMAIL_HOST')\n", (5259, 5273), False, 'import os\n'), ((5404, 5437), 'os.environ.get', 'os.environ.get', (['"""EMAIL_HOST_USER"""'], {}), "('EMAIL_HOST_USER')\n", (5418, 5437), False, 'import os\n'), ((5464, 5501), 'os.environ.get', 'os.environ.get', (['"""EMAIL_HOST_PASSWORD"""'], {}), "('EMAIL_HOST_PASSWORD')\n", (5478, 5501), False, 'import os\n'), ((6634, 6654), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (6652, 6654), False, 'import socket\n'), ((5298, 5329), 'os.environ.get', 'os.environ.get', (['"""EMAIL_USE_TLS"""'], {}), "('EMAIL_USE_TLS')\n", (5312, 5329), False, 'import os\n'), ((5352, 5380), 'os.environ.get', 'os.environ.get', (['"""EMAIL_PORT"""'], {}), "('EMAIL_PORT')\n", (5366, 5380), False, 'import os\n'), ((1882, 1917), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""templates"""'], {}), "(BASE_DIR, 'templates')\n", (1894, 1917), False, 'import os\n'), ((112, 126), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (116, 126), False, 'from pathlib import Path\n')]
|
import numpy as np
import scipy
from ._hist import take_bins, hist  # NOTE: hist() is called by cdf_by_hist() below; assumed to live in ._hist alongside take_bins
__all__ = ['ecdf']
__EPSILON__ = 1e-8
#--------------------------------------------------------------------
def ecdf(x,y=None):
'''
Empirical Cumulative Density Function (ECDF).
Parameters
-----------
* x,y: 1d ndarrays,
        if y is None, then the ECDF of x alone will be computed.
Returns
--------
* if y is not None -> (bins,out_x, out_y);
* if y is None -> (bins,out_x).
Notes
-------
* Based on scipy implementation.
* If y is not None, ECDF will be constructed on the joint x and y.
    * If y is None, only bins and cdf(x) (2 values) will be returned.
    * ECDF is calculated as:
        bins = sort(concatenate(x,y)),
        cdf_x = searchsorted(sort(x), bins, side='right')/size(x),
        cdf_y = searchsorted(sort(y), bins, side='right')/size(y),
        where:
        * bins - bins for the cdfs (if y is not None, joint bins).
'''
x = np.array(x)
x = np.sort(x)
ret2 =True
if (y is not None):
y = np.array(y)
y = np.sort(y)
else:
ret2 = False
y=np.array([])
bins = np.concatenate((x,y))
bins=np.sort(bins)
x_cdf = np.searchsorted(x,bins, 'right')
y_cdf = np.searchsorted(y,bins, 'right')
x_cdf = (x_cdf) / x.shape[0]
y_cdf = (y_cdf) / y.shape[0]
out = (bins,x_cdf)
if (ret2):
out= (bins,x_cdf,y_cdf)
return out
#--------------------------------------------------------------------
def hist2cdf(hist_x, normalize = True):
'''
The cumulative density function made by histogram.
Parameters:
* hist_x 1d histogram (ndarray).
Returns:
* cfd(hist_x) (Cumulative Density Function).
'''
hist_x = np.asarray(hist_x)
out = np.cumsum(hist_x)
if(normalize):
out /=np.max(out)
# TODO: out /=x.size # more simple!
return out
#--------------------------------------------------------------------
def cdf_by_hist(x,y=None,n_bins = None, bins = None, take_mean=False):
'''
Cumulative density function constructed by histogram.
Parameters:
* x,y: 1d ndarrays;
* n_bins: required number of uniformly distributed bins,
* work only if bins is None.
    * bins: grid of prepared bins (can be non-uniform)
    * take_mean: subtract the mean if True.
Returns:
* y is not None -> (out_x, out_y,bins)
* y is None -> (out_x,bins)
Notes:
* If bins is None and n_bins is None:
bins = np.sort(np.concatenate((x,y))).
This case make the same result as ecdf!
* If bins is None and n_bins <=0: n_bins = x.shape[0];
The case of uniform bins grid! (Differ from ECDF).
    * For tests: modes n_bins = 't10' and n_bins = 't5'
      give uniform bins with x.shape[0]//10 and x.shape[0]//5 bins respectively.
'''
    # FIXME: the results differ slightly from ecdf
    # TODO: the 'xy' case is the same as for ecdf, but uniform bins may be more valid (see tests)
if(bins is None and n_bins is None):
bins = take_bins(x,y, n_bins='xy')
elif(n_bins == 't10' and bins is None):
bins = take_bins(x,y, n_bins=x.shape[0]//10)
elif(n_bins == 't5' and bins is None):
bins = take_bins(x,y, n_bins=x.shape[0]//5)
if(y is None):
bins, out_x = hist(x,y=None,n_bins = n_bins, bins = bins, take_mean=take_mean)
out_x = hist2cdf(out_x, normalize = True)
out = (bins, out_x )
else:
bins, out_x, out_y = hist(x,y=y,n_bins = n_bins, bins = bins, take_mean=take_mean)
out_x = hist2cdf(out_x, normalize = True)
out_y = hist2cdf(out_y, normalize = True)
out = (bins,out_x, out_y)
return out
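#--------------------------------------------------------------------
# Minimal usage sketch (added for illustration; not part of the original module).
# It exercises only ecdf(), which depends on nothing beyond numpy.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    x_demo = rng.normal(size=200)
    y_demo = rng.normal(loc=0.3, size=150)
    bins_demo, cdf_x_demo, cdf_y_demo = ecdf(x_demo, y_demo)
    # Both CDFs are evaluated on the same joint bins and reach exactly 1.0 at the last bin.
    assert bins_demo.shape == cdf_x_demo.shape == cdf_y_demo.shape
    assert cdf_x_demo[-1] == 1.0 and cdf_y_demo[-1] == 1.0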
|
[
"numpy.asarray",
"numpy.searchsorted",
"numpy.sort",
"numpy.cumsum",
"numpy.max",
"numpy.array",
"numpy.concatenate"
] |
[((969, 980), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (977, 980), True, 'import numpy as np\n'), ((989, 999), 'numpy.sort', 'np.sort', (['x'], {}), '(x)\n', (996, 999), True, 'import numpy as np\n'), ((1165, 1187), 'numpy.concatenate', 'np.concatenate', (['(x, y)'], {}), '((x, y))\n', (1179, 1187), True, 'import numpy as np\n'), ((1196, 1209), 'numpy.sort', 'np.sort', (['bins'], {}), '(bins)\n', (1203, 1209), True, 'import numpy as np\n'), ((1222, 1255), 'numpy.searchsorted', 'np.searchsorted', (['x', 'bins', '"""right"""'], {}), "(x, bins, 'right')\n", (1237, 1255), True, 'import numpy as np\n'), ((1267, 1300), 'numpy.searchsorted', 'np.searchsorted', (['y', 'bins', '"""right"""'], {}), "(y, bins, 'right')\n", (1282, 1300), True, 'import numpy as np\n'), ((1800, 1818), 'numpy.asarray', 'np.asarray', (['hist_x'], {}), '(hist_x)\n', (1810, 1818), True, 'import numpy as np\n'), ((1834, 1851), 'numpy.cumsum', 'np.cumsum', (['hist_x'], {}), '(hist_x)\n', (1843, 1851), True, 'import numpy as np\n'), ((1056, 1067), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (1064, 1067), True, 'import numpy as np\n'), ((1080, 1090), 'numpy.sort', 'np.sort', (['y'], {}), '(y)\n', (1087, 1090), True, 'import numpy as np\n'), ((1132, 1144), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1140, 1144), True, 'import numpy as np\n'), ((1890, 1901), 'numpy.max', 'np.max', (['out'], {}), '(out)\n', (1896, 1901), True, 'import numpy as np\n')]
|
import datetime
import json
import dateutil.parser
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from django.utils import timezone
from rest_framework import status
from apps.physicaldevice.models import Device
from apps.streamfilter.models import *
from apps.utils.gid.convert import *
from apps.utils.test_util import TestMixin
from ..models import *
user_model = get_user_model()
class DeviceLocationTestCase(TestMixin, TestCase):
def setUp(self):
self.usersTestSetup()
self.orgTestSetup()
self.deviceTemplateTestSetup()
self.pd1 = Device.objects.create_device(project=self.p1, label='d1', template=self.dt1, created_by=self.u2)
self.pd2 = Device.objects.create_device(project=self.p2, label='d2', template=self.dt1, created_by=self.u3)
def tearDown(self):
DeviceLocation.objects.all().delete()
Device.objects.all().delete()
self.deviceTemplateTestTearDown()
self.orgTestTearDown()
self.userTestTearDown()
def testLocation(self):
location = DeviceLocation.objects.create(
timestamp=timezone.now(),
target_slug=self.pd1.slug,
user=self.u2
)
self.assertIsNotNone(location)
self.assertEqual(location.target.id, self.pd1.id)
def testMemberPermissions(self):
"""
Test that people with no permissions cannot access
"""
map_url = reverse('devicelocation:map', kwargs={'slug': self.pd1.slug})
self.client.login(email='<EMAIL>', password='<PASSWORD>')
membership = self.p1.org.register_user(self.u3, role='m1')
membership.permissions['can_read_device_locations'] = False
membership.save()
resp = self.client.get(map_url)
self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)
membership.permissions['can_read_device_locations'] = True
membership.permissions['can_access_classic'] = False
membership.save()
resp = self.client.get(map_url)
self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)
self.client.logout()
|
[
"django.utils.timezone.now",
"django.contrib.auth.get_user_model",
"django.urls.reverse",
"apps.physicaldevice.models.Device.objects.create_device",
"apps.physicaldevice.models.Device.objects.all"
] |
[((438, 454), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (452, 454), False, 'from django.contrib.auth import get_user_model\n'), ((646, 746), 'apps.physicaldevice.models.Device.objects.create_device', 'Device.objects.create_device', ([], {'project': 'self.p1', 'label': '"""d1"""', 'template': 'self.dt1', 'created_by': 'self.u2'}), "(project=self.p1, label='d1', template=self.dt1,\n created_by=self.u2)\n", (674, 746), False, 'from apps.physicaldevice.models import Device\n'), ((762, 862), 'apps.physicaldevice.models.Device.objects.create_device', 'Device.objects.create_device', ([], {'project': 'self.p2', 'label': '"""d2"""', 'template': 'self.dt1', 'created_by': 'self.u3'}), "(project=self.p2, label='d2', template=self.dt1,\n created_by=self.u3)\n", (790, 862), False, 'from apps.physicaldevice.models import Device\n'), ((1501, 1562), 'django.urls.reverse', 'reverse', (['"""devicelocation:map"""'], {'kwargs': "{'slug': self.pd1.slug}"}), "('devicelocation:map', kwargs={'slug': self.pd1.slug})\n", (1508, 1562), False, 'from django.urls import reverse\n'), ((938, 958), 'apps.physicaldevice.models.Device.objects.all', 'Device.objects.all', ([], {}), '()\n', (956, 958), False, 'from apps.physicaldevice.models import Device\n'), ((1174, 1188), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (1186, 1188), False, 'from django.utils import timezone\n')]
|
import operator
import math
import numpy as np
from rtlsdr import RtlSdr
import matplotlib.pyplot as plt
# Available sample rates
'''
3200000Hz
2800000Hz
2560000Hz
2400000Hz
2048000Hz
1920000Hz
1800000Hz
1400000Hz
1024000Hz
900001Hz
250000Hz
'''
# Receiver class. This needs receiving parameters and will receive data from the SDR
class Receiver:
def __init__(self, sample_rate, ppm, resolution, num_FFT, num_med):
self.sdr = RtlSdr()
# configure SDR
self.sdr.sample_rate = sample_rate
self.sdr.center_freq = 1420405000
# For some reason the SDR doesn't want to set the offset PPM to 0 so we avoid that
if ppm != 0:
self.sdr.freq_correction = ppm
self.sdr.gain = 'auto'
self.resolution = 2**resolution
self.num_FFT = num_FFT
self.num_med = num_med
# Reads data from SDR, processes and writes it
def receive(self):
print(f'Receiving {self.num_FFT} bins of {self.resolution} samples each...')
data_PSD = self.sample()
# Observed frequency range
start_freq = self.sdr.center_freq - self.sdr.sample_rate/2
stop_freq = self.sdr.center_freq + self.sdr.sample_rate/2
freqs = np.linspace(start = start_freq, stop = stop_freq, num = self.resolution)
# Samples a blank spectrum to callibrate spectrum with.
self.sdr.center_freq = self.sdr.center_freq + 3000000
blank_PSD = self.sample()
SNR_spectrum = self.estimate_SNR(data = data_PSD, blank = blank_PSD)
SNR_median = self.median(SNR_spectrum) if self.num_med != 0 else SNR_spectrum
# Close the SDR
self.sdr.close()
return freqs, SNR_median
# Returns numpy array with PSD values averaged from "num_FFT" datasets
def sample(self):
counter = 0.0
PSD_summed = (0, )* self.resolution
while (counter < self.num_FFT):
samples = self.sdr.read_samples(self.resolution)
# Applies window to samples in time domain before performing FFT
window = np.hanning(self.resolution)
windowed_samples = samples * window
# Perform FFT and PSD-analysis
PSD = np.abs(np.fft.fft(windowed_samples)/self.sdr.sample_rate)**2
PSD_checked = self.check_for_zero(PSD)
PSD_log = 10*np.log10(PSD_checked)
PSD_summed = tuple(map(operator.add, PSD_summed, np.fft.fftshift(PSD_log)))
counter += 1.0
averaged_PSD = tuple(sample/counter for sample in PSD_summed)
return averaged_PSD
# Calculates SNR from spectrum and H-line SNR
def estimate_SNR(self, data, blank):
SNR = np.array(data)-np.array(blank)
        # Crude noise floor estimate: mean of the first 10 SNR bins
noise_floor = sum(SNR[0:10])/10
shifted_SNR = SNR-noise_floor
return shifted_SNR
    # Smoothing filter for RFI removal (note: this computes a running mean over num_med bins, not a true median)
def median(self, data):
for i in range(len(data)):
data[i] = np.mean(data[i:i+self.num_med])
return data
# Checks if samples have been dropped and replaces 0.0 with next value
def check_for_zero(self, PSD):
try:
index = list(PSD).index(0.0)
print('Dropped sample was recovered!')
PSD[index] = (PSD[index+1]+PSD[index-1])/2
return PSD
        except ValueError:
            # 0.0 not found, i.e. no dropped samples
            return PSD
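# Minimal usage sketch (added for illustration; not part of the original module).
# Parameter values are hypothetical and an RTL-SDR dongle must be attached.
if __name__ == "__main__":
    receiver = Receiver(sample_rate=2400000, ppm=0, resolution=11, num_FFT=10, num_med=5)
    freqs, snr = receiver.receive()
    plt.plot(freqs, snr)
    plt.xlabel("Frequency [Hz]")
    plt.ylabel("SNR [dB]")
    plt.show()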
|
[
"rtlsdr.RtlSdr",
"numpy.fft.fft",
"numpy.mean",
"numpy.array",
"numpy.fft.fftshift",
"numpy.linspace",
"numpy.hanning",
"numpy.log10"
] |
[((446, 454), 'rtlsdr.RtlSdr', 'RtlSdr', ([], {}), '()\n', (452, 454), False, 'from rtlsdr import RtlSdr\n'), ((1237, 1303), 'numpy.linspace', 'np.linspace', ([], {'start': 'start_freq', 'stop': 'stop_freq', 'num': 'self.resolution'}), '(start=start_freq, stop=stop_freq, num=self.resolution)\n', (1248, 1303), True, 'import numpy as np\n'), ((2104, 2131), 'numpy.hanning', 'np.hanning', (['self.resolution'], {}), '(self.resolution)\n', (2114, 2131), True, 'import numpy as np\n'), ((2744, 2758), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (2752, 2758), True, 'import numpy as np\n'), ((2759, 2774), 'numpy.array', 'np.array', (['blank'], {}), '(blank)\n', (2767, 2774), True, 'import numpy as np\n'), ((3043, 3076), 'numpy.mean', 'np.mean', (['data[i:i + self.num_med]'], {}), '(data[i:i + self.num_med])\n', (3050, 3076), True, 'import numpy as np\n'), ((2380, 2401), 'numpy.log10', 'np.log10', (['PSD_checked'], {}), '(PSD_checked)\n', (2388, 2401), True, 'import numpy as np\n'), ((2463, 2487), 'numpy.fft.fftshift', 'np.fft.fftshift', (['PSD_log'], {}), '(PSD_log)\n', (2478, 2487), True, 'import numpy as np\n'), ((2249, 2277), 'numpy.fft.fft', 'np.fft.fft', (['windowed_samples'], {}), '(windowed_samples)\n', (2259, 2277), True, 'import numpy as np\n')]
|
from bindsnet.network.nodes import Input, LIFNodes
from bindsnet.network.topology import Connection
from bindsnet.learning import PostPre
source_layer = Input(n=100, traces=True)
target_layer = LIFNodes(n=1000, traces=True)
connection = Connection(
source=source_layer,
target=target_layer,
update_rule=PostPre,
nu=(1e-4, 1e-2))
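# A possible next step (added for illustration, assuming the usual bindsnet
# Network API): register both layers and the plastic connection in a network.
from bindsnet.network import Network
network = Network()
network.add_layer(layer=source_layer, name="X")
network.add_layer(layer=target_layer, name="Y")
network.add_connection(connection=connection, source="X", target="Y")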
|
[
"bindsnet.network.nodes.Input",
"bindsnet.network.topology.Connection",
"bindsnet.network.nodes.LIFNodes"
] |
[((154, 179), 'bindsnet.network.nodes.Input', 'Input', ([], {'n': '(100)', 'traces': '(True)'}), '(n=100, traces=True)\n', (159, 179), False, 'from bindsnet.network.nodes import Input, LIFNodes\n'), ((195, 224), 'bindsnet.network.nodes.LIFNodes', 'LIFNodes', ([], {'n': '(1000)', 'traces': '(True)'}), '(n=1000, traces=True)\n', (203, 224), False, 'from bindsnet.network.nodes import Input, LIFNodes\n'), ((239, 335), 'bindsnet.network.topology.Connection', 'Connection', ([], {'source': 'source_layer', 'target': 'target_layer', 'update_rule': 'PostPre', 'nu': '(0.0001, 0.01)'}), '(source=source_layer, target=target_layer, update_rule=PostPre,\n nu=(0.0001, 0.01))\n', (249, 335), False, 'from bindsnet.network.topology import Connection\n')]
|
# C&C NLP tools
# Copyright (c) Universities of Edinburgh, Oxford and Sydney
# Copyright (c) <NAME>
#
# This software is covered by a non-commercial use licence.
# See LICENCE.txt for the full text of the licence.
#
# If LICENCE.txt is not included in this distribution
# please email <EMAIL> to obtain a copy.
from base import *
import config
import io
import model
import tagger
import ccg
def load(super, parser, load_model = True):
int_cfg = ccg.IntegrationConfig()
super_cfg = tagger.SuperConfig()
super_cfg.path.value = super
parser_cfg = ccg.ParserConfig()
parser_cfg.path.value = parser
return ccg.Integration(int_cfg, super_cfg, parser_cfg, Sentence())
def read(sent, s):
tokens = [tuple(x.split('|')) for x in s.split()]
sent.words = [t[0] for t in tokens]
sent.pos = [t[1] for t in tokens]
sent.msuper = [[t[2]] for t in tokens]
|
[
"ccg.ParserConfig",
"tagger.SuperConfig",
"ccg.IntegrationConfig"
] |
[((451, 474), 'ccg.IntegrationConfig', 'ccg.IntegrationConfig', ([], {}), '()\n', (472, 474), False, 'import ccg\n'), ((489, 509), 'tagger.SuperConfig', 'tagger.SuperConfig', ([], {}), '()\n', (507, 509), False, 'import tagger\n'), ((556, 574), 'ccg.ParserConfig', 'ccg.ParserConfig', ([], {}), '()\n', (572, 574), False, 'import ccg\n')]
|
from django.db import models
# Create your models here.
class Blog(models.Model):
"""
    Represents a blog post displayed on the home page.
"""
title = models.CharField(max_length=50)
description = models.CharField(max_length=150)
date = models.DateField()
def __str__(self):
return self.title
|
[
"django.db.models.CharField",
"django.db.models.DateField"
] |
[((156, 187), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (172, 187), False, 'from django.db import models\n'), ((206, 238), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(150)'}), '(max_length=150)\n', (222, 238), False, 'from django.db import models\n'), ((250, 268), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (266, 268), False, 'from django.db import models\n')]
|
import telnetlib
import time
tn = telnetlib.Telnet('192.168.137.226', 5051)
#tn.write(b"Client")
time.sleep(1)
for i in range(5):
print("Now writing ?Q102")
tn.write(b"?Q102\n")
status = tn.read_until(b"\n",timeout=1).decode("utf-8")
print(f"Data received: {status}")
print("Now writing ?Q104")
tn.write(b"?Q104\n")
status2 = tn.read_until(b"\n",timeout=1).decode("utf-8")
print(f"Data received: {status2}")
print("Now writing ?Q200")
tn.write(b"?Q200\n")
status2 = tn.read_until(b"\n",timeout=1).decode("utf-8")
print(f"Data received: {status2}")
print("Now writing ?Q500")
tn.write(b"?Q500\n")
status2 = tn.read_until(b"\n",timeout=1).decode("utf-8")
print(f"Data received: {status2}")
tn.close()
|
[
"telnetlib.Telnet",
"time.sleep"
] |
[((35, 76), 'telnetlib.Telnet', 'telnetlib.Telnet', (['"""192.168.137.226"""', '(5051)'], {}), "('192.168.137.226', 5051)\n", (51, 76), False, 'import telnetlib\n'), ((98, 111), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (108, 111), False, 'import time\n')]
|
"""Define version constants."""
import re
__version__ = '1.0.0'
__version_info__ = tuple(re.split('[.-]', __version__))
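# For example, '1.0.0' splits to ('1', '0', '0'); a pre-release tag such as
# '1.0.0-rc1' would split to ('1', '0', '0', 'rc1').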
|
[
"re.split"
] |
[((91, 120), 're.split', 're.split', (['"""[.-]"""', '__version__'], {}), "('[.-]', __version__)\n", (99, 120), False, 'import re\n')]
|
#!/usr/bin/env python
# Copyright 2013 Netflix
"""Push all repos to stash
"""
from nflx_oc.commands.dev.repos import run_for_all_repos
def main():
run_for_all_repos('git push origin master')
|
[
"nflx_oc.commands.dev.repos.run_for_all_repos"
] |
[((156, 199), 'nflx_oc.commands.dev.repos.run_for_all_repos', 'run_for_all_repos', (['"""git push origin master"""'], {}), "('git push origin master')\n", (173, 199), False, 'from nflx_oc.commands.dev.repos import run_for_all_repos\n')]
|
from __future__ import print_function
import numpy as np
def faces_with_repeated_vertices(f):
if f.shape[1] == 3:
return np.unique(np.concatenate([
np.where(f[:, 0] == f[:, 1])[0],
np.where(f[:, 0] == f[:, 2])[0],
np.where(f[:, 1] == f[:, 2])[0],
]))
else:
return np.unique(np.concatenate([
np.where(f[:, 0] == f[:, 1])[0],
np.where(f[:, 0] == f[:, 2])[0],
np.where(f[:, 0] == f[:, 3])[0],
np.where(f[:, 1] == f[:, 2])[0],
np.where(f[:, 1] == f[:, 3])[0],
np.where(f[:, 2] == f[:, 3])[0],
]))
def faces_with_out_of_range_vertices(f, v):
return np.unique(np.concatenate([
np.where(f < 0)[0],
np.where(f >= len(v))[0],
]))
def check_integrity(mesh):
errors = []
for f_index in faces_with_out_of_range_vertices(mesh.f, mesh.v):
errors.append(("f", f_index, "Vertex out of range"))
for f_index in faces_with_repeated_vertices(mesh.f):
errors.append(("f", f_index, "Repeated vertex"))
return errors
def print_integrity_errors(errors, mesh):
for attr, index, message in errors:
try:
data = getattr(mesh, attr)[index]
except (AttributeError, IndexError):
data = ''
print("{} {} {} {}".format(attr, index, message, data))
|
[
"numpy.where"
] |
[((734, 749), 'numpy.where', 'np.where', (['(f < 0)'], {}), '(f < 0)\n', (742, 749), True, 'import numpy as np\n'), ((174, 202), 'numpy.where', 'np.where', (['(f[:, 0] == f[:, 1])'], {}), '(f[:, 0] == f[:, 1])\n', (182, 202), True, 'import numpy as np\n'), ((219, 247), 'numpy.where', 'np.where', (['(f[:, 0] == f[:, 2])'], {}), '(f[:, 0] == f[:, 2])\n', (227, 247), True, 'import numpy as np\n'), ((264, 292), 'numpy.where', 'np.where', (['(f[:, 1] == f[:, 2])'], {}), '(f[:, 1] == f[:, 2])\n', (272, 292), True, 'import numpy as np\n'), ((373, 401), 'numpy.where', 'np.where', (['(f[:, 0] == f[:, 1])'], {}), '(f[:, 0] == f[:, 1])\n', (381, 401), True, 'import numpy as np\n'), ((418, 446), 'numpy.where', 'np.where', (['(f[:, 0] == f[:, 2])'], {}), '(f[:, 0] == f[:, 2])\n', (426, 446), True, 'import numpy as np\n'), ((463, 491), 'numpy.where', 'np.where', (['(f[:, 0] == f[:, 3])'], {}), '(f[:, 0] == f[:, 3])\n', (471, 491), True, 'import numpy as np\n'), ((508, 536), 'numpy.where', 'np.where', (['(f[:, 1] == f[:, 2])'], {}), '(f[:, 1] == f[:, 2])\n', (516, 536), True, 'import numpy as np\n'), ((553, 581), 'numpy.where', 'np.where', (['(f[:, 1] == f[:, 3])'], {}), '(f[:, 1] == f[:, 3])\n', (561, 581), True, 'import numpy as np\n'), ((598, 626), 'numpy.where', 'np.where', (['(f[:, 2] == f[:, 3])'], {}), '(f[:, 2] == f[:, 3])\n', (606, 626), True, 'import numpy as np\n')]
|
import unittest
import os
from icalendar import Calendar
import random
import string
from datetime import timedelta, datetime as dt
import pytz
from util import Singleton
from .. import CalService, CalRemote, iCloudCaldavRemote, Event
@Singleton
class CalMockRemote(CalRemote):
def create_calendar(self):
self.calendar = Calendar()
self.calendar.add("prodid", "-//My calendar product//mxm.dk//")
self.calendar.add("version", "2.0")
def __init__(self):
self.create_calendar()
def add_event(self, event: Event):
self.calendar.add_component(event)
def events(self):
events = self.calendar.subcomponents
return list(map(lambda e: Event(e), events))
def purge(self):
self.create_calendar()
def date_search(self, start, end=None):
events = self.events()
if end is None:
end = pytz.utc.localize(dt.max)
def _starts_between(e: Event, start, end):
return end > e["dtstart"].dt and e["dtstart"].dt > start
return list(filter(lambda e: _starts_between(e, start, end), events))
class TestCalService(unittest.TestCase):
@classmethod
def setUpClass(self):
if "DONOTMOCK" in os.environ:
purgable_calendar = os.getenv("CALDAV_PURGABLE_CALENDAR")
self.cal_service = CalService.instance(
iCloudCaldavRemote.instance(purgable_calendar)
)
else:
self.cal_service = CalService.instance(CalMockRemote.instance())
print("Mocking Remote...")
def setUp(self):
self.cal_service.purge()
def now(self):
return pytz.utc.localize(dt.now())
def test_cant_add_with_too_few_params(self):
summary = "".join(random.choices(string.ascii_uppercase + string.digits, k=6))
event = Event()
event.add("summary", summary)
self.assertRaises(Exception, self.cal_service.add_event, event)
def test_add_and_get_event(self):
summary = "".join(random.choices(string.ascii_uppercase + string.digits, k=6))
event = Event()
event.add("summary", summary)
event.add("dtstart", pytz.utc.localize(dt(2020, 2, 26, 18, 00)))
event.add("dtend", pytz.utc.localize(dt(2020, 2, 26, 19, 00)))
event.add("location", "My Hood")
event.set_reminder(timedelta(minutes=10))
self.cal_service.add_event(event)
all_events = self.cal_service.get_all_events()
self.assertTrue(len(all_events) > 0)
self.assertIsInstance(all_events[0], Event)
self.assertTrue(any(e["summary"] == summary for e in all_events))
def test_get_events_between(self):
event = Event()
summary = "".join(random.choices(string.ascii_uppercase + string.digits, k=6))
event.add("summary", summary)
event.add("dtstart", self.now() + timedelta(minutes=2))
event.add("dtend", self.now() + timedelta(minutes=12))
self.cal_service.add_event(event)
start = self.now()
end = self.now() + timedelta(minutes=15)
all_events = self.cal_service.get_events_between(start, end)
self.assertTrue(len(all_events) > 0)
self.assertIsInstance(all_events[0], Event)
self.assertTrue(any(e["summary"] == summary for e in all_events))
def test_get_next_events(self):
event = Event()
summary = "".join(random.choices(string.ascii_uppercase + string.digits, k=6))
event.add("summary", summary)
event.add("dtstart", self.now() + timedelta(minutes=1))
event.add("dtend", self.now() + timedelta(minutes=10))
self.cal_service.add_event(event)
event2 = Event()
summary2 = "".join(random.choices(string.ascii_uppercase + string.digits, k=6))
event2.add("summary", summary2)
event2.add("dtstart", self.now() + timedelta(minutes=2))
event2.add("dtend", self.now() + timedelta(minutes=10))
self.cal_service.add_event(event2)
next_events = self.cal_service.get_next_events()
self.assertIsInstance(next_events[0], Event)
self.assertEqual(next_events[0]["summary"], summary)
self.assertEqual(next_events[1]["summary"], summary2)
def test_get_max_available_time_between(self):
def _chop_dt(date: dt):
return date.replace(microsecond=0)
start_time = self.now()
end_time = self.now() + timedelta(hours=4)
with self.subTest("no events today"):
max_time, before, after = self.cal_service.get_max_available_time_between(
start_time, end_time
)
self.assertEqual(max_time, end_time - start_time)
self.assertEqual(before, start_time)
self.assertEqual(after, end_time)
event1 = Event()
summary = "".join(random.choices(string.ascii_uppercase + string.digits, k=6))
event1.add("summary", summary)
event1.add("dtstart", start_time + timedelta(minutes=15))
event1.add("dtend", start_time + timedelta(minutes=30))
self.cal_service.add_event(event1)
        # event2 starts 30 minutes after event1 ends
event2 = Event()
summary2 = "".join(random.choices(string.ascii_uppercase + string.digits, k=6))
event2_start_time = event1.get_end() + timedelta(minutes=30)
event2.add("summary", summary2)
event2.add("dtstart", event2_start_time)
event2.add("dtend", event2_start_time + timedelta(minutes=15))
self.cal_service.add_event(event2)
with self.subTest(msg="rest of the day is empty"):
max_time, before, after = self.cal_service.get_max_available_time_between(
start_time, end_time
)
self.assertGreater(max_time, timedelta(minutes=30))
self.assertEqual(_chop_dt(before), _chop_dt(event2.get_end()))
self.assertEqual(after, end_time)
with self.subTest(msg="rest of the day with events of shorter delta"):
# each of which are 15 minutes apart
next_event_start_time = event2.get_end() + timedelta(minutes=15)
while next_event_start_time < end_time:
next_ev_summary = "".join(
random.choices(string.ascii_uppercase + string.digits, k=6)
)
next_event = Event()
next_event.add("summary", next_event)
next_event.add("dtstart", next_event_start_time)
next_event.add("dtend", next_event_start_time + timedelta(minutes=15))
self.cal_service.add_event(next_event)
next_event_start_time = next_event.get_end() + timedelta(minutes=15)
max_time, before, after = self.cal_service.get_max_available_time_between(
start_time, end_time
)
self.assertEqual(timedelta(minutes=30), max_time)
self.assertEqual(_chop_dt(before), _chop_dt(event1.get_end()))
self.assertEqual(_chop_dt(after), _chop_dt(event2.get_start()))
|
[
"random.choices",
"datetime.datetime",
"pytz.utc.localize",
"icalendar.Calendar",
"datetime.timedelta",
"datetime.datetime.now",
"os.getenv"
] |
[((336, 346), 'icalendar.Calendar', 'Calendar', ([], {}), '()\n', (344, 346), False, 'from icalendar import Calendar\n'), ((894, 919), 'pytz.utc.localize', 'pytz.utc.localize', (['dt.max'], {}), '(dt.max)\n', (911, 919), False, 'import pytz\n'), ((1276, 1313), 'os.getenv', 'os.getenv', (['"""CALDAV_PURGABLE_CALENDAR"""'], {}), "('CALDAV_PURGABLE_CALENDAR')\n", (1285, 1313), False, 'import os\n'), ((1681, 1689), 'datetime.datetime.now', 'dt.now', ([], {}), '()\n', (1687, 1689), True, 'from datetime import timedelta, datetime as dt\n'), ((1767, 1826), 'random.choices', 'random.choices', (['(string.ascii_uppercase + string.digits)'], {'k': '(6)'}), '(string.ascii_uppercase + string.digits, k=6)\n', (1781, 1826), False, 'import random\n'), ((2027, 2086), 'random.choices', 'random.choices', (['(string.ascii_uppercase + string.digits)'], {'k': '(6)'}), '(string.ascii_uppercase + string.digits, k=6)\n', (2041, 2086), False, 'import random\n'), ((2362, 2383), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(10)'}), '(minutes=10)\n', (2371, 2383), False, 'from datetime import timedelta, datetime as dt\n'), ((2744, 2803), 'random.choices', 'random.choices', (['(string.ascii_uppercase + string.digits)'], {'k': '(6)'}), '(string.ascii_uppercase + string.digits, k=6)\n', (2758, 2803), False, 'import random\n'), ((3067, 3088), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(15)'}), '(minutes=15)\n', (3076, 3088), False, 'from datetime import timedelta, datetime as dt\n'), ((3416, 3475), 'random.choices', 'random.choices', (['(string.ascii_uppercase + string.digits)'], {'k': '(6)'}), '(string.ascii_uppercase + string.digits, k=6)\n', (3430, 3475), False, 'import random\n'), ((3737, 3796), 'random.choices', 'random.choices', (['(string.ascii_uppercase + string.digits)'], {'k': '(6)'}), '(string.ascii_uppercase + string.digits, k=6)\n', (3751, 3796), False, 'import random\n'), ((4440, 4458), 'datetime.timedelta', 'timedelta', ([], {'hours': '(4)'}), '(hours=4)\n', (4449, 4458), False, 'from datetime import timedelta, datetime as dt\n'), ((4853, 4912), 'random.choices', 'random.choices', (['(string.ascii_uppercase + string.digits)'], {'k': '(6)'}), '(string.ascii_uppercase + string.digits, k=6)\n', (4867, 4912), False, 'import random\n'), ((5214, 5273), 'random.choices', 'random.choices', (['(string.ascii_uppercase + string.digits)'], {'k': '(6)'}), '(string.ascii_uppercase + string.digits, k=6)\n', (5228, 5273), False, 'import random\n'), ((5322, 5343), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(30)'}), '(minutes=30)\n', (5331, 5343), False, 'from datetime import timedelta, datetime as dt\n'), ((2197, 2219), 'datetime.datetime', 'dt', (['(2020)', '(2)', '(26)', '(18)', '(0)'], {}), '(2020, 2, 26, 18, 0)\n', (2199, 2219), True, 'from datetime import timedelta, datetime as dt\n'), ((2268, 2290), 'datetime.datetime', 'dt', (['(2020)', '(2)', '(26)', '(19)', '(0)'], {}), '(2020, 2, 26, 19, 0)\n', (2270, 2290), True, 'from datetime import timedelta, datetime as dt\n'), ((2885, 2905), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(2)'}), '(minutes=2)\n', (2894, 2905), False, 'from datetime import timedelta, datetime as dt\n'), ((2947, 2968), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(12)'}), '(minutes=12)\n', (2956, 2968), False, 'from datetime import timedelta, datetime as dt\n'), ((3557, 3577), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(1)'}), '(minutes=1)\n', (3566, 3577), False, 'from datetime import timedelta, datetime as dt\n'), ((3619, 3640), 
'datetime.timedelta', 'timedelta', ([], {'minutes': '(10)'}), '(minutes=10)\n', (3628, 3640), False, 'from datetime import timedelta, datetime as dt\n'), ((3881, 3901), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(2)'}), '(minutes=2)\n', (3890, 3901), False, 'from datetime import timedelta, datetime as dt\n'), ((3944, 3965), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(10)'}), '(minutes=10)\n', (3953, 3965), False, 'from datetime import timedelta, datetime as dt\n'), ((4996, 5017), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(15)'}), '(minutes=15)\n', (5005, 5017), False, 'from datetime import timedelta, datetime as dt\n'), ((5060, 5081), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(30)'}), '(minutes=30)\n', (5069, 5081), False, 'from datetime import timedelta, datetime as dt\n'), ((5481, 5502), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(15)'}), '(minutes=15)\n', (5490, 5502), False, 'from datetime import timedelta, datetime as dt\n'), ((5786, 5807), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(30)'}), '(minutes=30)\n', (5795, 5807), False, 'from datetime import timedelta, datetime as dt\n'), ((6114, 6135), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(15)'}), '(minutes=15)\n', (6123, 6135), False, 'from datetime import timedelta, datetime as dt\n'), ((6881, 6902), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(30)'}), '(minutes=30)\n', (6890, 6902), False, 'from datetime import timedelta, datetime as dt\n'), ((6251, 6310), 'random.choices', 'random.choices', (['(string.ascii_uppercase + string.digits)'], {'k': '(6)'}), '(string.ascii_uppercase + string.digits, k=6)\n', (6265, 6310), False, 'import random\n'), ((6691, 6712), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(15)'}), '(minutes=15)\n', (6700, 6712), False, 'from datetime import timedelta, datetime as dt\n'), ((6549, 6570), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(15)'}), '(minutes=15)\n', (6558, 6570), False, 'from datetime import timedelta, datetime as dt\n')]
|
# coding=utf-8
import itertools
from contracts.utils import raise_wrapped
from nose.tools import nottest
from geometry import MatrixLieGroup, RandomManifold, all_manifolds, logger
from .checks_generation import *
def list_manifolds():
return all_manifolds
@nottest
def get_test_points(M, num_random=2):
interesting = M.interesting_points()
if isinstance(M, RandomManifold):
for i in range(num_random): # @UnusedVariable
interesting.append(M.sample_uniform())
if len(interesting) == 0:
logger.warning('No test points for %s and not random.' % M)
return interesting
def list_manifold_point():
""" Yields all possible (M, point, i, num) tests we have """
for M in list_manifolds():
interesting = get_test_points(M)
num_examples = len(interesting)
for i in range(num_examples):
point = interesting[i]
try:
M.belongs(point)
except Exception as e:
msg = 'M %s does not contain %s: %s' % (M, point, e)
raise_wrapped(Exception, e, msg)
yield M, point, i, num_examples
def list_mgroup():
""" Yields all possible (M, point, i, num) tests we have """
for M in list_manifolds():
if not isinstance(M, MatrixLieGroup):
continue
yield M
def list_mgroup_point():
""" Yields all possible (M, point, i, num) tests we have """
for M in list_mgroup():
interesting = get_test_points(M)
num_examples = len(interesting)
for i in range(num_examples):
point = interesting[i]
try:
M.belongs(point)
except Exception as e:
msg = 'M %s does not contain %s: %s' % (M, point, e)
raise_wrapped(Exception, e, msg)
yield M, point, i, num_examples
def list_manifold_points():
""" Yields all possible (M, point1, point2, i, num) tests we have """
for M in list_manifolds():
interesting = get_test_points(M)
num_examples = len(interesting) * len(interesting)
k = 0
for p1, p2 in itertools.product(interesting, interesting):
yield M, p1, p2, k, num_examples
k += 1
for_all_manifolds = fancy_test_decorator(lister=lambda: all_manifolds,
arguments=lambda M: (M,),
attributes=lambda M: dict(manifolds=1, manifold=str(M)))
def _args0(x):
(M, p, i, n) = x
return M, p
def _attrs0(x):
(M, p, i, n) = x
return dict(manifolds=1,
manifold=M,
point=p)
for_all_manifold_point = fancy_test_decorator(lister=list_manifold_point,
arguments=_args0,
attributes=_attrs0)
def _args1(x):
(M, p, i, n) = x
return M, p
def _attrs1(x):
(M, p, i, n) = x
return dict(manifolds=1,
matrixgroups=1,
manifold=M, point=p)
for_all_mgroup_point = fancy_test_decorator(lister=list_mgroup_point,
arguments=_args1,
attributes=_attrs1)
for_all_mgroup = fancy_test_decorator(lister=list_mgroup,
arguments=lambda M: (M,),
attributes=lambda M: dict(manifolds=1, matrixgroups=1,
manifold=M))
def _args(x):
(M, p1, p2, k, n) = x
return M, p1, p2
def _attrs(x):
(M, p1, p2, k, n) = x
return dict(type='manifolds', manifold=M, point1=p1, point2=p2)
for_all_manifold_pairs = fancy_test_decorator(lister=list_manifold_points,
arguments=_args,
attributes=_attrs)
|
[
"geometry.logger.warning",
"contracts.utils.raise_wrapped",
"itertools.product"
] |
[((537, 596), 'geometry.logger.warning', 'logger.warning', (["('No test points for %s and not random.' % M)"], {}), "('No test points for %s and not random.' % M)\n", (551, 596), False, 'from geometry import MatrixLieGroup, RandomManifold, all_manifolds, logger\n'), ((2139, 2182), 'itertools.product', 'itertools.product', (['interesting', 'interesting'], {}), '(interesting, interesting)\n', (2156, 2182), False, 'import itertools\n'), ((1069, 1101), 'contracts.utils.raise_wrapped', 'raise_wrapped', (['Exception', 'e', 'msg'], {}), '(Exception, e, msg)\n', (1082, 1101), False, 'from contracts.utils import raise_wrapped\n'), ((1791, 1823), 'contracts.utils.raise_wrapped', 'raise_wrapped', (['Exception', 'e', 'msg'], {}), '(Exception, e, msg)\n', (1804, 1823), False, 'from contracts.utils import raise_wrapped\n')]
|
import collections
import heapq
from typing import List
def find_town_judge(n: int, trust: List[List[int]]) -> int:
trusts = {i + 1: 0 for i in range(n)}
outgoing = {i + 1 for i in range(n)}
for origin, destination in trust:
if origin in outgoing:
outgoing.remove(origin)
trusts[destination] += 1
if len(outgoing) == 1 and trusts[list(outgoing)[0]] == n - 1:
return list(outgoing)[0]
return -1
def all_paths_source_to_target(graph):
result = []
def traverse(node_id, path, visited):
if node_id in visited:
return 1
visited.add(node_id)
if node_id == len(graph) - 1:
result.append(path)
else:
if any([traverse(adjacent, path + [adjacent], visited | {node_id}) for adjacent in graph[node_id]]):
return 1
return 0
if traverse(0, [0], set()) == 1:
return []
return result
def minimum_vertices_reach_all_nodes(n: int, edges: List[List[int]]) -> List[int]:
in_degree = {i: 0 for i in range(n)}
for origin, destination in edges:
in_degree[destination] += 1
return list(filter(lambda x: in_degree[x] == 0, in_degree.keys()))
def keys_and_rooms(rooms: List[List[int]]) -> bool:
visited = {0}
queue = collections.deque([0])
while queue:
node_id = queue.popleft()
for adjacent in rooms[node_id]:
if adjacent not in visited:
visited.add(adjacent)
queue.append(adjacent)
return len(rooms) == len(visited)
def number_of_provinces(is_connected):
parents = [i for i in range(len(is_connected))]
rank = [1 for _ in range(len(is_connected))]
def find(node_id):
if parents[node_id] != node_id:
parents[node_id] = find(parents[node_id])
return parents[node_id]
def union(node_a, node_b):
parent_a = find(node_a)
parent_b = find(node_b)
if parent_a == parent_b:
return
rank_a = rank[parent_a]
rank_b = rank[parent_b]
if rank_a > rank_b:
parents[parent_b] = parent_a
rank[parent_a] += 1
else:
parents[parent_a] = parent_b
rank[parent_b] += 1
for x, row in enumerate(is_connected):
for y, value in enumerate(row):
if y > x and value == 1:
union(x, y)
for x in range(len(is_connected)):
find(x)
return len(set(parents))
def redundant_connections(edges):
parents = [i for i in range(len(edges) + 1)]
rank = [1 for _ in range(len(edges) + 1)]
def find(node_id):
if parents[node_id] != node_id:
parents[node_id] = find(parents[node_id])
return parents[node_id]
def union(node_a, node_b):
parent_a = find(node_a)
parent_b = find(node_b)
if parent_a == parent_b:
return True
rank_a = rank[parent_a]
rank_b = rank[parent_b]
if rank_a > rank_b:
rank[parent_a] += 1
parents[node_b] = parent_a
else:
parents[node_a] = parent_b
rank[parent_b] += 1
return False
result = []
for origin, destination in edges:
if union(origin, destination):
result = [origin, destination]
return result
def maximal_network_rank(n, roads):
def get_graph():
graph = collections.defaultdict(set)
for origin, destination in roads:
graph[origin].add(destination)
graph[destination].add(origin)
return graph
graph = get_graph()
max_rank = 0
for x in range(n):
for y in range(x + 1, n):
rank = len(graph[x]) + len(graph[y])
if x in graph[y]:
rank -= 1
max_rank = max(max_rank, rank)
return max_rank
def find_eventual_safe_nodes(graph):
safe = set()
unsafe = set()
def traverse(node_id, visited):
if node_id in visited:
unsafe.add(node_id)
return False
for adjacent in graph[node_id]:
if adjacent in unsafe:
unsafe.add(node_id)
return False
if adjacent not in safe and not traverse(adjacent, visited | {node_id}):
unsafe.add(node_id)
return False
safe.add(node_id)
return True
for node_id in range(len(graph)):
if node_id not in safe and node_id not in unsafe:
traverse(node_id, set())
return list(safe)
def is_graph_bipartite(graph):
colors = collections.defaultdict(bool)
def traverse(node_id, color):
colors[node_id] = color
for adjacent in graph[node_id]:
if adjacent in colors and colors[adjacent] == color:
return False
if adjacent not in colors and not traverse(adjacent, not color):
return False
return True
for node_id in range(len(graph)):
if node_id not in colors:
if not traverse(node_id, True):
return False
return True
def flower_planting_no_adjacent(n, paths):
flowers = collections.defaultdict(int)
flower_colors = {1, 2, 3, 4}
def get_graph():
graph = collections.defaultdict(list)
for origin, destination in paths:
graph[origin].append(destination)
graph[destination].append(origin)
return graph
graph = get_graph()
def get_color(node_id):
colors = set()
for adjacent in graph[node_id]:
if adjacent in flowers:
colors.add(flowers[adjacent])
return list(flower_colors.difference(colors))[0]
def traverse(node_id):
flowers[node_id] = get_color(node_id)
for adjacent in graph[node_id]:
if adjacent not in flowers:
traverse(adjacent)
for node_id in range(1, n + 1):
if node_id not in flowers:
traverse(node_id)
result = [None for _ in range(n)]
for key, value in flowers.items():
result[key - 1] = value
return result
def network_delay_time(times, n, k):
queue = [[0, k]]
visited = set()
def get_graph():
graph = collections.defaultdict(list)
for origin, destination, weight in times:
graph[origin].append([weight, destination])
return graph
graph = get_graph()
while queue:
total_time, node_id = heapq.heappop(queue)
visited.add(node_id)
if len(visited) == n:
return total_time
for adjacent_weight, adjacent_node in graph[node_id]:
if adjacent_node not in visited:
heapq.heappush(queue, [total_time + adjacent_weight, adjacent_node])
return -1
def course_schedule_two(num_courses, prerequisites):
def get_graph():
graph = collections.defaultdict(list)
in_degree = {x: 0 for x in range(num_courses)}
for destination, origin in prerequisites:
graph[origin].append(destination)
in_degree[destination] += 1
return graph, in_degree
graph, in_degree = get_graph()
queue = collections.deque(list(filter(lambda x: in_degree[x] == 0, in_degree.keys())))
result = []
while queue:
node_id = queue.popleft()
result.append(node_id)
for adjacent in graph[node_id]:
in_degree[adjacent] -= 1
if in_degree[adjacent] == 0:
queue.append(adjacent)
if len(result) == num_courses:
return result
return []
def calcEquation(equations: List[List[str]], values: List[float], queries: List[List[str]]) -> List[float]:
def get_graph():
graph = collections.defaultdict(list)
for [origin, destination], value in zip(equations, values):
graph[origin].append([value, destination])
graph[destination].append([1 / value, origin])
return graph
graph = get_graph()
def traverse(node_id, target_node, temp_result, visited):
if node_id == target_node:
return temp_result
for weight, adjacent in graph[node_id]:
if adjacent not in visited:
result = traverse(adjacent, target_node, temp_result * weight, visited | {node_id})
if result != -1:
return result
return -1
result = []
for node_id, target_id in queries:
if node_id not in graph or target_id not in graph:
result.append(float(-1))
else:
result.append(traverse(node_id, target_id, 1, set()))
return result
def numBusesToDestination(routes: List[List[int]], source: int, target: int) -> int:
def get_graph():
bus_graph = collections.defaultdict(list)
stop_graph = collections.defaultdict(list)
for i, stops in enumerate(routes):
for stop in stops:
bus_graph[i + 1].append(stop)
stop_graph[stop].append(i + 1)
return bus_graph, stop_graph
bus_graph, stop_graph = get_graph()
bus_visited, stop_visited = set(), set()
queue = collections.deque([[0, source, 0]])
while queue:
total, location_id, turn = queue.popleft()
if turn == 0:
if location_id == target:
return total
for adjacent in stop_graph[location_id]:
if adjacent not in bus_visited:
bus_visited.add(adjacent)
queue.append([total + 1, adjacent, 1])
else:
for adjacent in bus_graph[location_id]:
if adjacent not in stop_visited:
stop_visited.add(adjacent)
queue.append([total, adjacent, 0])
return -1
def kSimilarity(s1: str, s2: str) -> int:
visited = set()
def get_neighbors(input_string):
neighbors = []
for x in range(len(input_string)):
for y in range(x + 1, len(input_string)):
temp_string = list(input_string)
temp_string[x], temp_string[y] = temp_string[y], temp_string[x]
neighbors.append(''.join(temp_string))
return neighbors
queue = collections.deque([[0, s1]])
visited.add(s1)
while queue:
value, input_string = queue.popleft()
if input_string == s2:
return value
for neighbor in get_neighbors(input_string):
if neighbor not in visited:
visited.add(neighbor)
queue.append([value + 1, neighbor])
return -1
def ladderLength(beginWord: str, endWord: str, wordList: List[str]) -> int:
def get_graph():
graph = collections.defaultdict(list)
for word in wordList + [beginWord]:
for i, letter in enumerate(word):
graph[word[:i] + '*' + word[i + 1:]].append(word)
return graph
if endWord not in wordList:
return -1
graph = get_graph()
visited = {beginWord}
queue = collections.deque([[1, beginWord]])
while queue:
distance, word = queue.popleft()
if word == endWord:
return distance + 1
for i, letter in enumerate(word):
transform = word[:i] + '*' + word[i + 1:]
for word in graph[transform]:
if word not in visited:
visited.add(word)
queue.append([distance + 1, word])
return -1
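# Illustrative usage — a minimal sketch added for clarity; the inputs below are
# arbitrary examples and the values in the comments were traced by hand against
# the implementations above.
if __name__ == '__main__':
    # find_town_judge: person 3 is trusted by everyone else and trusts nobody.
    print(find_town_judge(3, [[1, 3], [2, 3]]))                          # 3
    # number_of_provinces: nodes {0, 1} are connected, node 2 stands alone.
    print(number_of_provinces([[1, 1, 0], [1, 1, 0], [0, 0, 1]]))        # 2
    # course_schedule_two: course 1 depends on course 0, so 0 comes first.
    print(course_schedule_two(2, [[1, 0]]))                              # [0, 1]
    # network_delay_time: 2 -> 1 and 2 -> 3 each take 1, then 3 -> 4 takes 1 more.
    print(network_delay_time([[2, 1, 1], [2, 3, 1], [3, 4, 1]], 4, 2))   # 2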
|
[
"collections.defaultdict",
"heapq.heappush",
"collections.deque",
"heapq.heappop"
] |
[((1303, 1325), 'collections.deque', 'collections.deque', (['[0]'], {}), '([0])\n', (1320, 1325), False, 'import collections\n'), ((4619, 4648), 'collections.defaultdict', 'collections.defaultdict', (['bool'], {}), '(bool)\n', (4642, 4648), False, 'import collections\n'), ((5197, 5225), 'collections.defaultdict', 'collections.defaultdict', (['int'], {}), '(int)\n', (5220, 5225), False, 'import collections\n'), ((9187, 9222), 'collections.deque', 'collections.deque', (['[[0, source, 0]]'], {}), '([[0, source, 0]])\n', (9204, 9222), False, 'import collections\n'), ((10261, 10289), 'collections.deque', 'collections.deque', (['[[0, s1]]'], {}), '([[0, s1]])\n', (10278, 10289), False, 'import collections\n'), ((11061, 11096), 'collections.deque', 'collections.deque', (['[[1, beginWord]]'], {}), '([[1, beginWord]])\n', (11078, 11096), False, 'import collections\n'), ((3433, 3461), 'collections.defaultdict', 'collections.defaultdict', (['set'], {}), '(set)\n', (3456, 3461), False, 'import collections\n'), ((5297, 5326), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (5320, 5326), False, 'import collections\n'), ((6275, 6304), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (6298, 6304), False, 'import collections\n'), ((6504, 6524), 'heapq.heappop', 'heapq.heappop', (['queue'], {}), '(queue)\n', (6517, 6524), False, 'import heapq\n'), ((6912, 6941), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (6935, 6941), False, 'import collections\n'), ((7765, 7794), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (7788, 7794), False, 'import collections\n'), ((8804, 8833), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (8827, 8833), False, 'import collections\n'), ((8855, 8884), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (8878, 8884), False, 'import collections\n'), ((10741, 10770), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (10764, 10770), False, 'import collections\n'), ((6737, 6805), 'heapq.heappush', 'heapq.heappush', (['queue', '[total_time + adjacent_weight, adjacent_node]'], {}), '(queue, [total_time + adjacent_weight, adjacent_node])\n', (6751, 6805), False, 'import heapq\n')]
|
# encoding: utf-8
import re
import base64
from ..utils import int_or_none
from ..extractor.arte import ArteTVBaseIE
from ..compat import (
compat_str,
)
from ..utils import (
ExtractorError,
int_or_none,
qualities,
try_get,
unified_strdate,
)
def _extract_from_json_url(self, json_url, video_id, lang, title=None):
info = self._download_json(json_url, video_id)
player_info = info['videoJsonPlayer']
vsr = try_get(player_info, lambda x: x['VSR'], dict)
if not vsr:
error = None
if try_get(player_info, lambda x: x['custom_msg']['type']) == 'error':
error = try_get(
player_info, lambda x: x['custom_msg']['msg'], compat_str)
if not error:
error = 'Video %s is not available' % player_info.get('VID') or video_id
raise ExtractorError(error, expected=True)
upload_date_str = player_info.get('shootingDate')
if not upload_date_str:
upload_date_str = (player_info.get('VRA') or player_info.get('VDA') or '').split(' ')[0]
title = (player_info.get('VTI') or title or player_info['VID']).strip()
subtitle = player_info.get('VSU', '').strip()
if subtitle:
title += ' - %s' % subtitle
info_dict = {
'id': player_info['VID'],
'title': title,
'description': player_info.get('VDE'),
'upload_date': unified_strdate(upload_date_str),
'thumbnail': player_info.get('programImage') or player_info.get('VTU', {}).get('IUR'),
}
qfunc = qualities(['HQ', 'MQ', 'EQ', 'SQ'])
LANGS = {
'fr': 'F',
'de': 'A',
'en': 'E[ANG]',
'es': 'E[ESP]',
}
langcode = LANGS.get(lang, lang)
formats = []
temp = {format_id : format_dict for format_id, format_dict in list(vsr.items()) if dict(format_dict).get('versionShortLibelle').lower() == lang}
if temp:
vsr = temp
for format_id, format_dict in list(vsr.items()):
f = dict(format_dict)
versionCode = f.get('versionCode')
l = re.escape(langcode)
# Language preference from most to least priority
# Reference: section 5.6.3 of
# http://www.arte.tv/sites/en/corporate/files/complete-technical-guidelines-arte-geie-v1-05.pdf
PREFERENCES = (
# original version in requested language, without subtitles
r'VO{0}$'.format(l),
# original version in requested language, with partial subtitles in requested language
r'VO{0}-ST{0}$'.format(l),
# original version in requested language, with subtitles for the deaf and hard-of-hearing in requested language
r'VO{0}-STM{0}$'.format(l),
# non-original (dubbed) version in requested language, without subtitles
r'V{0}$'.format(l),
# non-original (dubbed) version in requested language, with subtitles partial subtitles in requested language
r'V{0}-ST{0}$'.format(l),
# non-original (dubbed) version in requested language, with subtitles for the deaf and hard-of-hearing in requested language
r'V{0}-STM{0}$'.format(l),
# original version in requested language, with partial subtitles in different language
r'VO{0}-ST(?!{0}).+?$'.format(l),
# original version in requested language, with subtitles for the deaf and hard-of-hearing in different language
r'VO{0}-STM(?!{0}).+?$'.format(l),
# original version in different language, with partial subtitles in requested language
r'VO(?:(?!{0}).+?)?-ST{0}$'.format(l),
# original version in different language, with subtitles for the deaf and hard-of-hearing in requested language
r'VO(?:(?!{0}).+?)?-STM{0}$'.format(l),
# original version in different language, without subtitles
r'VO(?:(?!{0}))?$'.format(l),
# original version in different language, with partial subtitles in different language
r'VO(?:(?!{0}).+?)?-ST(?!{0}).+?$'.format(l),
# original version in different language, with subtitles for the deaf and hard-of-hearing in different language
r'VO(?:(?!{0}).+?)?-STM(?!{0}).+?$'.format(l),
)
for pref, p in enumerate(PREFERENCES):
if re.match(p, versionCode):
lang_pref = len(PREFERENCES) - pref
break
else:
lang_pref = -1
format = {
'format_id': format_id,
'preference': -10 if f.get('videoFormat') == 'M3U8' else None,
'language_preference': lang_pref,
'format_note': '%s, %s' % (f.get('versionCode'), f.get('versionLibelle')),
'width': int_or_none(f.get('width')),
'height': int_or_none(f.get('height')),
'tbr': int_or_none(f.get('bitrate')),
'quality': qfunc(f.get('quality')),
}
if f.get('mediaType') == 'rtmp':
format['url'] = f['streamer']
format['play_path'] = 'mp4:' + f['url']
format['ext'] = 'flv'
else:
format['url'] = f['url']
formats.append(format)
self._check_formats(formats, video_id)
self._sort_formats(formats)
info_dict['formats'] = formats
return info_dict
ArteTVBaseIE._extract_from_json_url = _extract_from_json_url
|
[
"re.match",
"re.escape"
] |
[((2047, 2066), 're.escape', 're.escape', (['langcode'], {}), '(langcode)\n', (2056, 2066), False, 'import re\n'), ((4321, 4345), 're.match', 're.match', (['p', 'versionCode'], {}), '(p, versionCode)\n', (4329, 4345), False, 'import re\n')]
|
import os
ls=["python main.py --configs configs/eval_covid20cases_unetplusplus_timm-regnetx_002_0_CoarseDropout.yml",
"python main.py --configs configs/eval_covid20cases_unetplusplus_timm-regnetx_002_1_CoarseDropout.yml",
"python main.py --configs configs/eval_covid20cases_unetplusplus_timm-regnetx_002_2_CoarseDropout.yml",
"python main.py --configs configs/eval_covid20cases_unetplusplus_timm-regnetx_002_3_CoarseDropout.yml",
"python main.py --configs configs/eval_covid20cases_unetplusplus_timm-regnetx_002_4_CoarseDropout.yml",
]
for l in ls:
os.system(l)
|
[
"os.system"
] |
[((553, 565), 'os.system', 'os.system', (['l'], {}), '(l)\n', (562, 565), False, 'import os\n')]
|
import argparse
import numpy as np
from packaging import version
import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="2,3"
from PIL import Image
import matplotlib.pyplot as plt
import cv2
from skimage.transform import rotate
import torch
from torch.autograd import Variable
import torch.nn as nn
from torch.utils import data
from models.unet import UNet
from dataset.refuge import REFUGE
NUM_CLASSES = 3
NUM_STEPS = 512 # Number of images in the validation set.
RESTORE_FROM = '/home/charlietran/CADA_Tutorial/Model_Weights/Trial1/UNet1000_v18_weightedclass.pth'
SAVE_PATH = '/home/charlietran/CADA_Tutorial/result/Trial1/'
MODEL = 'Unet'
BATCH_SIZE = 1
is_polar = False #If need to transfer the image and labels to polar coordinates: MICCAI version is False
ROI_size = 700 #ROI size
from evaluation.evaluation_segmentation import *
print(RESTORE_FROM)
palette=[
255, 255, 255, # black background
128, 128, 128, # index 1 is red
0, 0, 0, # index 2 is yellow
0, 0 , 0 # index 3 is orange
]
zero_pad = 256 * 3 - len(palette)
for i in range(zero_pad):
palette.append(0)
def colorize_mask(mask):
# mask: numpy array of the mask
new_mask = Image.fromarray(mask.astype(np.uint8)).convert('P')
new_mask.putpalette(palette)
return new_mask
def get_arguments():
"""Parse all the arguments provided from the CLI.
Returns:
A list of parsed arguments.
"""
parser = argparse.ArgumentParser(description="Unet Network")
parser.add_argument("--model", type=str, default=MODEL,
help="Model Choice Unet.")
parser.add_argument("--num-classes", type=int, default=NUM_CLASSES,
help="Number of classes to predict (including background).")
parser.add_argument("--restore-from", type=str, default=RESTORE_FROM,
help="Where restore model parameters from.")
parser.add_argument("--batch-size", type=int, default=BATCH_SIZE,
help="Number of images sent to the network in one step.")
parser.add_argument("--gpu", type=int, default=0,
help="choose gpu device.")
parser.add_argument("--save", type=str, default=SAVE_PATH,
help="Path to save result.")
parser.add_argument("--is_polar", type=bool, default=False,
help="If proceed images in polar coordinate. MICCAI version is false")
parser.add_argument("--ROI_size", type=int, default=460,
help="Size of ROI.")
parser.add_argument('--t', type=int, default=3, help='t for Recurrent step of R2U_Net or R2AttU_Net')
return parser.parse_args()
def main():
"""Create the model and start the evaluation process."""
args = get_arguments()
gpu0 = args.gpu
if not os.path.exists(args.save):
os.makedirs(args.save)
model = UNet(3, n_classes=args.num_classes)
saved_state_dict = torch.load(args.restore_from)
model.load_state_dict(saved_state_dict)
model.cuda(gpu0)
model.train()
testloader = data.DataLoader(REFUGE(False, domain='REFUGE_TEST', is_transform=True),
batch_size=args.batch_size, shuffle=False, pin_memory=True)
if version.parse(torch.__version__) >= version.parse('0.4.0'):
interp = nn.Upsample(size=(ROI_size, ROI_size), mode='bilinear', align_corners=True)
else:
interp = nn.Upsample(size=(ROI_size, ROI_size), mode='bilinear')
for index, batch in enumerate(testloader):
if index % 100 == 0:
print('%d processd' % index)
image, label, _, _, name = batch
if args.model == 'Unet':
_,_,_,_, output2 = model(Variable(image, volatile=True).cuda(gpu0))
output = interp(output2).cpu().data.numpy()
for idx, one_name in enumerate(name):
pred = output[idx]
pred = pred.transpose(1,2,0)
pred = np.asarray(np.argmax(pred, axis=2), dtype=np.uint8)
output_col = colorize_mask(pred)
print(output_col.size)
one_name = one_name.split('/')[-1]
output_col = output_col.convert('L')
output_col.save('%s/%s.bmp' % (args.save, one_name))
if __name__ == '__main__':
main()
results_folder = SAVE_PATH
gt_folder = '/DATA/charlie/AWC/CADA_Tutorial_Image/Target_Test/mask/'
output_path = results_folder
export_table = True
evaluate_segmentation_results(results_folder, gt_folder, output_path, export_table)
|
[
"os.makedirs",
"argparse.ArgumentParser",
"numpy.argmax",
"torch.autograd.Variable",
"torch.load",
"dataset.refuge.REFUGE",
"os.path.exists",
"packaging.version.parse",
"torch.nn.Upsample",
"models.unet.UNet"
] |
[((1465, 1516), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Unet Network"""'}), "(description='Unet Network')\n", (1488, 1516), False, 'import argparse\n'), ((2912, 2947), 'models.unet.UNet', 'UNet', (['(3)'], {'n_classes': 'args.num_classes'}), '(3, n_classes=args.num_classes)\n', (2916, 2947), False, 'from models.unet import UNet\n'), ((2972, 3001), 'torch.load', 'torch.load', (['args.restore_from'], {}), '(args.restore_from)\n', (2982, 3001), False, 'import torch\n'), ((2841, 2866), 'os.path.exists', 'os.path.exists', (['args.save'], {}), '(args.save)\n', (2855, 2866), False, 'import os\n'), ((2876, 2898), 'os.makedirs', 'os.makedirs', (['args.save'], {}), '(args.save)\n', (2887, 2898), False, 'import os\n'), ((3120, 3174), 'dataset.refuge.REFUGE', 'REFUGE', (['(False)'], {'domain': '"""REFUGE_TEST"""', 'is_transform': '(True)'}), "(False, domain='REFUGE_TEST', is_transform=True)\n", (3126, 3174), False, 'from dataset.refuge import REFUGE\n'), ((3281, 3313), 'packaging.version.parse', 'version.parse', (['torch.__version__'], {}), '(torch.__version__)\n', (3294, 3313), False, 'from packaging import version\n'), ((3317, 3339), 'packaging.version.parse', 'version.parse', (['"""0.4.0"""'], {}), "('0.4.0')\n", (3330, 3339), False, 'from packaging import version\n'), ((3358, 3433), 'torch.nn.Upsample', 'nn.Upsample', ([], {'size': '(ROI_size, ROI_size)', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(size=(ROI_size, ROI_size), mode='bilinear', align_corners=True)\n", (3369, 3433), True, 'import torch.nn as nn\n'), ((3461, 3516), 'torch.nn.Upsample', 'nn.Upsample', ([], {'size': '(ROI_size, ROI_size)', 'mode': '"""bilinear"""'}), "(size=(ROI_size, ROI_size), mode='bilinear')\n", (3472, 3516), True, 'import torch.nn as nn\n'), ((3997, 4020), 'numpy.argmax', 'np.argmax', (['pred'], {'axis': '(2)'}), '(pred, axis=2)\n', (4006, 4020), True, 'import numpy as np\n'), ((3747, 3777), 'torch.autograd.Variable', 'Variable', (['image'], {'volatile': '(True)'}), '(image, volatile=True)\n', (3755, 3777), False, 'from torch.autograd import Variable\n')]
|
import os
import unittest
import subprocess
from paper2tmb.manipulator import Manipulator
class TestManipulator(unittest.TestCase):
def test_init(self):
with Manipulator('test.pdf') as m:
self.assertTrue(os.path.isdir(m.dirname))
def test_pdf2png(self):
with Manipulator("paper2tmb/tests/testdata/1412.6785v2.pdf") as m:
m.pdf2png()
for i in range(12):
self.assertTrue(os.path.exists(os.path.join(m.dirname, "pdf2png-{}.png".format(i))))
self.assertTrue(m._last == os.path.join(m.dirname, "pdf2png.png"))
def test_pdf2png_trim(self):
with Manipulator("paper2tmb/tests/testdata/1412.6785v2.pdf") as m:
m.pdf2png(trim="100x100")
for i in range(12):
self.assertTrue(os.path.exists(os.path.join(m.dirname, "pdf2png-{}.png".format(i))))
self.assertTrue(m._last == os.path.join(m.dirname, "pdf2png.png"))
def test_pdf2png_density(self):
with Manipulator("paper2tmb/tests/testdata/1412.6785v2.pdf") as m:
m.pdf2png(density="20")
for i in range(12):
self.assertTrue(os.path.exists(os.path.join(m.dirname, "pdf2png-{}.png".format(i))))
self.assertTrue(m._last == os.path.join(m.dirname, "pdf2png.png"))
def test_pdf2png_both_trim_density(self):
with Manipulator("paper2tmb/tests/testdata/1412.6785v2.pdf") as m:
m.pdf2png(trim="300x300", density="10")
for i in range(12):
self.assertTrue(os.path.exists(os.path.join(m.dirname, "pdf2png-{}.png".format(i))))
self.assertTrue(m._last == os.path.join(m.dirname, "pdf2png.png"))
def test_stack(self):
with Manipulator("paper2tmb/tests/testdata/1412.6785v2.pdf") as m:
m.pdf2png()
m.stack(4, 2)
self.assertTrue(os.path.exists(os.path.join(m.dirname, "stack_row_0.png")))
self.assertTrue(os.path.exists(os.path.join(m.dirname, "stack_row_1.png")))
self.assertTrue(os.path.exists(os.path.join(m.dirname, "stack.png")))
self.assertTrue(m._last == os.path.join(m.dirname, "stack.png"))
def test_stack(self):
with Manipulator("paper2tmb/tests/testdata/1412.6785v2.pdf") as m:
m.pdf2png(trim="100x60")
m.stack(6, 2)
m.resize("x400")
self.assertTrue(os.path.exists(os.path.join(m.dirname, "resize_x400.png")))
self.assertTrue(m._last == os.path.join(m.dirname, "resize_x400.png"))
def test_top(self):
with Manipulator("paper2tmb/tests/testdata/1412.6785v2.pdf") as m:
m.pdf2png(trim="400x240", density="300x300")
m.top("60%")
self.assertTrue(os.path.exists(os.path.join(m.dirname, "top_60%-0.png")))
self.assertTrue(m._last == os.path.join(m.dirname, "top_60%-0.png"))
def test_out(self):
with Manipulator("paper2tmb/tests/testdata/1412.6785v2.pdf") as m:
target = "paper2tmb/tests/testdata/out.pdf"
m.out(target)
self.assertTrue(os.path.exists(target))
subprocess.call(["rm", target])
|
[
"paper2tmb.manipulator.Manipulator",
"os.path.isdir",
"os.path.exists",
"subprocess.call",
"os.path.join"
] |
[((3156, 3187), 'subprocess.call', 'subprocess.call', (["['rm', target]"], {}), "(['rm', target])\n", (3171, 3187), False, 'import subprocess\n'), ((174, 197), 'paper2tmb.manipulator.Manipulator', 'Manipulator', (['"""test.pdf"""'], {}), "('test.pdf')\n", (185, 197), False, 'from paper2tmb.manipulator import Manipulator\n'), ((300, 355), 'paper2tmb.manipulator.Manipulator', 'Manipulator', (['"""paper2tmb/tests/testdata/1412.6785v2.pdf"""'], {}), "('paper2tmb/tests/testdata/1412.6785v2.pdf')\n", (311, 355), False, 'from paper2tmb.manipulator import Manipulator\n'), ((647, 702), 'paper2tmb.manipulator.Manipulator', 'Manipulator', (['"""paper2tmb/tests/testdata/1412.6785v2.pdf"""'], {}), "('paper2tmb/tests/testdata/1412.6785v2.pdf')\n", (658, 702), False, 'from paper2tmb.manipulator import Manipulator\n'), ((1011, 1066), 'paper2tmb.manipulator.Manipulator', 'Manipulator', (['"""paper2tmb/tests/testdata/1412.6785v2.pdf"""'], {}), "('paper2tmb/tests/testdata/1412.6785v2.pdf')\n", (1022, 1066), False, 'from paper2tmb.manipulator import Manipulator\n'), ((1383, 1438), 'paper2tmb.manipulator.Manipulator', 'Manipulator', (['"""paper2tmb/tests/testdata/1412.6785v2.pdf"""'], {}), "('paper2tmb/tests/testdata/1412.6785v2.pdf')\n", (1394, 1438), False, 'from paper2tmb.manipulator import Manipulator\n'), ((1751, 1806), 'paper2tmb.manipulator.Manipulator', 'Manipulator', (['"""paper2tmb/tests/testdata/1412.6785v2.pdf"""'], {}), "('paper2tmb/tests/testdata/1412.6785v2.pdf')\n", (1762, 1806), False, 'from paper2tmb.manipulator import Manipulator\n'), ((2241, 2296), 'paper2tmb.manipulator.Manipulator', 'Manipulator', (['"""paper2tmb/tests/testdata/1412.6785v2.pdf"""'], {}), "('paper2tmb/tests/testdata/1412.6785v2.pdf')\n", (2252, 2296), False, 'from paper2tmb.manipulator import Manipulator\n'), ((2605, 2660), 'paper2tmb.manipulator.Manipulator', 'Manipulator', (['"""paper2tmb/tests/testdata/1412.6785v2.pdf"""'], {}), "('paper2tmb/tests/testdata/1412.6785v2.pdf')\n", (2616, 2660), False, 'from paper2tmb.manipulator import Manipulator\n'), ((2955, 3010), 'paper2tmb.manipulator.Manipulator', 'Manipulator', (['"""paper2tmb/tests/testdata/1412.6785v2.pdf"""'], {}), "('paper2tmb/tests/testdata/1412.6785v2.pdf')\n", (2966, 3010), False, 'from paper2tmb.manipulator import Manipulator\n'), ((3124, 3146), 'os.path.exists', 'os.path.exists', (['target'], {}), '(target)\n', (3138, 3146), False, 'import os\n'), ((232, 256), 'os.path.isdir', 'os.path.isdir', (['m.dirname'], {}), '(m.dirname)\n', (245, 256), False, 'import os\n'), ((560, 598), 'os.path.join', 'os.path.join', (['m.dirname', '"""pdf2png.png"""'], {}), "(m.dirname, 'pdf2png.png')\n", (572, 598), False, 'import os\n'), ((921, 959), 'os.path.join', 'os.path.join', (['m.dirname', '"""pdf2png.png"""'], {}), "(m.dirname, 'pdf2png.png')\n", (933, 959), False, 'import os\n'), ((1283, 1321), 'os.path.join', 'os.path.join', (['m.dirname', '"""pdf2png.png"""'], {}), "(m.dirname, 'pdf2png.png')\n", (1295, 1321), False, 'import os\n'), ((1671, 1709), 'os.path.join', 'os.path.join', (['m.dirname', '"""pdf2png.png"""'], {}), "(m.dirname, 'pdf2png.png')\n", (1683, 1709), False, 'import os\n'), ((1908, 1950), 'os.path.join', 'os.path.join', (['m.dirname', '"""stack_row_0.png"""'], {}), "(m.dirname, 'stack_row_0.png')\n", (1920, 1950), False, 'import os\n'), ((1996, 2038), 'os.path.join', 'os.path.join', (['m.dirname', '"""stack_row_1.png"""'], {}), "(m.dirname, 'stack_row_1.png')\n", (2008, 2038), False, 'import os\n'), ((2084, 2120), 'os.path.join', 'os.path.join', 
(['m.dirname', '"""stack.png"""'], {}), "(m.dirname, 'stack.png')\n", (2096, 2120), False, 'import os\n'), ((2163, 2199), 'os.path.join', 'os.path.join', (['m.dirname', '"""stack.png"""'], {}), "(m.dirname, 'stack.png')\n", (2175, 2199), False, 'import os\n'), ((2439, 2481), 'os.path.join', 'os.path.join', (['m.dirname', '"""resize_x400.png"""'], {}), "(m.dirname, 'resize_x400.png')\n", (2451, 2481), False, 'import os\n'), ((2523, 2565), 'os.path.join', 'os.path.join', (['m.dirname', '"""resize_x400.png"""'], {}), "(m.dirname, 'resize_x400.png')\n", (2535, 2565), False, 'import os\n'), ((2793, 2833), 'os.path.join', 'os.path.join', (['m.dirname', '"""top_60%-0.png"""'], {}), "(m.dirname, 'top_60%-0.png')\n", (2805, 2833), False, 'import os\n'), ((2875, 2915), 'os.path.join', 'os.path.join', (['m.dirname', '"""top_60%-0.png"""'], {}), "(m.dirname, 'top_60%-0.png')\n", (2887, 2915), False, 'import os\n')]
|
from rdkit import Chem
def mol_with_atom_index(mol):
atoms = mol.GetNumAtoms()
tmp_mol = Chem.Mol(mol)
for idx in range(atoms):
tmp_mol.GetAtomWithIdx(idx).SetProp('molAtomMapNumber', str(tmp_mol.GetAtomWithIdx(idx).GetIdx()))
return tmp_mol
def unique_mols(sequence):
seen = set()
return [x for x in sequence if not (tuple(x) in seen or seen.add(tuple(x)))]
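# Illustrative usage — a minimal sketch added for clarity. It assumes RDKit is
# installed; 'CCO' (ethanol) is just an arbitrary example SMILES string.
if __name__ == '__main__':
    mol = Chem.MolFromSmiles('CCO')
    labelled = mol_with_atom_index(mol)
    # Atom map numbers set above (where non-zero) show up in the SMILES output.
    print(Chem.MolToSmiles(labelled))
    # unique_mols keeps the first occurrence of each row, preserving order.
    print(unique_mols([[1, 2], [1, 2], [3, 4]]))  # [[1, 2], [3, 4]]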
|
[
"rdkit.Chem.Mol"
] |
[((98, 111), 'rdkit.Chem.Mol', 'Chem.Mol', (['mol'], {}), '(mol)\n', (106, 111), False, 'from rdkit import Chem\n')]
|
# The frozendict is originally available under the following license:
#
# Copyright (c) 2012 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import collections
import copy
_iteritems = getattr(dict, 'iteritems', dict.items) # py2-3 compatibility
class frozendict(collections.Mapping):
"""
An immutable wrapper around dictionaries that implements the complete
:py:class:`collections.Mapping` interface.
It can be used as a drop-in replacement for dictionaries where immutability
is desired.
"""
dict_cls = dict
def __init__(self, *args, **kwargs):
self._dict = self.dict_cls(*args, **kwargs)
self._hash = None
def __getitem__(self, key):
item = self._dict[key]
if isinstance(item, dict):
item = self._dict[key] = frozendict(**item)
elif isinstance(item, list):
item = self._dict[key] = tuple(item)
elif isinstance(item, set):
item = self._dict[key] = frozenset(item)
elif hasattr(item, '__dict__') or hasattr(item, '__slots__'):
return copy.copy(item)
return item
def __contains__(self, key):
return key in self._dict
def copy(self, **add_or_replace):
return self.__class__(self, **add_or_replace)
def __iter__(self):
return iter(self._dict)
def __len__(self):
return len(self._dict)
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, self._dict)
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
result.__dict__.update(dict((
(k, copy.deepcopy(v, memo)) for k, v in self.__dict__.items())))
return result
def __hash__(self):
if self._hash is None:
h = 0
for key, value in _iteritems(self._dict):
h ^= hash((key, value))
self._hash = h
return self._hash
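# Illustrative usage — a minimal sketch added for clarity. Note that
# collections.Mapping was removed in Python 3.10; on newer interpreters the
# class above would have to inherit from collections.abc.Mapping instead.
if __name__ == '__main__':
    fd = frozendict({'a': 1, 'b': 2})
    print(fd['a'], len(fd), 'b' in fd)                        # 1 2 True
    fd2 = fd.copy(a=10)                                       # copy() accepts overrides
    print(fd2['a'], fd['a'])                                  # 10 1
    print(hash(fd) == hash(frozendict({'a': 1, 'b': 2})))     # True: usable as a dict key
    nested = frozendict({'inner': {'x': 1}})
    print(isinstance(nested['inner'], frozendict))            # True: dicts are wrapped lazily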
|
[
"copy.deepcopy",
"copy.copy"
] |
[((2100, 2115), 'copy.copy', 'copy.copy', (['item'], {}), '(item)\n', (2109, 2115), False, 'import copy\n'), ((2682, 2704), 'copy.deepcopy', 'copy.deepcopy', (['v', 'memo'], {}), '(v, memo)\n', (2695, 2704), False, 'import copy\n')]
|
"""
Copyright 2017-present, Airbnb Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from stream_alert_cli.logger import LOGGER_CLI
import boto3
from botocore.exceptions import ClientError
def _rollback_production(lambda_client, function_name):
"""Rollback the production alias for the given function name."""
version = lambda_client.get_alias(
FunctionName=function_name, Name='production')['FunctionVersion']
if version == '$LATEST':
# This won't happen with Terraform, but the alias could have been manually changed.
LOGGER_CLI.error('%s:production is pointing to $LATEST instead of a published version',
function_name)
return
current_version = int(version)
if current_version == 1:
LOGGER_CLI.warn('%s:production is already at version 1', function_name)
return
LOGGER_CLI.info('Rolling back %s:production from version %d => %d',
function_name, current_version, current_version - 1)
try:
lambda_client.update_alias(
FunctionName=function_name, Name='production', FunctionVersion=str(current_version - 1))
except ClientError:
LOGGER_CLI.exception('version not updated')
def rollback(options, config):
"""Rollback the current production Lambda version(s) by 1.
Args:
options: Argparse parsed options
config (dict): Parsed configuration from conf/
"""
rollback_all = 'all' in options.processor
prefix = config['global']['account']['prefix']
clusters = sorted(options.clusters or config.clusters())
client = boto3.client('lambda')
if rollback_all or 'alert' in options.processor:
_rollback_production(client, '{}_streamalert_alert_processor'.format(prefix))
if rollback_all or 'alert_merger' in options.processor:
_rollback_production(client, '{}_streamalert_alert_merger'.format(prefix))
if rollback_all or 'apps' in options.processor:
for cluster in clusters:
apps_config = config['clusters'][cluster]['modules'].get('stream_alert_apps', {})
for lambda_name in sorted(apps_config):
_rollback_production(client, lambda_name)
if rollback_all or 'athena' in options.processor:
_rollback_production(client, '{}_streamalert_athena_partition_refresh'.format(prefix))
if rollback_all or 'rule' in options.processor:
for cluster in clusters:
_rollback_production(client, '{}_{}_streamalert_rule_processor'.format(prefix, cluster))
if rollback_all or 'threat_intel_downloader' in options.processor:
_rollback_production(client, '{}_streamalert_threat_intel_downloader'.format(prefix))
|
[
"stream_alert_cli.logger.LOGGER_CLI.error",
"stream_alert_cli.logger.LOGGER_CLI.exception",
"boto3.client",
"stream_alert_cli.logger.LOGGER_CLI.warn",
"stream_alert_cli.logger.LOGGER_CLI.info"
] |
[((1351, 1475), 'stream_alert_cli.logger.LOGGER_CLI.info', 'LOGGER_CLI.info', (['"""Rolling back %s:production from version %d => %d"""', 'function_name', 'current_version', '(current_version - 1)'], {}), "('Rolling back %s:production from version %d => %d',\n function_name, current_version, current_version - 1)\n", (1366, 1475), False, 'from stream_alert_cli.logger import LOGGER_CLI\n'), ((2096, 2118), 'boto3.client', 'boto3.client', (['"""lambda"""'], {}), "('lambda')\n", (2108, 2118), False, 'import boto3\n'), ((1043, 1154), 'stream_alert_cli.logger.LOGGER_CLI.error', 'LOGGER_CLI.error', (['"""%s:production is pointing to $LATEST instead of a published version"""', 'function_name'], {}), "(\n '%s:production is pointing to $LATEST instead of a published version',\n function_name)\n", (1059, 1154), False, 'from stream_alert_cli.logger import LOGGER_CLI\n'), ((1259, 1330), 'stream_alert_cli.logger.LOGGER_CLI.warn', 'LOGGER_CLI.warn', (['"""%s:production is already at version 1"""', 'function_name'], {}), "('%s:production is already at version 1', function_name)\n", (1274, 1330), False, 'from stream_alert_cli.logger import LOGGER_CLI\n'), ((1670, 1713), 'stream_alert_cli.logger.LOGGER_CLI.exception', 'LOGGER_CLI.exception', (['"""version not updated"""'], {}), "('version not updated')\n", (1690, 1713), False, 'from stream_alert_cli.logger import LOGGER_CLI\n')]
|
from ortools.linear_solver import pywraplp
from ortools.sat.python import cp_model
def main():
solver = pywraplp.Solver.CreateSolver('SCIP')
infinity = solver.infinity()
# wrenches
wrenches = solver.IntVar(0.0, infinity, 'wrenches')
# pliers
pliers = solver.IntVar(0.0, infinity, 'pliers')
print('Number of variables =', solver.NumVariables())
# constraints
# steel
solver.Add(1.5 * wrenches + pliers <= 27000)
# molding
solver.Add(1.0 * wrenches + pliers <= 21000)
# assembly
solver.Add(0.3 * wrenches + 0.5 * pliers <= 9000)
# demand1
solver.Add(wrenches <= 15000)
# demand2
solver.Add(pliers <= 16000)
print('Number of constraints =', solver.NumConstraints())
# objective function
solver.Maximize(0.13 * wrenches + 0.10 * pliers)
status = solver.Solve()
if status == pywraplp.Solver.OPTIMAL:
print('Solution:')
print('Objective value =', solver.Objective().Value())
print('Wrenches =', wrenches.solution_value())
print('Pliers =', pliers.solution_value())
print('Slack steel', (27000 - (1.5 * wrenches.solution_value() + pliers.solution_value())))
print('Slack molding', (21000 - (1.0 * wrenches.solution_value() + pliers.solution_value())))
print('Slack assembly',(9000 -(0.3 * wrenches.solution_value() + 0.5 * pliers.solution_value())))
print('Slack demand1',(15000 - wrenches.solution_value()))
print('Slack demand2',(16000 - pliers.solution_value()))
else:
print('The problem does not have an optimal solution.')
print('Problem solved in %f milliseconds' % solver.wall_time())
print('Problem solved in %d iterations' % solver.iterations())
print('Problem solved in %d branch-and-bound nodes' % solver.nodes())
if __name__ == '__main__':
main()
|
[
"ortools.linear_solver.pywraplp.Solver.CreateSolver"
] |
[((109, 145), 'ortools.linear_solver.pywraplp.Solver.CreateSolver', 'pywraplp.Solver.CreateSolver', (['"""SCIP"""'], {}), "('SCIP')\n", (137, 145), False, 'from ortools.linear_solver import pywraplp\n')]
|
from django.urls import path, include
from . import views
urlpatterns = [
path('accounts/', include('registration.backends.simple.urls')),
path('', views.FrontPage, name='FrontPage'),
path('tags', views.TagList, name='TagList'),
path('clips', views.ClipList, name='ClipList'),
path('playlist/<str:tag_title>', views.Playlist, name='Playlist'),
path('subscribe', views.Subscribe, name='Subscribe'),
path('upload', views.UploadSon, name='UploadSon'),
path('soundcloud_iframe/<str:soundcloud_id>', views.SoundcloudIframe),
path('youtube_iframe/<str:youtube_id>', views.YoutubeIframe),
path('vimeo_iframe/<str:vimeo_id>', views.VimeoIframe),
]
|
[
"django.urls.path",
"django.urls.include"
] |
[((152, 195), 'django.urls.path', 'path', (['""""""', 'views.FrontPage'], {'name': '"""FrontPage"""'}), "('', views.FrontPage, name='FrontPage')\n", (156, 195), False, 'from django.urls import path, include\n'), ((202, 245), 'django.urls.path', 'path', (['"""tags"""', 'views.TagList'], {'name': '"""TagList"""'}), "('tags', views.TagList, name='TagList')\n", (206, 245), False, 'from django.urls import path, include\n'), ((252, 298), 'django.urls.path', 'path', (['"""clips"""', 'views.ClipList'], {'name': '"""ClipList"""'}), "('clips', views.ClipList, name='ClipList')\n", (256, 298), False, 'from django.urls import path, include\n'), ((305, 370), 'django.urls.path', 'path', (['"""playlist/<str:tag_title>"""', 'views.Playlist'], {'name': '"""Playlist"""'}), "('playlist/<str:tag_title>', views.Playlist, name='Playlist')\n", (309, 370), False, 'from django.urls import path, include\n'), ((377, 429), 'django.urls.path', 'path', (['"""subscribe"""', 'views.Subscribe'], {'name': '"""Subscribe"""'}), "('subscribe', views.Subscribe, name='Subscribe')\n", (381, 429), False, 'from django.urls import path, include\n'), ((436, 485), 'django.urls.path', 'path', (['"""upload"""', 'views.UploadSon'], {'name': '"""UploadSon"""'}), "('upload', views.UploadSon, name='UploadSon')\n", (440, 485), False, 'from django.urls import path, include\n'), ((492, 561), 'django.urls.path', 'path', (['"""soundcloud_iframe/<str:soundcloud_id>"""', 'views.SoundcloudIframe'], {}), "('soundcloud_iframe/<str:soundcloud_id>', views.SoundcloudIframe)\n", (496, 561), False, 'from django.urls import path, include\n'), ((568, 628), 'django.urls.path', 'path', (['"""youtube_iframe/<str:youtube_id>"""', 'views.YoutubeIframe'], {}), "('youtube_iframe/<str:youtube_id>', views.YoutubeIframe)\n", (572, 628), False, 'from django.urls import path, include\n'), ((635, 689), 'django.urls.path', 'path', (['"""vimeo_iframe/<str:vimeo_id>"""', 'views.VimeoIframe'], {}), "('vimeo_iframe/<str:vimeo_id>', views.VimeoIframe)\n", (639, 689), False, 'from django.urls import path, include\n'), ((100, 144), 'django.urls.include', 'include', (['"""registration.backends.simple.urls"""'], {}), "('registration.backends.simple.urls')\n", (107, 144), False, 'from django.urls import path, include\n')]
|
import pandas as pd
import numpy as np
import math
import util
def gimme_pseudo_winsors(inputDf, col, pw=0.05):
return util.round_to_sf(inputDf[col].quantile(pw),3), util.round_to_sf(inputDf[col].quantile(1-pw),3)
def gimme_starting_affect(inputDf, col, segs):
x = inputDf[col]
x1 = float(segs[0])
x2 = float(segs[1])
affectedness = pd.Series([0]*len(inputDf))
affectedness.loc[(x<x1)] = 1
affectedness.loc[(x>=x1) & (x<x2)] = (x2 - x)/(x2 - x1)
return sum(affectedness)
def gimme_normie_affect(inputDf, col, segs, posn):
x = inputDf[col]
x1 = float(segs[posn-1])
x2 = float(segs[posn])
x3 = float(segs[posn+1])
affectedness = pd.Series([0]*len(inputDf))
affectedness.loc[(x>=x1) & (x<x2)] = (x - x1)/(x2 - x1)
affectedness.loc[(x>=x2) & (x<x3)] = (x3 - x)/(x3 - x2)
return sum(affectedness)
def gimme_ending_affect(inputDf, col, segs):
x = inputDf[col]
x1 = float(segs[-2])
x2 = float(segs[-1])
affectedness = pd.Series([0]*len(inputDf))
affectedness.loc[(x>=x2)] = 1
affectedness.loc[(x>=x1) & (x<x2)] = (x - x1)/(x2 - x1)
return sum(affectedness)
def gimme_sa_optimizing_func(inputDf, col, segsSoFar):
def sa_optimizing_func(x):
return gimme_starting_affect(inputDf, col, segsSoFar+[x])
return sa_optimizing_func
def gimme_na_optimizing_func(inputDf, col, segsSoFar):
def na_optimizing_func(x):
return gimme_normie_affect(inputDf, col, segsSoFar+[x], len(segsSoFar)-1)
return na_optimizing_func
def gimme_pa_optimizing_func(inputDf, col, segsSoFar, end):
def pa_optimizing_func(x):
return gimme_normie_affect(inputDf, col, segsSoFar+[x]+[end], len(segsSoFar))
return pa_optimizing_func
if __name__ == "__main__":
dyct = {"x":list(range(100))}
df=pd.DataFrame(dyct)
start, end = gimme_pseudo_winsors(df, "x")
print(start, end)
targetLen=5
goodAmt=float(len(df))/targetLen
segs = [start]
print(segs)
if targetLen>2:
optFunc = gimme_sa_optimizing_func(df, "x", segs)
next = util.target_input_with_output(optFunc, goodAmt, start, end)
segs.append(util.round_to_sf(next,3))
print(segs)
for i in range(targetLen-3):
optFunc = gimme_na_optimizing_func(df, "x", segs)
next = util.target_input_with_output(optFunc, goodAmt, start, end)
segs.append(util.round_to_sf(next,3))
print(segs)
segs.append(end)
print(segs)
print([gimme_starting_affect(df, "x", segs), gimme_normie_affect(df, "x", segs, 1), gimme_normie_affect(df, "x", segs, 2), gimme_normie_affect(df, "x", segs, 3), gimme_ending_affect(df, "x", segs)])
|
[
"pandas.DataFrame",
"util.target_input_with_output",
"util.round_to_sf"
] |
[((1694, 1712), 'pandas.DataFrame', 'pd.DataFrame', (['dyct'], {}), '(dyct)\n', (1706, 1712), True, 'import pandas as pd\n'), ((1931, 1990), 'util.target_input_with_output', 'util.target_input_with_output', (['optFunc', 'goodAmt', 'start', 'end'], {}), '(optFunc, goodAmt, start, end)\n', (1960, 1990), False, 'import util\n'), ((2136, 2195), 'util.target_input_with_output', 'util.target_input_with_output', (['optFunc', 'goodAmt', 'start', 'end'], {}), '(optFunc, goodAmt, start, end)\n', (2165, 2195), False, 'import util\n'), ((2005, 2030), 'util.round_to_sf', 'util.round_to_sf', (['next', '(3)'], {}), '(next, 3)\n', (2021, 2030), False, 'import util\n'), ((2210, 2235), 'util.round_to_sf', 'util.round_to_sf', (['next', '(3)'], {}), '(next, 3)\n', (2226, 2235), False, 'import util\n')]
|
# TODO: Explain 8 corners logic at the top and use it consistently
# Add comments of explanation
import numpy as np
import scipy.spatial
from .rotation import rotate_points_along_z
def get_size(box):
"""
Args:
box: 8x3
Returns:
size: [dx, dy, dz]
"""
distance = scipy.spatial.distance.cdist(box[0:1, :], box[1:5, :])
l = distance[0, 2]
w = distance[0, 0]
h = distance[0, 3]
return [l, w, h]
def get_heading_angle(box):
"""
Args:
box: (8, 3)
Returns:
heading_angle: float
"""
a = box[0, 0] - box[1, 0]
b = box[0, 1] - box[1, 1]
heading_angle = np.arctan2(a, b)
return heading_angle
def compute_box_3d(size, center, rotmat):
"""Compute corners of a single box from rotation matrix
Args:
size: list of float [dx, dy, dz]
center: np.array [x, y, z]
rotmat: np.array (3, 3)
Returns:
corners: (8, 3)
"""
l, h, w = [i / 2 for i in size]
center = np.reshape(center, (-1, 3))
center = center.reshape(3)
x_corners = [l, l, -l, -l, l, l, -l, -l]
y_corners = [h, -h, -h, h, h, -h, -h, h]
z_corners = [w, w, w, w, -w, -w, -w, -w]
corners_3d = np.dot(
np.transpose(rotmat), np.vstack([x_corners, y_corners, z_corners])
)
corners_3d[0, :] += center[0]
corners_3d[1, :] += center[1]
corners_3d[2, :] += center[2]
return np.transpose(corners_3d)
def corners_to_boxes(corners3d):
"""
7 -------- 4
/| /|
6 -------- 5 .
| | | |
. 3 -------- 0
|/ |/
2 -------- 1
Args:
corners: (N, 8, 3), vertex order shown in figure above
Returns:
boxes3d: (N, 7) [x, y, z, dx, dy, dz, heading]
with (x, y, z) is the box center
(dx, dy, dz) as the box size
and heading as the clockwise rotation angle
"""
boxes3d = np.zeros((corners3d.shape[0], 7))
for i in range(corners3d.shape[0]):
boxes3d[i, :3] = np.mean(corners3d[i, :, :], axis=0)
boxes3d[i, 3:6] = get_size(corners3d[i, :, :])
boxes3d[i, 6] = get_heading_angle(corners3d[i, :, :])
return boxes3d
def boxes_to_corners_3d(boxes3d):
"""
7 -------- 4
/| /|
6 -------- 5 .
| | | |
. 3 -------- 0
|/ |/
2 -------- 1
Args:
boxes3d: (N, 7) [x, y, z, dx, dy, dz, heading],
(x, y, z) is the box center
Returns:
corners: (N, 8, 3)
"""
template = np.array([[1, 1, -1],
[1, -1, -1],
[-1, -1, -1],
[-1, 1, -1],
[1, 1, 1],
[1, -1, 1],
[-1, -1, 1],
[-1, 1, 1]]
) / 2.
# corners3d: of shape (N, 3, 8)
corners3d = np.tile(boxes3d[:, None, 3:6], (1, 8, 1)) * template[None, :, :]
corners3d = rotate_points_along_z(corners3d.reshape(-1, 8, 3), boxes3d[:, 6]).reshape(
-1, 8, 3
)
corners3d += boxes3d[:, None, 0:3]
return corners3d
def points_in_boxes(points, boxes):
"""
Args:
pc: np.array (n, 3+d)
boxes: np.array (m, 8, 3)
Returns:
mask: np.array (n, m) of type bool
"""
if len(boxes) == 0:
return np.zeros([points.shape[0], 1], dtype=np.bool)
points = points[:, :3] # get xyz
# u = p6 - p5
u = boxes[:, 6, :] - boxes[:, 5, :] # (m, 3)
# v = p6 - p7
v = boxes[:, 6, :] - boxes[:, 7, :] # (m, 3)
# w = p6 - p2
w = boxes[:, 6, :] - boxes[:, 2, :] # (m, 3)
# ux, vx, wx
ux = np.matmul(points, u.T) # (n, m)
vx = np.matmul(points, v.T)
wx = np.matmul(points, w.T)
# up6, up5, vp6, vp7, wp6, wp2
up6 = np.sum(u * boxes[:, 6, :], axis=1)
up5 = np.sum(u * boxes[:, 5, :], axis=1)
vp6 = np.sum(v * boxes[:, 6, :], axis=1)
vp7 = np.sum(v * boxes[:, 7, :], axis=1)
wp6 = np.sum(w * boxes[:, 6, :], axis=1)
wp2 = np.sum(w * boxes[:, 2, :], axis=1)
mask_u = np.logical_and(ux <= up6, ux >= up5) # (1024, n)
mask_v = np.logical_and(vx <= vp6, vx >= vp7)
mask_w = np.logical_and(wx <= wp6, wx >= wp2)
mask = mask_u & mask_v & mask_w # (10240, n)
return mask
def poly_area(x,y):
""" Ref: http://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates """
return 0.5*np.abs(np.dot(x,np.roll(y,1))-np.dot(y,np.roll(x,1)))
def polygon_clip(subjectPolygon, clipPolygon):
""" Clip a polygon with another polygon.
Ref: https://rosettacode.org/wiki/Sutherland-Hodgman_polygon_clipping#Python
Args:
subjectPolygon: a list of (x,y) 2d points, any polygon.
clipPolygon: a list of (x,y) 2d points, has to be *convex*
Note:
**points have to be counter-clockwise ordered**
Return:
a list of (x,y) vertex point for the intersection polygon.
"""
def inside(p):
return (cp2[0] - cp1[0]) * (p[1] - cp1[1]) > (cp2[1] - cp1[1]) * (p[0] - cp1[0])
def computeIntersection():
dc = [cp1[0] - cp2[0], cp1[1] - cp2[1]]
dp = [s[0] - e[0], s[1] - e[1]]
n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0]
n2 = s[0] * e[1] - s[1] * e[0]
n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0])
return [(n1 * dp[0] - n2 * dc[0]) * n3, (n1 * dp[1] - n2 * dc[1]) * n3]
outputList = subjectPolygon
cp1 = clipPolygon[-1]
for clipVertex in clipPolygon:
cp2 = clipVertex
inputList = outputList
outputList = []
s = inputList[-1]
for subjectVertex in inputList:
e = subjectVertex
if inside(e):
if not inside(s):
outputList.append(computeIntersection())
outputList.append(e)
elif inside(s):
outputList.append(computeIntersection())
s = e
cp1 = cp2
if len(outputList) == 0:
return None
return (outputList)
def convex_hull_intersection(p1, p2):
""" Compute area of two convex hull's intersection area.
p1,p2 are a list of (x,y) tuples of hull vertices.
return a list of (x,y) for the intersection and its volume
"""
inter_p = polygon_clip(p1,p2)
if inter_p is not None:
hull_inter = scipy.spatial.ConvexHull(inter_p)
return inter_p, hull_inter.volume
else:
return None, 0.0
def box3d_vol(corners):
''' corners: (8,3) no assumption on axis direction '''
a = np.sqrt(np.sum((corners[0,:] - corners[1,:])**2))
b = np.sqrt(np.sum((corners[1,:] - corners[2,:])**2))
c = np.sqrt(np.sum((corners[0,:] - corners[4,:])**2))
return a*b*c
def box3d_iou(corners1, corners2):
''' Compute 3D bounding box IoU.
Input:
corners1: numpy array (8,3), assume up direction is negative Y
corners2: numpy array (8,3), assume up direction is negative Y
Output:
iou: 3D bounding box IoU
iou_2d: bird's eye view 2D bounding box IoU
'''
# corner points are in counter clockwise order
rect1 = [(corners1[i,0], corners1[i,1]) for i in range(3,-1,-1)]
rect2 = [(corners2[i,0], corners2[i,1]) for i in range(3,-1,-1)]
area1 = poly_area(np.array(rect1)[:,0], np.array(rect1)[:,1])
area2 = poly_area(np.array(rect2)[:,0], np.array(rect2)[:,1])
inter, inter_area = convex_hull_intersection(rect1, rect2)
iou_2d = inter_area/(area1+area2-inter_area)
ymax = min(corners1[:,2].max(), corners2[:,2].max())
ymin = max(corners1[:,2].min(), corners2[:,2].min())
inter_vol = inter_area * max(0.0, ymax-ymin)
vol1 = box3d_vol(corners1)
vol2 = box3d_vol(corners2)
iou = inter_vol / (vol1 + vol2 - inter_vol)
return iou
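# Illustrative usage — a minimal sketch added for clarity; the box and points are
# arbitrary. Because of the relative import of rotate_points_along_z above, the
# module needs its package context, so this guard only fires when it is launched
# with `python -m` from inside that package; the calls below avoid the rotation
# helper and rely only on numpy/scipy.
if __name__ == '__main__':
    # An axis-aligned 2 x 2 x 2 box centred at the origin (identity rotation).
    corners = compute_box_3d([2.0, 2.0, 2.0], np.zeros(3), np.eye(3))
    print(corners.shape)                               # (8, 3)
    # Recover the (x, y, z, dx, dy, dz, heading) description from the corners.
    print(corners_to_boxes(corners[None, :, :]))       # [[0. 0. 0. 2. 2. 2. 0.]]
    # Point-in-box test: the origin lies inside, (5, 0, 0) does not.
    pts = np.array([[0.0, 0.0, 0.0], [5.0, 0.0, 0.0]])
    print(points_in_boxes(pts, corners[None, :, :]))   # origin -> True, (5, 0, 0) -> False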
|
[
"numpy.arctan2",
"numpy.sum",
"numpy.logical_and",
"numpy.roll",
"numpy.zeros",
"numpy.transpose",
"numpy.mean",
"numpy.array",
"numpy.reshape",
"numpy.matmul",
"numpy.tile",
"numpy.vstack"
] |
[((646, 662), 'numpy.arctan2', 'np.arctan2', (['a', 'b'], {}), '(a, b)\n', (656, 662), True, 'import numpy as np\n'), ((1004, 1031), 'numpy.reshape', 'np.reshape', (['center', '(-1, 3)'], {}), '(center, (-1, 3))\n', (1014, 1031), True, 'import numpy as np\n'), ((1417, 1441), 'numpy.transpose', 'np.transpose', (['corners_3d'], {}), '(corners_3d)\n', (1429, 1441), True, 'import numpy as np\n'), ((1937, 1970), 'numpy.zeros', 'np.zeros', (['(corners3d.shape[0], 7)'], {}), '((corners3d.shape[0], 7))\n', (1945, 1970), True, 'import numpy as np\n'), ((3577, 3599), 'numpy.matmul', 'np.matmul', (['points', 'u.T'], {}), '(points, u.T)\n', (3586, 3599), True, 'import numpy as np\n'), ((3619, 3641), 'numpy.matmul', 'np.matmul', (['points', 'v.T'], {}), '(points, v.T)\n', (3628, 3641), True, 'import numpy as np\n'), ((3651, 3673), 'numpy.matmul', 'np.matmul', (['points', 'w.T'], {}), '(points, w.T)\n', (3660, 3673), True, 'import numpy as np\n'), ((3720, 3754), 'numpy.sum', 'np.sum', (['(u * boxes[:, 6, :])'], {'axis': '(1)'}), '(u * boxes[:, 6, :], axis=1)\n', (3726, 3754), True, 'import numpy as np\n'), ((3765, 3799), 'numpy.sum', 'np.sum', (['(u * boxes[:, 5, :])'], {'axis': '(1)'}), '(u * boxes[:, 5, :], axis=1)\n', (3771, 3799), True, 'import numpy as np\n'), ((3810, 3844), 'numpy.sum', 'np.sum', (['(v * boxes[:, 6, :])'], {'axis': '(1)'}), '(v * boxes[:, 6, :], axis=1)\n', (3816, 3844), True, 'import numpy as np\n'), ((3855, 3889), 'numpy.sum', 'np.sum', (['(v * boxes[:, 7, :])'], {'axis': '(1)'}), '(v * boxes[:, 7, :], axis=1)\n', (3861, 3889), True, 'import numpy as np\n'), ((3900, 3934), 'numpy.sum', 'np.sum', (['(w * boxes[:, 6, :])'], {'axis': '(1)'}), '(w * boxes[:, 6, :], axis=1)\n', (3906, 3934), True, 'import numpy as np\n'), ((3945, 3979), 'numpy.sum', 'np.sum', (['(w * boxes[:, 2, :])'], {'axis': '(1)'}), '(w * boxes[:, 2, :], axis=1)\n', (3951, 3979), True, 'import numpy as np\n'), ((3994, 4030), 'numpy.logical_and', 'np.logical_and', (['(ux <= up6)', '(ux >= up5)'], {}), '(ux <= up6, ux >= up5)\n', (4008, 4030), True, 'import numpy as np\n'), ((4057, 4093), 'numpy.logical_and', 'np.logical_and', (['(vx <= vp6)', '(vx >= vp7)'], {}), '(vx <= vp6, vx >= vp7)\n', (4071, 4093), True, 'import numpy as np\n'), ((4107, 4143), 'numpy.logical_and', 'np.logical_and', (['(wx <= wp6)', '(wx >= wp2)'], {}), '(wx <= wp6, wx >= wp2)\n', (4121, 4143), True, 'import numpy as np\n'), ((1231, 1251), 'numpy.transpose', 'np.transpose', (['rotmat'], {}), '(rotmat)\n', (1243, 1251), True, 'import numpy as np\n'), ((1253, 1297), 'numpy.vstack', 'np.vstack', (['[x_corners, y_corners, z_corners]'], {}), '([x_corners, y_corners, z_corners])\n', (1262, 1297), True, 'import numpy as np\n'), ((2036, 2071), 'numpy.mean', 'np.mean', (['corners3d[i, :, :]'], {'axis': '(0)'}), '(corners3d[i, :, :], axis=0)\n', (2043, 2071), True, 'import numpy as np\n'), ((2568, 2682), 'numpy.array', 'np.array', (['[[1, 1, -1], [1, -1, -1], [-1, -1, -1], [-1, 1, -1], [1, 1, 1], [1, -1, 1],\n [-1, -1, 1], [-1, 1, 1]]'], {}), '([[1, 1, -1], [1, -1, -1], [-1, -1, -1], [-1, 1, -1], [1, 1, 1], [1,\n -1, 1], [-1, -1, 1], [-1, 1, 1]])\n', (2576, 2682), True, 'import numpy as np\n'), ((2798, 2839), 'numpy.tile', 'np.tile', (['boxes3d[:, None, 3:6]', '(1, 8, 1)'], {}), '(boxes3d[:, None, 3:6], (1, 8, 1))\n', (2805, 2839), True, 'import numpy as np\n'), ((3262, 3307), 'numpy.zeros', 'np.zeros', (['[points.shape[0], 1]'], {'dtype': 'np.bool'}), '([points.shape[0], 1], dtype=np.bool)\n', (3270, 3307), True, 'import numpy as np\n'), ((6481, 
6525), 'numpy.sum', 'np.sum', (['((corners[0, :] - corners[1, :]) ** 2)'], {}), '((corners[0, :] - corners[1, :]) ** 2)\n', (6487, 6525), True, 'import numpy as np\n'), ((6539, 6583), 'numpy.sum', 'np.sum', (['((corners[1, :] - corners[2, :]) ** 2)'], {}), '((corners[1, :] - corners[2, :]) ** 2)\n', (6545, 6583), True, 'import numpy as np\n'), ((6597, 6641), 'numpy.sum', 'np.sum', (['((corners[0, :] - corners[4, :]) ** 2)'], {}), '((corners[0, :] - corners[4, :]) ** 2)\n', (6603, 6641), True, 'import numpy as np\n'), ((7201, 7216), 'numpy.array', 'np.array', (['rect1'], {}), '(rect1)\n', (7209, 7216), True, 'import numpy as np\n'), ((7223, 7238), 'numpy.array', 'np.array', (['rect1'], {}), '(rect1)\n', (7231, 7238), True, 'import numpy as np\n'), ((7267, 7282), 'numpy.array', 'np.array', (['rect2'], {}), '(rect2)\n', (7275, 7282), True, 'import numpy as np\n'), ((7289, 7304), 'numpy.array', 'np.array', (['rect2'], {}), '(rect2)\n', (7297, 7304), True, 'import numpy as np\n'), ((4374, 4387), 'numpy.roll', 'np.roll', (['y', '(1)'], {}), '(y, 1)\n', (4381, 4387), True, 'import numpy as np\n'), ((4397, 4410), 'numpy.roll', 'np.roll', (['x', '(1)'], {}), '(x, 1)\n', (4404, 4410), True, 'import numpy as np\n')]
|
from merc import config
from merc import feature
from merc import message
class MotdFeature(feature.Feature):
NAME = __name__
CONFIG_SECTION = 'motd'
install = MotdFeature.install
@MotdFeature.register_config_checker
def check_config(section):
return config.validate(section, str)
class MotdReply(message.Reply):
NAME = "372"
FORCE_TRAILING = True
MIN_ARITY = 1
def __init__(self, line, *args):
self.line = line
def as_reply_params(self):
return [self.line]
class MotdStart(message.Reply):
NAME = "375"
FORCE_TRAILING = True
MIN_ARITY = 1
def __init__(self, reason, *args):
self.reason = reason
def as_reply_params(self):
return [self.reason]
class EndOfMotd(message.Reply):
NAME = "376"
FORCE_TRAILING = True
MIN_ARITY = 1
def __init__(self, reason="End of /MOTD command", *args):
self.reason = reason
def as_reply_params(self):
return [self.reason]
@MotdFeature.register_user_command
class Motd(message.Command):
NAME = "MOTD"
MIN_ARITY = 0
@message.Command.requires_registration
def handle_for(self, app, user, prefix):
motd = app.features.get_config_section(__name__)
user.send_reply(MotdStart(
"- {} Message of the Day".format(app.server.name)))
for line in motd.splitlines():
user.send_reply(MotdReply("- " + line))
user.send_reply(EndOfMotd())
@MotdFeature.hook("user.welcome")
def send_motd_on_welcome(app, user):
user.on_message(app, user.hostmask, Motd())
|
[
"merc.config.validate"
] |
[((263, 292), 'merc.config.validate', 'config.validate', (['section', 'str'], {}), '(section, str)\n', (278, 292), False, 'from merc import config\n')]
|
# Copyright 2021 the Autoware Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ament_index_python import get_package_share_directory
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch.conditions import IfCondition
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import Node
import os
def generate_launch_description():
"""
Launch perception nodes.
* euclidean_cluster
* off_map_obstacles_filter
* ray_ground_classifier
"""
autoware_auto_launch_pkg_prefix = get_package_share_directory(
'autoware_auto_launch')
euclidean_cluster_param_file = os.path.join(
autoware_auto_launch_pkg_prefix, 'param/euclidean_cluster.param.yaml')
off_map_obstacles_filter_param_file = os.path.join(
autoware_auto_launch_pkg_prefix, 'param/off_map_obstacles_filter.param.yaml')
ray_ground_classifier_param_file = os.path.join(
autoware_auto_launch_pkg_prefix, 'param/ray_ground_classifier.param.yaml')
# Arguments
with_obstacles_param = DeclareLaunchArgument(
'with_obstacles',
default_value='True',
description='Enable obstacle detection'
)
euclidean_cluster_param = DeclareLaunchArgument(
'euclidean_cluster_param_file',
default_value=euclidean_cluster_param_file,
description='Path to config file for Euclidean Clustering'
)
off_map_obstacles_filter_param = DeclareLaunchArgument(
'off_map_obstacles_filter_param_file',
default_value=off_map_obstacles_filter_param_file,
description='Path to parameter file for off-map obstacle filter'
)
ray_ground_classifier_param = DeclareLaunchArgument(
'ray_ground_classifier_param_file',
default_value=ray_ground_classifier_param_file,
description='Path to config file for Ray Ground Classifier'
)
# Nodes
euclidean_clustering = Node(
package='euclidean_cluster_nodes',
executable='euclidean_cluster_node_exe',
namespace='perception',
condition=IfCondition(LaunchConfiguration('with_obstacles')),
parameters=[LaunchConfiguration('euclidean_cluster_param_file')],
remappings=[
("points_in", "points_nonground")
]
)
off_map_obstacles_filter = Node(
package='off_map_obstacles_filter_nodes',
name='off_map_obstacles_filter_node',
namespace='perception',
executable='off_map_obstacles_filter_nodes_exe',
condition=IfCondition(LaunchConfiguration('with_obstacles')),
parameters=[LaunchConfiguration('off_map_obstacles_filter_param_file')],
output='screen',
remappings=[
('bounding_boxes_in', 'lidar_bounding_boxes'),
('bounding_boxes_out', 'lidar_bounding_boxes_filtered'),
('HAD_Map_Service', '/had_maps/HAD_Map_Service'),
]
)
ray_ground_classifier = Node(
package='ray_ground_classifier_nodes',
executable='ray_ground_classifier_cloud_node_exe',
namespace='perception',
condition=IfCondition(LaunchConfiguration('with_obstacles')),
parameters=[LaunchConfiguration('ray_ground_classifier_param_file')],
remappings=[("points_in", "/lidars/points_fused")]
)
return LaunchDescription([
euclidean_cluster_param,
ray_ground_classifier_param,
with_obstacles_param,
off_map_obstacles_filter_param,
euclidean_clustering,
ray_ground_classifier,
off_map_obstacles_filter,
])
|
[
"launch.actions.DeclareLaunchArgument",
"launch.substitutions.LaunchConfiguration",
"launch.LaunchDescription",
"ament_index_python.get_package_share_directory",
"os.path.join"
] |
[((1082, 1133), 'ament_index_python.get_package_share_directory', 'get_package_share_directory', (['"""autoware_auto_launch"""'], {}), "('autoware_auto_launch')\n", (1109, 1133), False, 'from ament_index_python import get_package_share_directory\n'), ((1178, 1265), 'os.path.join', 'os.path.join', (['autoware_auto_launch_pkg_prefix', '"""param/euclidean_cluster.param.yaml"""'], {}), "(autoware_auto_launch_pkg_prefix,\n 'param/euclidean_cluster.param.yaml')\n", (1190, 1265), False, 'import os\n'), ((1313, 1407), 'os.path.join', 'os.path.join', (['autoware_auto_launch_pkg_prefix', '"""param/off_map_obstacles_filter.param.yaml"""'], {}), "(autoware_auto_launch_pkg_prefix,\n 'param/off_map_obstacles_filter.param.yaml')\n", (1325, 1407), False, 'import os\n'), ((1452, 1543), 'os.path.join', 'os.path.join', (['autoware_auto_launch_pkg_prefix', '"""param/ray_ground_classifier.param.yaml"""'], {}), "(autoware_auto_launch_pkg_prefix,\n 'param/ray_ground_classifier.param.yaml')\n", (1464, 1543), False, 'import os\n'), ((1593, 1700), 'launch.actions.DeclareLaunchArgument', 'DeclareLaunchArgument', (['"""with_obstacles"""'], {'default_value': '"""True"""', 'description': '"""Enable obstacle detection"""'}), "('with_obstacles', default_value='True', description=\n 'Enable obstacle detection')\n", (1614, 1700), False, 'from launch.actions import DeclareLaunchArgument\n'), ((1756, 1923), 'launch.actions.DeclareLaunchArgument', 'DeclareLaunchArgument', (['"""euclidean_cluster_param_file"""'], {'default_value': 'euclidean_cluster_param_file', 'description': '"""Path to config file for Euclidean Clustering"""'}), "('euclidean_cluster_param_file', default_value=\n euclidean_cluster_param_file, description=\n 'Path to config file for Euclidean Clustering')\n", (1777, 1923), False, 'from launch.actions import DeclareLaunchArgument\n'), ((1981, 2168), 'launch.actions.DeclareLaunchArgument', 'DeclareLaunchArgument', (['"""off_map_obstacles_filter_param_file"""'], {'default_value': 'off_map_obstacles_filter_param_file', 'description': '"""Path to parameter file for off-map obstacle filter"""'}), "('off_map_obstacles_filter_param_file', default_value=\n off_map_obstacles_filter_param_file, description=\n 'Path to parameter file for off-map obstacle filter')\n", (2002, 2168), False, 'from launch.actions import DeclareLaunchArgument\n'), ((2223, 2399), 'launch.actions.DeclareLaunchArgument', 'DeclareLaunchArgument', (['"""ray_ground_classifier_param_file"""'], {'default_value': 'ray_ground_classifier_param_file', 'description': '"""Path to config file for Ray Ground Classifier"""'}), "('ray_ground_classifier_param_file', default_value=\n ray_ground_classifier_param_file, description=\n 'Path to config file for Ray Ground Classifier')\n", (2244, 2399), False, 'from launch.actions import DeclareLaunchArgument\n'), ((3839, 4045), 'launch.LaunchDescription', 'LaunchDescription', (['[euclidean_cluster_param, ray_ground_classifier_param, with_obstacles_param,\n off_map_obstacles_filter_param, euclidean_clustering,\n ray_ground_classifier, off_map_obstacles_filter]'], {}), '([euclidean_cluster_param, ray_ground_classifier_param,\n with_obstacles_param, off_map_obstacles_filter_param,\n euclidean_clustering, ray_ground_classifier, off_map_obstacles_filter])\n', (3856, 4045), False, 'from launch import LaunchDescription\n'), ((2620, 2657), 'launch.substitutions.LaunchConfiguration', 'LaunchConfiguration', (['"""with_obstacles"""'], {}), "('with_obstacles')\n", (2639, 2657), False, 'from launch.substitutions import 
LaunchConfiguration\n'), ((2680, 2731), 'launch.substitutions.LaunchConfiguration', 'LaunchConfiguration', (['"""euclidean_cluster_param_file"""'], {}), "('euclidean_cluster_param_file')\n", (2699, 2731), False, 'from launch.substitutions import LaunchConfiguration\n'), ((3069, 3106), 'launch.substitutions.LaunchConfiguration', 'LaunchConfiguration', (['"""with_obstacles"""'], {}), "('with_obstacles')\n", (3088, 3106), False, 'from launch.substitutions import LaunchConfiguration\n'), ((3129, 3187), 'launch.substitutions.LaunchConfiguration', 'LaunchConfiguration', (['"""off_map_obstacles_filter_param_file"""'], {}), "('off_map_obstacles_filter_param_file')\n", (3148, 3187), False, 'from launch.substitutions import LaunchConfiguration\n'), ((3644, 3681), 'launch.substitutions.LaunchConfiguration', 'LaunchConfiguration', (['"""with_obstacles"""'], {}), "('with_obstacles')\n", (3663, 3681), False, 'from launch.substitutions import LaunchConfiguration\n'), ((3704, 3759), 'launch.substitutions.LaunchConfiguration', 'LaunchConfiguration', (['"""ray_ground_classifier_param_file"""'], {}), "('ray_ground_classifier_param_file')\n", (3723, 3759), False, 'from launch.substitutions import LaunchConfiguration\n')]
|
""" Compute resonances using the cxroots library (contour integration techniques)
Authors: <NAME>, <NAME>
Karlsruhe Institute of Technology, Germany
University of California, Merced
Last modified: 20/04/2021
"""
from sys import argv
import matplotlib.pyplot as plt
import numpy as np
from cxroots import AnnulusSector, Circle
from scipy.special import h1vp, hankel1, iv, ivp
## Entries ##
ε = float(argv[1]) # For example -1.1 + 1e-2 * 1j
η = np.sqrt(-ε)
print(f"η = {η}")
c = η + 1 / η
## Internal functions ##
def rootsAnnSec(m, rMin, rMax, aMin, aMax):
f0 = lambda k: ivp(m, η * k) * hankel1(m, k) / η + iv(m, η * k) * h1vp(m, k)
f1 = (
lambda k: ivp(m, η * k, 2) * hankel1(m, k)
+ c * ivp(m, η * k) * h1vp(m, k)
+ iv(m, η * k) * h1vp(m, k, 2)
)
A = AnnulusSector(center=0.0, radii=(rMin, rMax), phiRange=(aMin, aMax))
z = A.roots(f0, df=f1)
return z.roots
def writeFile(myFile, m, z):
if np.size(z, 0):
for i in range(np.size(z, 0)):
myFile.write(f"{m} {z[i].real} {z[i].imag}\n")
def calcInt():
plaTrue = ε > -1.0
if plaTrue:
Int = open(f"eps_{ε}_int", "w")
Pla = open(f"eps_{ε}_pla", "w")
else:
Int = open(f"eps_{ε}_int", "w")
for m in range(65):
print(f"m = {m}")
f0 = lambda k: ivp(m, η * k) * hankel1(m, k) / η + iv(m, η * k) * h1vp(m, k)
f1 = (
lambda k: ivp(m, η * k, 2) * hankel1(m, k)
+ c * ivp(m, η * k) * h1vp(m, k)
+ iv(m, η * k) * h1vp(m, k, 2)
)
t = np.linspace(0.2, 65.0, num=1024)
k = 1j * t
rf = np.real(f0(k))
ind = np.where(rf[1:] * rf[:-1] < 0.0)[0]
roots = np.zeros(np.shape(ind), dtype=complex)
for a, i in enumerate(ind):
C = Circle(center=1j * (t[i] + t[i + 1]) / 2.0, radius=(t[i + 1] - t[i]))
z = C.roots(f0, df=f1)
roots[a] = z.roots[0]
if plaTrue:
if m:
writeFile(Int, m, roots[1:])
writeFile(Pla, m, roots[[0]])
else:
writeFile(Int, m, roots)
else:
writeFile(Int, m, roots)
if plaTrue:
Int.close()
Pla.close()
else:
Int.close()
calcInt()
def calcResPla():
if ε < -1.0:
Pla = open(f"eps_{ε}_pla", "w")
angle = -np.pi / 4.0
for m in range(1, 65):
r = max(0.1, 0.9 * np.sqrt(1.0 - η ** (-2)) * m - 1.0)
R = max(2.0, 1.1 * np.sqrt(1.0 - η ** (-2)) * m + 1.0)
a = min(angle, -1e-3)
z = rootsAnnSec(m, r, R, a, 1e-3)
writeFile(Pla, m, z)
angle = np.angle(z[0])
Pla.close()
calcResPla()
def calcResOut():
Out = open(f"eps_{ε}_out", "w")
rMin = 0.2
rMax = 5.0
aMin = -np.pi + 0.01
aMax = 0.0
for m in range(33, 65):
print(f"m = {m}")
z = rootsAnnSec(m, rMin, rMax, aMin, aMax)
writeFile(Out, m, z)
if m > 3:
zMod = np.abs(z)
zArg = np.angle(z)
rMin = max(0.2, np.amin(zMod) * 0.75)
rMax = max(rMax, np.amax(zMod) + 3.0)
aMin = min(aMin, (-np.pi + np.amin(zArg)) / 2.0)
aMax = np.amax(zArg) / 2.0
Out.close()
calcResOut()
def calc_cx_pla():
with open(f"eps_{ε}_pla", "w") as file:
rMin, rMax = 0.1, 0.5
aMin = -np.pi / 4
for m in range(1, 65):
z = rootsAnnSec(m, rMin, rMax, aMin, 1e-3)[0]
file.write(f"{m} {z.real} {z.imag}\n")
rMin = abs(z)
rMax = abs(z) * (m + 1) / m + 1
aMin = min(2.5 * np.angle(z), -1e-3)
print(m, rMin, rMax, aMin)
calc_cx_pla()
def rewriteSave():
Int = np.loadtxt(f"eps_{ε}_int")
Pla = np.loadtxt(f"eps_{ε}_pla")
Out = np.loadtxt(f"eps_{ε}_out")
ind = np.argsort(Out[:, 1])[::-1]
out2 = Out[ind]
rep = out2[:, 1] > -1e-3
np.savez(f"eps_{ε}.npz", inner=Int, plasmon=Pla, outer=out2[rep])
rewriteSave()
def rewriteSave_pla():
Pla = np.loadtxt(f"eps_{ε}_pla")
np.savez(f"eps_{ε}.npz", plasmon=Pla)
# rewriteSave_pla()
|
[
"scipy.special.h1vp",
"numpy.size",
"numpy.abs",
"scipy.special.ivp",
"numpy.amin",
"numpy.angle",
"numpy.argsort",
"numpy.shape",
"cxroots.Circle",
"numpy.where",
"scipy.special.iv",
"numpy.loadtxt",
"numpy.linspace",
"cxroots.AnnulusSector",
"numpy.amax",
"scipy.special.hankel1",
"numpy.savez",
"numpy.sqrt"
] |
[((482, 493), 'numpy.sqrt', 'np.sqrt', (['(-ε)'], {}), '(-ε)\n', (489, 493), True, 'import numpy as np\n'), ((834, 902), 'cxroots.AnnulusSector', 'AnnulusSector', ([], {'center': '(0.0)', 'radii': '(rMin, rMax)', 'phiRange': '(aMin, aMax)'}), '(center=0.0, radii=(rMin, rMax), phiRange=(aMin, aMax))\n', (847, 902), False, 'from cxroots import AnnulusSector, Circle\n'), ((987, 1000), 'numpy.size', 'np.size', (['z', '(0)'], {}), '(z, 0)\n', (994, 1000), True, 'import numpy as np\n'), ((3819, 3845), 'numpy.loadtxt', 'np.loadtxt', (['f"""eps_{ε}_int"""'], {}), "(f'eps_{ε}_int')\n", (3829, 3845), True, 'import numpy as np\n'), ((3856, 3882), 'numpy.loadtxt', 'np.loadtxt', (['f"""eps_{ε}_pla"""'], {}), "(f'eps_{ε}_pla')\n", (3866, 3882), True, 'import numpy as np\n'), ((3893, 3919), 'numpy.loadtxt', 'np.loadtxt', (['f"""eps_{ε}_out"""'], {}), "(f'eps_{ε}_out')\n", (3903, 3919), True, 'import numpy as np\n'), ((4013, 4078), 'numpy.savez', 'np.savez', (['f"""eps_{ε}.npz"""'], {'inner': 'Int', 'plasmon': 'Pla', 'outer': 'out2[rep]'}), "(f'eps_{ε}.npz', inner=Int, plasmon=Pla, outer=out2[rep])\n", (4021, 4078), True, 'import numpy as np\n'), ((4130, 4156), 'numpy.loadtxt', 'np.loadtxt', (['f"""eps_{ε}_pla"""'], {}), "(f'eps_{ε}_pla')\n", (4140, 4156), True, 'import numpy as np\n'), ((4161, 4198), 'numpy.savez', 'np.savez', (['f"""eps_{ε}.npz"""'], {'plasmon': 'Pla'}), "(f'eps_{ε}.npz', plasmon=Pla)\n", (4169, 4198), True, 'import numpy as np\n'), ((1605, 1637), 'numpy.linspace', 'np.linspace', (['(0.2)', '(65.0)'], {'num': '(1024)'}), '(0.2, 65.0, num=1024)\n', (1616, 1637), True, 'import numpy as np\n'), ((3931, 3952), 'numpy.argsort', 'np.argsort', (['Out[:, 1]'], {}), '(Out[:, 1])\n', (3941, 3952), True, 'import numpy as np\n'), ((1025, 1038), 'numpy.size', 'np.size', (['z', '(0)'], {}), '(z, 0)\n', (1032, 1038), True, 'import numpy as np\n'), ((1700, 1732), 'numpy.where', 'np.where', (['(rf[1:] * rf[:-1] < 0.0)'], {}), '(rf[1:] * rf[:-1] < 0.0)\n', (1708, 1732), True, 'import numpy as np\n'), ((1761, 1774), 'numpy.shape', 'np.shape', (['ind'], {}), '(ind)\n', (1769, 1774), True, 'import numpy as np\n'), ((1843, 1912), 'cxroots.Circle', 'Circle', ([], {'center': '(1.0j * (t[i] + t[i + 1]) / 2.0)', 'radius': '(t[i + 1] - t[i])'}), '(center=1.0j * (t[i] + t[i + 1]) / 2.0, radius=t[i + 1] - t[i])\n', (1849, 1912), False, 'from cxroots import AnnulusSector, Circle\n'), ((2727, 2741), 'numpy.angle', 'np.angle', (['z[0]'], {}), '(z[0])\n', (2735, 2741), True, 'import numpy as np\n'), ((3079, 3088), 'numpy.abs', 'np.abs', (['z'], {}), '(z)\n', (3085, 3088), True, 'import numpy as np\n'), ((3108, 3119), 'numpy.angle', 'np.angle', (['z'], {}), '(z)\n', (3116, 3119), True, 'import numpy as np\n'), ((653, 665), 'scipy.special.iv', 'iv', (['m', '(η * k)'], {}), '(m, η * k)\n', (655, 665), False, 'from scipy.special import h1vp, hankel1, iv, ivp\n'), ((669, 679), 'scipy.special.h1vp', 'h1vp', (['m', 'k'], {}), '(m, k)\n', (673, 679), False, 'from scipy.special import h1vp, hankel1, iv, ivp\n'), ((790, 802), 'scipy.special.iv', 'iv', (['m', '(η * k)'], {}), '(m, η * k)\n', (792, 802), False, 'from scipy.special import h1vp, hankel1, iv, ivp\n'), ((806, 819), 'scipy.special.h1vp', 'h1vp', (['m', 'k', '(2)'], {}), '(m, k, 2)\n', (810, 819), False, 'from scipy.special import h1vp, hankel1, iv, ivp\n'), ((3300, 3313), 'numpy.amax', 'np.amax', (['zArg'], {}), '(zArg)\n', (3307, 3313), True, 'import numpy as np\n'), ((615, 628), 'scipy.special.ivp', 'ivp', (['m', '(η * k)'], {}), '(m, η * k)\n', (618, 628), False, 
'from scipy.special import h1vp, hankel1, iv, ivp\n'), ((632, 645), 'scipy.special.hankel1', 'hankel1', (['m', 'k'], {}), '(m, k)\n', (639, 645), False, 'from scipy.special import h1vp, hankel1, iv, ivp\n'), ((706, 722), 'scipy.special.ivp', 'ivp', (['m', '(η * k)', '(2)'], {}), '(m, η * k, 2)\n', (709, 722), False, 'from scipy.special import h1vp, hankel1, iv, ivp\n'), ((726, 739), 'scipy.special.hankel1', 'hankel1', (['m', 'k'], {}), '(m, k)\n', (733, 739), False, 'from scipy.special import h1vp, hankel1, iv, ivp\n'), ((770, 780), 'scipy.special.h1vp', 'h1vp', (['m', 'k'], {}), '(m, k)\n', (774, 780), False, 'from scipy.special import h1vp, hankel1, iv, ivp\n'), ((1400, 1412), 'scipy.special.iv', 'iv', (['m', '(η * k)'], {}), '(m, η * k)\n', (1402, 1412), False, 'from scipy.special import h1vp, hankel1, iv, ivp\n'), ((1416, 1426), 'scipy.special.h1vp', 'h1vp', (['m', 'k'], {}), '(m, k)\n', (1420, 1426), False, 'from scipy.special import h1vp, hankel1, iv, ivp\n'), ((1553, 1565), 'scipy.special.iv', 'iv', (['m', '(η * k)'], {}), '(m, η * k)\n', (1555, 1565), False, 'from scipy.special import h1vp, hankel1, iv, ivp\n'), ((1569, 1582), 'scipy.special.h1vp', 'h1vp', (['m', 'k', '(2)'], {}), '(m, k, 2)\n', (1573, 1582), False, 'from scipy.special import h1vp, hankel1, iv, ivp\n'), ((3148, 3161), 'numpy.amin', 'np.amin', (['zMod'], {}), '(zMod)\n', (3155, 3161), True, 'import numpy as np\n'), ((3199, 3212), 'numpy.amax', 'np.amax', (['zMod'], {}), '(zMod)\n', (3206, 3212), True, 'import numpy as np\n'), ((3713, 3724), 'numpy.angle', 'np.angle', (['z'], {}), '(z)\n', (3721, 3724), True, 'import numpy as np\n'), ((753, 766), 'scipy.special.ivp', 'ivp', (['m', '(η * k)'], {}), '(m, η * k)\n', (756, 766), False, 'from scipy.special import h1vp, hankel1, iv, ivp\n'), ((1362, 1375), 'scipy.special.ivp', 'ivp', (['m', '(η * k)'], {}), '(m, η * k)\n', (1365, 1375), False, 'from scipy.special import h1vp, hankel1, iv, ivp\n'), ((1379, 1392), 'scipy.special.hankel1', 'hankel1', (['m', 'k'], {}), '(m, k)\n', (1386, 1392), False, 'from scipy.special import h1vp, hankel1, iv, ivp\n'), ((1461, 1477), 'scipy.special.ivp', 'ivp', (['m', '(η * k)', '(2)'], {}), '(m, η * k, 2)\n', (1464, 1477), False, 'from scipy.special import h1vp, hankel1, iv, ivp\n'), ((1481, 1494), 'scipy.special.hankel1', 'hankel1', (['m', 'k'], {}), '(m, k)\n', (1488, 1494), False, 'from scipy.special import h1vp, hankel1, iv, ivp\n'), ((1529, 1539), 'scipy.special.h1vp', 'h1vp', (['m', 'k'], {}), '(m, k)\n', (1533, 1539), False, 'from scipy.special import h1vp, hankel1, iv, ivp\n'), ((3259, 3272), 'numpy.amin', 'np.amin', (['zArg'], {}), '(zArg)\n', (3266, 3272), True, 'import numpy as np\n'), ((1512, 1525), 'scipy.special.ivp', 'ivp', (['m', '(η * k)'], {}), '(m, η * k)\n', (1515, 1525), False, 'from scipy.special import h1vp, hankel1, iv, ivp\n'), ((2490, 2512), 'numpy.sqrt', 'np.sqrt', (['(1.0 - η ** -2)'], {}), '(1.0 - η ** -2)\n', (2497, 2512), True, 'import numpy as np\n'), ((2557, 2579), 'numpy.sqrt', 'np.sqrt', (['(1.0 - η ** -2)'], {}), '(1.0 - η ** -2)\n', (2564, 2579), True, 'import numpy as np\n')]
|
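As a minimal sketch of the contour-integration pattern the resonance script above relies on (assuming only the same cxroots Circle(...).roots(...) API it already uses), the toy example below finds the roots of f(z) = z**2 + 1 inside |z| < 2; the function, derivative and contour here are invented purely for illustration.

# Hypothetical sketch: locate the roots of f(z) = z**2 + 1 inside the circle |z| < 2
# with the same Circle(...).roots(..., df=...) call pattern as the script above.
from cxroots import Circle

f = lambda z: z ** 2 + 1      # known roots: +1j and -1j
df = lambda z: 2 * z          # analytic derivative, passed as df= exactly like in the script
C = Circle(center=0.0, radius=2.0)
result = C.roots(f, df=df)
print(result.roots)           # expect two values close to 1j and -1j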
""" Advent of code 2020 day 4/2 """
import logging
import math
from os import path
import re
record_splitter = re.compile(' |\n')
# Field info:
# byr (Birth Year) - four digits; at least 1920 and at most 2002.
# iyr (Issue Year) - four digits; at least 2010 and at most 2020.
# eyr (Expiration Year) - four digits; at least 2020 and at most 2030.
# hgt (Height) - a number followed by either cm or in:
# If cm, the number must be at least 150 and at most 193.
# If in, the number must be at least 59 and at most 76.
# hcl (Hair Color) - a # followed by exactly six characters 0-9 or a-f.
# ecl (Eye Color) - exactly one of: amb blu brn gry grn hzl oth.
# pid (Passport ID) - a nine-digit number, including leading zeroes.
# cid (Country ID) - ignored, missing or not.
def height_validator(x):
match = re.match(r'^(\d+)(cm|in)$', x)
if match is not None:
value = match.group(1)
unit = match.group(2)
if unit == "cm":
return 150 <= int(value) <= 193
elif unit == "in":
return 59 <= int(value) <= 76
expected_fields = [
{"key": 'byr', "validator": lambda x:
re.match(r'^\d{4}$', x) is not None and (1920 <= int(x) <= 2002)}, # (Birth Year)
{"key": 'iyr', "validator": lambda x: \
re.match(r'^\d{4}$', x) is not None and (2010 <= int(x) <= 2020)}, # (Issue Year)
{"key": 'eyr', "validator": lambda x: \
re.match(r'^\d{4}$', x) is not None and (2020 <= int(x) <= 2030)}, # (Expiration Year)
{"key": 'hgt', "validator": height_validator}, # (Height)
{"key": 'hcl', "validator": lambda x: \
re.match(r'^#[a-f0-9]{6}$', x) is not None}, # (Hair Color)
{"key": 'ecl', "validator": lambda x: \
re.match(r'^amb|blu|brn|gry|grn|hzl|oth$', x) is not None}, # (Eye Color)
{"key": 'pid', "validator": lambda x: \
re.match(r'^\d{9}$', x) is not None}, # (Passport ID)
# {"key": 'cid', "validator": lambda x: \
# True}, # (Country ID),
]
class PassportProcessor(object):
def __init__(self, records):
self.records = records
def validate_field(self, record, field):
result = field["key"] in record and field["validator"](record[field["key"]])
# print(result, record)
return result
def solve(self):
result = 0
for record in self.records:
result += 1 if all([self.validate_field(record, field) for field in expected_fields]) else 0
return result
def solution(data):
""" Solution to the problem """
# split records by empty lines, split fields by ":"-s, create a list of dictionaries from the records.
lines = [{key: value for [key, value] in [field.split(
":") for field in record_splitter.split(record)]} for record in data.split("\n\n")]
solver = PassportProcessor(lines)
return solver.solve()
if __name__ == "__main__":
with(open(path.join(path.dirname(__file__), 'input.txt'), 'r')) as input_file:
print(solution(input_file.read()))
|
[
"os.path.dirname",
"re.match",
"re.compile"
] |
[((113, 131), 're.compile', 're.compile', (['""" |\n"""'], {}), "(' |\\n')\n", (123, 131), False, 'import re\n'), ((809, 839), 're.match', 're.match', (['"""^(\\\\d+)(cm|in)$"""', 'x'], {}), "('^(\\\\d+)(cm|in)$', x)\n", (817, 839), False, 'import re\n'), ((1611, 1640), 're.match', 're.match', (['"""^#[a-f0-9]{6}$"""', 'x'], {}), "('^#[a-f0-9]{6}$', x)\n", (1619, 1640), False, 'import re\n'), ((1724, 1768), 're.match', 're.match', (['"""^amb|blu|brn|gry|grn|hzl|oth$"""', 'x'], {}), "('^amb|blu|brn|gry|grn|hzl|oth$', x)\n", (1732, 1768), False, 'import re\n'), ((1851, 1874), 're.match', 're.match', (['"""^\\\\d{9}$"""', 'x'], {}), "('^\\\\d{9}$', x)\n", (1859, 1874), False, 'import re\n'), ((2909, 2931), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (2921, 2931), False, 'from os import path\n'), ((1138, 1161), 're.match', 're.match', (['"""^\\\\d{4}$"""', 'x'], {}), "('^\\\\d{4}$', x)\n", (1146, 1161), False, 'import re\n'), ((1273, 1296), 're.match', 're.match', (['"""^\\\\d{4}$"""', 'x'], {}), "('^\\\\d{4}$', x)\n", (1281, 1296), False, 'import re\n'), ((1408, 1431), 're.match', 're.match', (['"""^\\\\d{4}$"""', 'x'], {}), "('^\\\\d{4}$', x)\n", (1416, 1431), False, 'import re\n')]
|
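As a quick sanity check of the validators above, the snippet below feeds solution() a single hand-written passport record; the field values are made up but chosen to fall inside the ranges listed in the comments, so exactly one record should be counted as valid.

# Illustrative smoke test only: one invented passport with every required field in range.
sample = "byr:1980 iyr:2015 eyr:2025 hgt:180cm hcl:#123abc ecl:brn pid:000123456 cid:88"
assert solution(sample) == 1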
import os
import sys
import subprocess
from typing import Union, TextIO, List, AnyStr
import smpl.log_module as logger
#
# This module executes commands, manages output from those commands and provides a dry-run capability.
#
# The primary function is
#
# def run(cmd, where)
#
# dry-run and output options are controlled by:
#
# def configure(arg_dry_run, arg_reporting_option)
#
# both arguments can be provided as kw-args and have defaults; no dry-run and report everything
# configure() should be called before any calls to run()
#
# Output options are:
# REPORTING_OPTION_STDOUT_STDERR : simple pass through stdout and stderr
# REPORTING_OPTION_STDOUT_ONLY : simple pass through stdout and show any stderr output only on a failure
# REPORTING_OPTION_STDERR_ONLY : show stderr only on a failure and does not show any stdout
# REPORTING_OPTION_NEITHER : shows no output either from stdout or stderr
# REPORTING_OPTION_STDERR_STDOUT_PROGRESS : shows stderr only on a failure and prints an X for each line
# of stdout - does this in realtime while the command is executing
#
REPORTING_OPT_STDOUT_STDERR = 1
REPORTING_OPT_STDOUT_ONLY = 2
REPORTING_OPT_STDERR_ONLY = 3
REPORTING_OPT_STDERR_STDOUT_PROGRESS = 5
REPORTING_OPT_NEITHER = 4
class Options:
def __init__(self):
self.reporting_option = REPORTING_OPT_STDOUT_ONLY
self.dry_run = False
options: Options = Options()
def configure(arg_reporting_option = REPORTING_OPT_STDOUT_STDERR, arg_dry_run: bool = False) -> None:
options.reporting_option = arg_reporting_option
options.dry_run = arg_dry_run
logger.debugln("dry_run: {} reporting: {}".format(options.dry_run, options.reporting_option))
def exec_cmd(cmd, where: Union[str, None]) -> None:
""" Does the hard work of executing commands, optionally in the given directory
    with the global reporting option.
On failure of the command it quits the program
"""
logger.debugln(" cmd: {} where: {} dry_run: {}".format(",".join(cmd), where, options.dry_run))
if options.dry_run:
return
if where is None:
where = os.getcwd()
try:
stderr_output = "unassigned"
if options.reporting_option == REPORTING_OPT_STDOUT_STDERR:
result = subprocess.run(cmd, cwd = where)
retcode = result.returncode
elif options.reporting_option == REPORTING_OPT_STDOUT_ONLY:
result = subprocess.run(cmd, cwd = where, stderr=subprocess.PIPE)
retcode = result.returncode
stderr_output = result.stderr
elif options.reporting_option == REPORTING_OPT_STDERR_ONLY:
result = subprocess.run(cmd, cwd = where, stdout=subprocess.PIPE)
retcode = result.returncode
stderr_output = result.stderr
elif options.reporting_option == REPORTING_OPT_STDERR_STDOUT_PROGRESS:
count = 0
result = subprocess.Popen(cmd, cwd = where, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
while result.poll() is None:
if count == 0:
sys.stdout.write("\n")
stdoutline = result.stdout.readline()
sys.stdout.write("X")
count = (count + 1) % 50
flush = result.stdout.read()
sys.stdout.write("YY\n")
# sys.stdout.write("\n")
result.stdout.close()
# print("result.stdout closed")
retcode = result.returncode
stderr_output = result.stderr
else:
result = subprocess.run(cmd, cwd = where, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
retcode = result.returncode
stderr_output = result.stderr
if retcode > 0:
sys.stderr.write("ERROR cmd: {} return code {}\n".format(", ".join(cmd), retcode))
sys.stderr.write("stderr {}\n".format(stderr_output))
raise RuntimeError("bad return code")
except Exception as exception:
sys.stderr.write("Cmd was {}\n".format(", ".join(cmd)))
sys.stderr.write(
"An error occurred while running command [{}] error type: {}\n".format(", ".join(cmd), type(exception).__name__))
sys.stderr.write("Details: \n{}\n".format(str(exception)))
quit()
def run(cmd: List[str], where: Union[str, None] = None) -> None:
logger.debugln(" cmd: {} where: {}".format(",".join(cmd), where))
if not isinstance(cmd, list):
raise ValueError("cmd must be a list")
# exec_cmd handles failure of the command
exec_cmd(cmd, where)
if __name__ == '__main__':
logger.init(logger.LOG_LEVEL_WARN)
logger.set_stdout_logfile()
configure(arg_dry_run=False, arg_reporting_option=REPORTING_OPT_STDOUT_ONLY)
run(["wget", "http://whiteacorn.com"], None)
run(["tree", "/home/robert/Projects/smpl"])
configure(arg_dry_run=False, arg_reporting_option=REPORTING_OPT_STDERR_STDOUT_PROGRESS)
run(["tree", "/home/robert/Projects/smpl"])
configure(arg_dry_run=False, arg_reporting_option=REPORTING_OPT_STDOUT_ONLY)
run(["tree", "/xhome/robert/Projects/smpl"])
|
[
"sys.stdout.write",
"subprocess.run",
"subprocess.Popen",
"os.getcwd",
"smpl.log_module.init",
"smpl.log_module.set_stdout_logfile"
] |
[((4732, 4766), 'smpl.log_module.init', 'logger.init', (['logger.LOG_LEVEL_WARN'], {}), '(logger.LOG_LEVEL_WARN)\n', (4743, 4766), True, 'import smpl.log_module as logger\n'), ((4771, 4798), 'smpl.log_module.set_stdout_logfile', 'logger.set_stdout_logfile', ([], {}), '()\n', (4796, 4798), True, 'import smpl.log_module as logger\n'), ((2226, 2237), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2235, 2237), False, 'import os\n'), ((2373, 2403), 'subprocess.run', 'subprocess.run', (['cmd'], {'cwd': 'where'}), '(cmd, cwd=where)\n', (2387, 2403), False, 'import subprocess\n'), ((2535, 2589), 'subprocess.run', 'subprocess.run', (['cmd'], {'cwd': 'where', 'stderr': 'subprocess.PIPE'}), '(cmd, cwd=where, stderr=subprocess.PIPE)\n', (2549, 2589), False, 'import subprocess\n'), ((2763, 2817), 'subprocess.run', 'subprocess.run', (['cmd'], {'cwd': 'where', 'stdout': 'subprocess.PIPE'}), '(cmd, cwd=where, stdout=subprocess.PIPE)\n', (2777, 2817), False, 'import subprocess\n'), ((3024, 3109), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'cwd': 'where', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '(cmd, cwd=where, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )\n', (3040, 3109), False, 'import subprocess\n'), ((3408, 3432), 'sys.stdout.write', 'sys.stdout.write', (['"""YY\n"""'], {}), "('YY\\n')\n", (3424, 3432), False, 'import sys\n'), ((3678, 3756), 'subprocess.run', 'subprocess.run', (['cmd'], {'cwd': 'where', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '(cmd, cwd=where, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n', (3692, 3756), False, 'import subprocess\n'), ((3292, 3313), 'sys.stdout.write', 'sys.stdout.write', (['"""X"""'], {}), "('X')\n", (3308, 3313), False, 'import sys\n'), ((3199, 3221), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (3215, 3221), False, 'import sys\n')]
|
import logging
logger = logging.getLogger(__name__)
def make_file_safe_api_name(api_name):
"""Make an api name safe for use in a file name"""
return "".join([c for c in api_name if c.isalpha() or c.isdigit() or c in (".", "_", "-")])
|
[
"logging.getLogger"
] |
[((25, 52), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (42, 52), False, 'import logging\n')]
|
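For illustration, the helper above keeps only letters, digits, dots, underscores and hyphens, so a name containing spaces and punctuation collapses as in this made-up example:

# Hypothetical example: spaces and '!' are dropped, while '.', '_' and '-' survive.
print(make_file_safe_api_name("Pet Store v1.2!"))   # -> PetStorev1.2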
#!/usr/bin/env python
#coding:utf-8
# Author: mozman --<<EMAIL>>
# Purpose: test clock_val_parser
# Created: 03.11.2010
# Copyright (C) 2010, <NAME>
# License: GPLv3
import sys
import unittest
PYTHON3 = sys.version_info[0] > 2
if PYTHON3:
import svgwrite.data.pyparsing_py3 as pp
else:
import svgwrite.data.pyparsing_py2 as pp
from svgwrite.data.svgparser import _build_clock_val_parser
from svgwrite.data.svgparser import _build_wall_clock_val_parser
class TestClockValParser(unittest.TestCase):
clock_val_parser = _build_clock_val_parser()
def is_valid(self, value):
try:
self.clock_val_parser.parseString(value, parseAll=True)
return True
except pp.ParseException:
return False
def test_full_clock_values(self):
self.assertTrue(self.is_valid("02:30:03"))
self.assertTrue(self.is_valid("01:00:00"))
self.assertTrue(self.is_valid("50:00:10.25"))
def test_partial_clock_values(self):
self.assertTrue(self.is_valid("02:33"))
self.assertTrue(self.is_valid("00:10.5"))
def test_time_count_values(self):
self.assertTrue(self.is_valid("3.2h"))
self.assertTrue(self.is_valid("45min"))
self.assertTrue(self.is_valid("30s"))
self.assertTrue(self.is_valid("5ms"))
self.assertTrue(self.is_valid("12.467"))
class TestWallClockValParser(unittest.TestCase):
wallclock_parser = _build_wall_clock_val_parser()
def is_valid(self, value):
try:
self.wallclock_parser.parseString(value, parseAll=True)
return True
except pp.ParseException:
return False
def test_date_plus_hhmm(self):
# Complete date plus hours and minutes:
# YYYY-MM-DDThh:mmTZD (e.g. 1997-07-16T19:20+01:00)
self.assertTrue(self.is_valid("1997-07-16T19:20+01:00"))
def test_date_plus_hhmmss(self):
# Complete date plus hours, minutes and seconds:
# YYYY-MM-DDThh:mm:ssTZD (e.g. 1997-07-16T19:20:30+01:00)
self.assertTrue(self.is_valid("1997-07-16T19:20:30+01:00"))
def test_date_plus_hhmmss_frac(self):
# Complete date plus hours, minutes, seconds and a decimal fraction of a second
# YYYY-MM-DDThh:mm:ss.sTZD (e.g. 1997-07-16T19:20:30.45+01:00)
self.assertTrue(self.is_valid("1997-07-16T19:20:30.45+01:00"))
if __name__=='__main__':
unittest.main()
|
[
"unittest.main",
"svgwrite.data.svgparser._build_clock_val_parser",
"svgwrite.data.svgparser._build_wall_clock_val_parser"
] |
[((556, 581), 'svgwrite.data.svgparser._build_clock_val_parser', '_build_clock_val_parser', ([], {}), '()\n', (579, 581), False, 'from svgwrite.data.svgparser import _build_clock_val_parser\n'), ((1487, 1517), 'svgwrite.data.svgparser._build_wall_clock_val_parser', '_build_wall_clock_val_parser', ([], {}), '()\n', (1515, 1517), False, 'from svgwrite.data.svgparser import _build_wall_clock_val_parser\n'), ((2484, 2499), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2497, 2499), False, 'import unittest\n')]
|
# -*- coding: utf-8 -*-
# Copyright 2015 www.suishouguan.com
#
# Licensed under the Private License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://github.com/samuelbaizg/ssguan/blob/master/LICENSE
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from ssguan.ignitor.base import context
from ssguan.ignitor.base.error import NoFoundError
from ssguan.ignitor.etl.model import IncrExtract, IncrExtractLog
from ssguan.ignitor.utility import kind, parallel
__lock = parallel.create_lock()
def get_extract_timespan(ie_name, code_path, first_time=None, start_delta=IncrExtract.DEFAULT_START_DELTA, end_delta=IncrExtract.DEFAULT_END_DELTA):
"""
Get extract timespan.
    :param ie_name|str: the increment extract job name.
:param code_path|str: the increment job code path.
:param first_time|datetime: it will be converted to utc time to save.
:param start_delta|float: the delta to compute extract start time.
:param end_delta|float: the delta to compute extract end time.
:return tuple(datetime,datetime): return (start_time,end_time)
"""
query = IncrExtract.all()
query.filter("ie_name =", ie_name)
__lock.acquire()
try:
incrextr = query.get()
if incrextr is None:
start_delta = IncrExtract.DEFAULT_START_DELTA if start_delta is None else float(start_delta)
end_delta = IncrExtract.DEFAULT_END_DELTA if end_delta is None else float(end_delta)
first_time = (kind.utcnow() - datetime.timedelta(seconds=end_delta)) if first_time is None else first_time
first_time = kind.local_to_utc(first_time)
first_time = kind.datetime_floor(first_time)
last_time = first_time - datetime.timedelta(seconds=start_delta)
last_time = kind.datetime_floor(first_time)
incrextr = IncrExtract(ie_name=ie_name, code_path=code_path, first_time=first_time, start_delta=start_delta, end_delta=end_delta, last_time=last_time)
incrextr = incrextr.create(context.get_user_id())
start_time = incrextr.last_time - datetime.timedelta(seconds=incrextr.start_delta)
end_time = kind.utcnow() - datetime.timedelta(seconds=incrextr.end_delta)
end_time = kind.datetime_floor(end_time)
return (start_time, end_time)
finally:
__lock.release()
def update_last_extr_time(ie_name, last_extr_time):
"""
Update last extract time
    :param ie_name|str: the extractor name
:param last_extr_time|datetime: the last extract time
"""
query = IncrExtract.all()
query.filter("ie_name =", ie_name)
incrextr = query.get()
if incrextr is None:
raise NoFoundError('Extractor', ie_name)
log = IncrExtractLog(ie_id=incrextr.key(), ie_name=ie_name, extr_time=last_extr_time)
log.create(context.get_user_id())
query.set("last_time set", last_extr_time)
query.update(context.get_user_id())
return True
|
[
"ssguan.ignitor.utility.kind.utcnow",
"ssguan.ignitor.etl.model.IncrExtract.all",
"ssguan.ignitor.utility.parallel.create_lock",
"ssguan.ignitor.base.context.get_user_id",
"ssguan.ignitor.etl.model.IncrExtract",
"ssguan.ignitor.utility.kind.datetime_floor",
"datetime.timedelta",
"ssguan.ignitor.utility.kind.local_to_utc",
"ssguan.ignitor.base.error.NoFoundError"
] |
[((880, 902), 'ssguan.ignitor.utility.parallel.create_lock', 'parallel.create_lock', ([], {}), '()\n', (900, 902), False, 'from ssguan.ignitor.utility import kind, parallel\n'), ((1549, 1566), 'ssguan.ignitor.etl.model.IncrExtract.all', 'IncrExtract.all', ([], {}), '()\n', (1564, 1566), False, 'from ssguan.ignitor.etl.model import IncrExtract, IncrExtractLog\n'), ((3067, 3084), 'ssguan.ignitor.etl.model.IncrExtract.all', 'IncrExtract.all', ([], {}), '()\n', (3082, 3084), False, 'from ssguan.ignitor.etl.model import IncrExtract, IncrExtractLog\n'), ((2720, 2749), 'ssguan.ignitor.utility.kind.datetime_floor', 'kind.datetime_floor', (['end_time'], {}), '(end_time)\n', (2739, 2749), False, 'from ssguan.ignitor.utility import kind, parallel\n'), ((3194, 3228), 'ssguan.ignitor.base.error.NoFoundError', 'NoFoundError', (['"""Extractor"""', 'ie_name'], {}), "('Extractor', ie_name)\n", (3206, 3228), False, 'from ssguan.ignitor.base.error import NoFoundError\n'), ((3336, 3357), 'ssguan.ignitor.base.context.get_user_id', 'context.get_user_id', ([], {}), '()\n', (3355, 3357), False, 'from ssguan.ignitor.base import context\n'), ((3425, 3446), 'ssguan.ignitor.base.context.get_user_id', 'context.get_user_id', ([], {}), '()\n', (3444, 3446), False, 'from ssguan.ignitor.base import context\n'), ((2055, 2084), 'ssguan.ignitor.utility.kind.local_to_utc', 'kind.local_to_utc', (['first_time'], {}), '(first_time)\n', (2072, 2084), False, 'from ssguan.ignitor.utility import kind, parallel\n'), ((2111, 2142), 'ssguan.ignitor.utility.kind.datetime_floor', 'kind.datetime_floor', (['first_time'], {}), '(first_time)\n', (2130, 2142), False, 'from ssguan.ignitor.utility import kind, parallel\n'), ((2246, 2277), 'ssguan.ignitor.utility.kind.datetime_floor', 'kind.datetime_floor', (['first_time'], {}), '(first_time)\n', (2265, 2277), False, 'from ssguan.ignitor.utility import kind, parallel\n'), ((2302, 2445), 'ssguan.ignitor.etl.model.IncrExtract', 'IncrExtract', ([], {'ie_name': 'ie_name', 'code_path': 'code_path', 'first_time': 'first_time', 'start_delta': 'start_delta', 'end_delta': 'end_delta', 'last_time': 'last_time'}), '(ie_name=ie_name, code_path=code_path, first_time=first_time,\n start_delta=start_delta, end_delta=end_delta, last_time=last_time)\n', (2313, 2445), False, 'from ssguan.ignitor.etl.model import IncrExtract, IncrExtractLog\n'), ((2568, 2616), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'incrextr.start_delta'}), '(seconds=incrextr.start_delta)\n', (2586, 2616), False, 'import datetime\n'), ((2637, 2650), 'ssguan.ignitor.utility.kind.utcnow', 'kind.utcnow', ([], {}), '()\n', (2648, 2650), False, 'from ssguan.ignitor.utility import kind, parallel\n'), ((2653, 2699), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'incrextr.end_delta'}), '(seconds=incrextr.end_delta)\n', (2671, 2699), False, 'import datetime\n'), ((2181, 2220), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'start_delta'}), '(seconds=start_delta)\n', (2199, 2220), False, 'import datetime\n'), ((2482, 2503), 'ssguan.ignitor.base.context.get_user_id', 'context.get_user_id', ([], {}), '()\n', (2501, 2503), False, 'from ssguan.ignitor.base import context\n'), ((1936, 1949), 'ssguan.ignitor.utility.kind.utcnow', 'kind.utcnow', ([], {}), '()\n', (1947, 1949), False, 'from ssguan.ignitor.utility import kind, parallel\n'), ((1952, 1989), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'end_delta'}), '(seconds=end_delta)\n', (1970, 1989), False, 'import datetime\n')]
|
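A minimal usage sketch for the two helpers above; it assumes an already-configured ssguan environment (database connection and user context), and the job name, code path and delta values are placeholders invented for this example.

# Hypothetical caller: obtain the next extraction window, process it, then record progress.
start_time, end_time = get_extract_timespan(
    ie_name="demo_orders_extract",     # made-up job name
    code_path="demo.etl.orders",       # made-up code path
    start_delta=60.0,                  # re-read 60 seconds of overlap
    end_delta=5.0)                     # stay 5 seconds behind "now"
# ... extract the records whose timestamps fall inside [start_time, end_time) ...
update_last_extr_time("demo_orders_extract", end_time)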
from models import ChapterPayments
from baseapp import db
class ChapterPaymentsController:
def __init__(self):
pass
def add(self, payment):
existing = False
payments = ChapterPayments.query.filter_by(received_date=payment['received_date']).all()
for existing_payment in payments:
if existing_payment.received_from == payment['received_from'] \
and existing_payment.received_amount == payment['received_amount'] \
and existing_payment.payment_type == payment['payment_type']:
existing = True
break
if not existing:
new_payment = ChapterPayments()
new_payment.received_from = payment['received_from']
new_payment.received_date = payment['received_date']
new_payment.received_amount = payment['received_amount']
new_payment.payment_type = payment['payment_type']
new_payment.cpc = payment['cpc']
new_payment.chapter = payment['chapter']
db.session.add(new_payment)
db.session.commit()
return True
return False
def edit(self, payment):
existing_payment = ChapterPayments.query.filter_by(id=payment['id']).first()
if existing_payment:
existing_payment.received_from = payment['received_from']
existing_payment.received_date = payment['received_date']
existing_payment.received_amount = payment['received_amount']
existing_payment.payment_type = payment['payment_type']
existing_payment.cpc = payment['cpc']
existing_payment.chapter = payment['chapter']
db.session.commit()
return True
return False
def delete(self, payment):
existing_payment = ChapterPayments.query.filter_by(id=payment['id']).first()
if existing_payment:
db.session.delete(existing_payment)
db.session.commit()
return True
return False
|
[
"baseapp.db.session.add",
"models.ChapterPayments.query.filter_by",
"baseapp.db.session.delete",
"baseapp.db.session.commit",
"models.ChapterPayments"
] |
[((688, 705), 'models.ChapterPayments', 'ChapterPayments', ([], {}), '()\n', (703, 705), False, 'from models import ChapterPayments\n'), ((1078, 1105), 'baseapp.db.session.add', 'db.session.add', (['new_payment'], {}), '(new_payment)\n', (1092, 1105), False, 'from baseapp import db\n'), ((1118, 1137), 'baseapp.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1135, 1137), False, 'from baseapp import db\n'), ((1730, 1749), 'baseapp.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1747, 1749), False, 'from baseapp import db\n'), ((1954, 1989), 'baseapp.db.session.delete', 'db.session.delete', (['existing_payment'], {}), '(existing_payment)\n', (1971, 1989), False, 'from baseapp import db\n'), ((2002, 2021), 'baseapp.db.session.commit', 'db.session.commit', ([], {}), '()\n', (2019, 2021), False, 'from baseapp import db\n'), ((203, 274), 'models.ChapterPayments.query.filter_by', 'ChapterPayments.query.filter_by', ([], {'received_date': "payment['received_date']"}), "(received_date=payment['received_date'])\n", (234, 274), False, 'from models import ChapterPayments\n'), ((1241, 1290), 'models.ChapterPayments.query.filter_by', 'ChapterPayments.query.filter_by', ([], {'id': "payment['id']"}), "(id=payment['id'])\n", (1272, 1290), False, 'from models import ChapterPayments\n'), ((1855, 1904), 'models.ChapterPayments.query.filter_by', 'ChapterPayments.query.filter_by', ([], {'id': "payment['id']"}), "(id=payment['id'])\n", (1886, 1904), False, 'from models import ChapterPayments\n')]
|
from typing import Any, List, Literal, TypedDict
from .FHIR_canonical import FHIR_canonical
from .FHIR_code import FHIR_code
from .FHIR_CodeableConcept import FHIR_CodeableConcept
from .FHIR_DataRequirement_CodeFilter import FHIR_DataRequirement_CodeFilter
from .FHIR_DataRequirement_DateFilter import FHIR_DataRequirement_DateFilter
from .FHIR_DataRequirement_Sort import FHIR_DataRequirement_Sort
from .FHIR_Element import FHIR_Element
from .FHIR_positiveInt import FHIR_positiveInt
from .FHIR_Reference import FHIR_Reference
from .FHIR_string import FHIR_string
# Describes a required data item for evaluation in terms of the type of data, and optional code or date-based filters of the data.
FHIR_DataRequirement = TypedDict(
"FHIR_DataRequirement",
{
# Unique id for the element within a resource (for internal references). This may be any string value that does not contain spaces.
"id": FHIR_string,
# May be used to represent additional information that is not part of the basic definition of the element. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension.
"extension": List[Any],
# The type of the required data, specified as the type name of a resource. For profiles, this value is set to the type of the base resource of the profile.
"type": FHIR_code,
# Extensions for type
"_type": FHIR_Element,
# The profile of the required data, specified as the uri of the profile definition.
"profile": List[FHIR_canonical],
# The intended subjects of the data requirement. If this element is not provided, a Patient subject is assumed.
"subjectCodeableConcept": FHIR_CodeableConcept,
# The intended subjects of the data requirement. If this element is not provided, a Patient subject is assumed.
"subjectReference": FHIR_Reference,
# Indicates that specific elements of the type are referenced by the knowledge module and must be supported by the consumer in order to obtain an effective evaluation. This does not mean that a value is required for this element, only that the consuming system must understand the element and be able to provide values for it if they are available. The value of mustSupport SHALL be a FHIRPath resolveable on the type of the DataRequirement. The path SHALL consist only of identifiers, constant indexers, and .resolve() (see the [Simple FHIRPath Profile](fhirpath.html#simple) for full details).
"mustSupport": List[FHIR_string],
# Extensions for mustSupport
"_mustSupport": List[FHIR_Element],
# Code filters specify additional constraints on the data, specifying the value set of interest for a particular element of the data. Each code filter defines an additional constraint on the data, i.e. code filters are AND'ed, not OR'ed.
"codeFilter": List[FHIR_DataRequirement_CodeFilter],
# Date filters specify additional constraints on the data in terms of the applicable date range for specific elements. Each date filter specifies an additional constraint on the data, i.e. date filters are AND'ed, not OR'ed.
"dateFilter": List[FHIR_DataRequirement_DateFilter],
# Specifies a maximum number of results that are required (uses the _count search parameter).
"limit": FHIR_positiveInt,
# Extensions for limit
"_limit": FHIR_Element,
# Specifies the order of the results to be returned.
"sort": List[FHIR_DataRequirement_Sort],
},
total=False,
)
|
[
"typing.TypedDict"
] |
[((721, 1271), 'typing.TypedDict', 'TypedDict', (['"""FHIR_DataRequirement"""', "{'id': FHIR_string, 'extension': List[Any], 'type': FHIR_code, '_type':\n FHIR_Element, 'profile': List[FHIR_canonical], 'subjectCodeableConcept':\n FHIR_CodeableConcept, 'subjectReference': FHIR_Reference, 'mustSupport':\n List[FHIR_string], '_mustSupport': List[FHIR_Element], 'codeFilter':\n List[FHIR_DataRequirement_CodeFilter], 'dateFilter': List[\n FHIR_DataRequirement_DateFilter], 'limit': FHIR_positiveInt, '_limit':\n FHIR_Element, 'sort': List[FHIR_DataRequirement_Sort]}"], {'total': '(False)'}), "('FHIR_DataRequirement', {'id': FHIR_string, 'extension': List[Any\n ], 'type': FHIR_code, '_type': FHIR_Element, 'profile': List[\n FHIR_canonical], 'subjectCodeableConcept': FHIR_CodeableConcept,\n 'subjectReference': FHIR_Reference, 'mustSupport': List[FHIR_string],\n '_mustSupport': List[FHIR_Element], 'codeFilter': List[\n FHIR_DataRequirement_CodeFilter], 'dateFilter': List[\n FHIR_DataRequirement_DateFilter], 'limit': FHIR_positiveInt, '_limit':\n FHIR_Element, 'sort': List[FHIR_DataRequirement_Sort]}, total=False)\n", (730, 1271), False, 'from typing import Any, List, Literal, TypedDict\n')]
|
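Because the TypedDict above is declared with total=False, a caller may supply only the keys it needs; the values in the following sketch are invented purely to show the shape of such a partial requirement.

# Hypothetical partial FHIR_DataRequirement (all values are illustrative placeholders).
observation_requirement: FHIR_DataRequirement = {
    "type": "Observation",                         # resource type being requested
    "profile": ["http://example.org/fhir/StructureDefinition/demo-observation"],
    "mustSupport": ["code", "value[x]"],           # elements the consumer must understand
    "limit": 10,                                   # maximum number of results (_count)
}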
import requests
import json
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
LOLQUIZ_CMS_URL = 'https://lolquiz-cms.herokuapp.com/questions?_sort=id&_limit={}&_start={}'
HTTP_STATUS_ERROR_CODES = [408, 502, 503, 504]
TOTAL_QUESTIONS = 100
INIT_OFFSET = 0
def get_questions(url=LOLQUIZ_CMS_URL):
s = requests.Session()
retries = Retry(total=5, backoff_factor=1, status_forcelist=HTTP_STATUS_ERROR_CODES)
s.mount('https://', HTTPAdapter(max_retries=retries))
offset = INIT_OFFSET
all_questions = []
while True:
url = LOLQUIZ_CMS_URL.format(TOTAL_QUESTIONS, offset)
response = s.get(url)
if not has_content(response):
break
all_questions.append(json.loads(response.content))
offset += TOTAL_QUESTIONS
return [set_question(question)
for outer_questions in all_questions
for question in outer_questions if valid_question(question)]
def has_content(response):
return len(json.loads(response.content)) > 0
def valid_question(question):
return 'id' in question \
and 'title' in question \
and 'game_type' in question and question['game_type'] is not None \
and 'game_modes' in question and len(question['game_modes']) > 0 \
and 'options' in question
def set_question(question):
return {'id': question['id'],
'title': question['title'],
'game_type': question['game_type'],
'options': question['options'],
'game_modes': question['game_modes']}
|
[
"requests.packages.urllib3.util.retry.Retry",
"requests.Session",
"json.loads",
"requests.adapters.HTTPAdapter"
] |
[((355, 373), 'requests.Session', 'requests.Session', ([], {}), '()\n', (371, 373), False, 'import requests\n'), ((388, 462), 'requests.packages.urllib3.util.retry.Retry', 'Retry', ([], {'total': '(5)', 'backoff_factor': '(1)', 'status_forcelist': 'HTTP_STATUS_ERROR_CODES'}), '(total=5, backoff_factor=1, status_forcelist=HTTP_STATUS_ERROR_CODES)\n', (393, 462), False, 'from requests.packages.urllib3.util.retry import Retry\n'), ((487, 519), 'requests.adapters.HTTPAdapter', 'HTTPAdapter', ([], {'max_retries': 'retries'}), '(max_retries=retries)\n', (498, 519), False, 'from requests.adapters import HTTPAdapter\n'), ((763, 791), 'json.loads', 'json.loads', (['response.content'], {}), '(response.content)\n', (773, 791), False, 'import json\n'), ((1029, 1057), 'json.loads', 'json.loads', (['response.content'], {}), '(response.content)\n', (1039, 1057), False, 'import json\n')]
|
import sys
import ReadFile
import pickle
import World
import importlib.util
import os.path as osp
import policy_generator as pg
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator
import numpy as np
def module_from_file(module_name, file_path):
spec = importlib.util.spec_from_file_location(module_name, file_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
def get_example_path():
return sys.argv[1]
def get_config_path(path):
config_filepath=osp.join(path,'config.txt')
return config_filepath
def get_file_paths(example_path,config_obj):
# File Names
locations_filename=None
agents_filename=osp.join(example_path,config_obj.agents_filename)
interactions_FilesList_filename=osp.join(example_path,config_obj.interactions_files_list)
events_FilesList_filename=osp.join(example_path,config_obj.events_files_list)
if config_obj.locations_filename=="":
locations_filename=None
else:
locations_filename=osp.join(example_path,config_obj.locations_filename)
return agents_filename, interactions_FilesList_filename, events_FilesList_filename, locations_filename
def get_file_names_list(example_path,interactions_FilesList_filename,events_FilesList_filename,config_obj):
    # Read through a file (for interactions/events) that contains file names, which in turn contain the interaction and event details for each time step
interactions_files_list=None
events_files_list=None
if config_obj.interactions_files_list=='':
print('No Interaction files uploaded!')
else:
interactionFiles_obj=ReadFile.ReadFilesList(interactions_FilesList_filename)
interactions_files_list=list(map(lambda x : osp.join(example_path,x) ,interactionFiles_obj.file_list))
if interactions_files_list==[]:
print('No Interactions inputted')
if config_obj.events_files_list=='':
print('No Event files uploaded!')
else:
eventFiles_obj=ReadFile.ReadFilesList(events_FilesList_filename)
events_files_list=list(map(lambda x : osp.join(example_path,x) ,eventFiles_obj.file_list))
if events_files_list==[]:
print('No Events inputted')
return interactions_files_list, events_files_list
def get_model(example_path):
UserModel = module_from_file("Generate_model", osp.join(example_path,'UserModel.py'))
model = UserModel.UserModel()
return model
def get_policy(example_path):
Generate_policy = module_from_file("Generate_policy", osp.join(example_path,'Generate_policy.py'))
policy_list, event_restriction_fn=Generate_policy.generate_policy()
return policy_list, event_restriction_fn
if __name__=="__main__":
example_path = get_example_path()
config_filename = get_config_path(example_path)
# Read Config file using ReadFile.ReadConfiguration
config_obj=ReadFile.ReadConfiguration(config_filename)
agents_filename, interactions_FilesList_filename,\
events_FilesList_filename, locations_filename = get_file_paths(example_path,config_obj)
interactions_files_list, events_files_list = get_file_names_list(example_path,interactions_FilesList_filename,events_FilesList_filename,config_obj)
# User Model
model = get_model(example_path)
# policy_list, event_restriction_fn=get_policy(example_path)
##########################################################################################
num_tests = 90
ntpa_max=6
napt_max=6
X=np.arange(1, napt_max+1, 1)
Y=np.arange(1, ntpa_max+1, 1)
X,Y = np.meshgrid(X,Y)
print(X)
print(Y)
data_list={'Infected':np.zeros((ntpa_max,napt_max)),'False Positives':np.zeros((ntpa_max,napt_max)),'Quarantined':np.zeros((ntpa_max,napt_max))}
for i in range(napt_max):
for j in range(ntpa_max):
policy_list, event_restriction_fn = pg.generate_group_testing_tests_policy(num_tests, i+1, j+1)
world_obj=World.World(config_obj,model,policy_list,event_restriction_fn,agents_filename,interactions_files_list,locations_filename,events_files_list)
tdict, total_infection, total_quarantined_days, wrongly_quarantined_days, total_test_cost = world_obj.simulate_worlds(plot=False)
data_list['Infected'][j][i]=total_infection
data_list['False Positives'][j][i]=world_obj.total_false_positives
data_list['Quarantined'][j][i]=total_quarantined_days
print(data_list)
    fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
    surf = ax.plot_surface(X, Y, np.array(data_list['False Positives']), cmap=cm.coolwarm,linewidth=0, antialiased=False)
    plt.xlabel("Number of Agents per testtube")
    plt.ylabel("Number of testtubes per agent")
    plt.title("Pool testing strategies vs total false positives")
    fig.colorbar(surf, shrink=0.5, aspect=5)
    plt.show()
    fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
    surf = ax.plot_surface(X, Y, np.array(data_list['Infected']), cmap=cm.coolwarm,linewidth=0, antialiased=False)
    plt.xlabel("Number of Agents per testtube")
    plt.ylabel("Number of testtubes per agent")
    plt.title("Pool testing strategies vs total infections")
    fig.colorbar(surf, shrink=0.5, aspect=5)
    plt.show()
    fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
    surf = ax.plot_surface(X, Y, np.array(data_list['Quarantined']), cmap=cm.coolwarm,linewidth=0, antialiased=False)
    plt.xlabel("Number of Agents per testtube")
    plt.ylabel("Number of testtubes per agent")
    plt.title("Pool testing strategies vs total quarantine")
    fig.colorbar(surf, shrink=0.5, aspect=5)
    plt.show()
    ###############################################################################################
|
[
"matplotlib.pyplot.title",
"numpy.meshgrid",
"matplotlib.pyplot.show",
"ReadFile.ReadConfiguration",
"ReadFile.ReadFilesList",
"numpy.zeros",
"matplotlib.pyplot.subplots",
"policy_generator.generate_group_testing_tests_policy",
"numpy.arange",
"numpy.array",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"os.path.join",
"World.World"
] |
[((573, 601), 'os.path.join', 'osp.join', (['path', '"""config.txt"""'], {}), "(path, 'config.txt')\n", (581, 601), True, 'import os.path as osp\n'), ((740, 790), 'os.path.join', 'osp.join', (['example_path', 'config_obj.agents_filename'], {}), '(example_path, config_obj.agents_filename)\n', (748, 790), True, 'import os.path as osp\n'), ((826, 884), 'os.path.join', 'osp.join', (['example_path', 'config_obj.interactions_files_list'], {}), '(example_path, config_obj.interactions_files_list)\n', (834, 884), True, 'import os.path as osp\n'), ((914, 966), 'os.path.join', 'osp.join', (['example_path', 'config_obj.events_files_list'], {}), '(example_path, config_obj.events_files_list)\n', (922, 966), True, 'import os.path as osp\n'), ((2944, 2987), 'ReadFile.ReadConfiguration', 'ReadFile.ReadConfiguration', (['config_filename'], {}), '(config_filename)\n', (2970, 2987), False, 'import ReadFile\n'), ((3560, 3589), 'numpy.arange', 'np.arange', (['(1)', '(napt_max + 1)', '(1)'], {}), '(1, napt_max + 1, 1)\n', (3569, 3589), True, 'import numpy as np\n'), ((3594, 3623), 'numpy.arange', 'np.arange', (['(1)', '(ntpa_max + 1)', '(1)'], {}), '(1, ntpa_max + 1, 1)\n', (3603, 3623), True, 'import numpy as np\n'), ((3632, 3649), 'numpy.meshgrid', 'np.meshgrid', (['X', 'Y'], {}), '(X, Y)\n', (3643, 3649), True, 'import numpy as np\n'), ((4542, 4587), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'subplot_kw': "{'projection': '3d'}"}), "(subplot_kw={'projection': '3d'})\n", (4554, 4587), True, 'import matplotlib.pyplot as plt\n'), ((4714, 4757), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of Agents per testtube"""'], {}), "('Number of Agents per testtube')\n", (4724, 4757), True, 'import matplotlib.pyplot as plt\n'), ((4762, 4805), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of testtubes per agent"""'], {}), "('Number of testtubes per agent')\n", (4772, 4805), True, 'import matplotlib.pyplot as plt\n'), ((4810, 4871), 'matplotlib.pyplot.title', 'plt.title', (['"""Pool testing strategies vs total false positives"""'], {}), "('Pool testing strategies vs total false positives')\n", (4819, 4871), True, 'import matplotlib.pyplot as plt\n'), ((4921, 4931), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4929, 4931), True, 'import matplotlib.pyplot as plt\n'), ((4947, 4992), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'subplot_kw': "{'projection': '3d'}"}), "(subplot_kw={'projection': '3d'})\n", (4959, 4992), True, 'import matplotlib.pyplot as plt\n'), ((5112, 5155), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of Agents per testtube"""'], {}), "('Number of Agents per testtube')\n", (5122, 5155), True, 'import matplotlib.pyplot as plt\n'), ((5160, 5203), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of testtubes per agent"""'], {}), "('Number of testtubes per agent')\n", (5170, 5203), True, 'import matplotlib.pyplot as plt\n'), ((5208, 5264), 'matplotlib.pyplot.title', 'plt.title', (['"""Pool testing strategies vs total infections"""'], {}), "('Pool testing strategies vs total infections')\n", (5217, 5264), True, 'import matplotlib.pyplot as plt\n'), ((5314, 5324), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5322, 5324), True, 'import matplotlib.pyplot as plt\n'), ((5340, 5385), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'subplot_kw': "{'projection': '3d'}"}), "(subplot_kw={'projection': '3d'})\n", (5352, 5385), True, 'import matplotlib.pyplot as plt\n'), ((5508, 5551), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of Agents per testtube"""'], {}), "('Number of Agents per testtube')\n", (5518, 5551), True, 'import matplotlib.pyplot as plt\n'), ((5556, 5599), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of testtubes per agent"""'], {}), "('Number of testtubes per agent')\n", (5566, 5599), True, 'import matplotlib.pyplot as plt\n'), ((5604, 5660), 'matplotlib.pyplot.title', 'plt.title', (['"""Pool testing strategies vs total quarantine"""'], {}), "('Pool testing strategies vs total quarantine')\n", (5613, 5660), True, 'import matplotlib.pyplot as plt\n'), ((5710, 5720), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5718, 5720), True, 'import matplotlib.pyplot as plt\n'), ((1077, 1130), 'os.path.join', 'osp.join', (['example_path', 'config_obj.locations_filename'], {}), '(example_path, config_obj.locations_filename)\n', (1085, 1130), True, 'import os.path as osp\n'), ((1684, 1739), 'ReadFile.ReadFilesList', 'ReadFile.ReadFilesList', (['interactions_FilesList_filename'], {}), '(interactions_FilesList_filename)\n', (1706, 1739), False, 'import ReadFile\n'), ((2055, 2104), 'ReadFile.ReadFilesList', 'ReadFile.ReadFilesList', (['events_FilesList_filename'], {}), '(events_FilesList_filename)\n', (2077, 2104), False, 'import ReadFile\n'), ((2414, 2452), 'os.path.join', 'osp.join', (['example_path', '"""UserModel.py"""'], {}), "(example_path, 'UserModel.py')\n", (2422, 2452), True, 'import os.path as osp\n'), ((2593, 2637), 'os.path.join', 'osp.join', (['example_path', '"""Generate_policy.py"""'], {}), "(example_path, 'Generate_policy.py')\n", (2601, 2637), True, 'import os.path as osp\n'), ((3703, 3733), 'numpy.zeros', 'np.zeros', (['(ntpa_max, napt_max)'], {}), '((ntpa_max, napt_max))\n', (3711, 3733), True, 'import numpy as np\n'), ((3751, 3781), 'numpy.zeros', 'np.zeros', (['(ntpa_max, napt_max)'], {}), '((ntpa_max, napt_max))\n', (3759, 3781), True, 'import numpy as np\n'), ((3795, 3825), 'numpy.zeros', 'np.zeros', (['(ntpa_max, napt_max)'], {}), '((ntpa_max, napt_max))\n', (3803, 3825), True, 'import numpy as np\n'), ((4621, 4659), 'numpy.array', 'np.array', (["data_list['False Positives']"], {}), "(data_list['False Positives'])\n", (4629, 4659), True, 'import numpy as np\n'), ((5026, 5057), 'numpy.array', 'np.array', (["data_list['Infected']"], {}), "(data_list['Infected'])\n", (5034, 5057), True, 'import numpy as np\n'), ((5419, 5453), 'numpy.array', 'np.array', (["data_list['Quarantined']"], {}), "(data_list['Quarantined'])\n", (5427, 5453), True, 'import numpy as np\n'), ((3940, 4003), 'policy_generator.generate_group_testing_tests_policy', 'pg.generate_group_testing_tests_policy', (['num_tests', '(i + 1)', '(j + 1)'], {}), '(num_tests, i + 1, j + 1)\n', (3978, 4003), True, 'import policy_generator as pg\n'), ((4022, 4176), 'World.World', 'World.World', (['config_obj', 'model', 'policy_list', 'event_restriction_fn', 'agents_filename', 'interactions_files_list', 'locations_filename', 'events_files_list'], {}), '(config_obj, model, policy_list, event_restriction_fn,\n agents_filename, interactions_files_list, locations_filename,\n events_files_list)\n', (4033, 4176), False, 'import World\n'), ((1792, 1817), 'os.path.join', 'osp.join', (['example_path', 'x'], {}), '(example_path, x)\n', (1800, 1817), True, 'import os.path as osp\n'), ((2151, 2176), 'os.path.join', 'osp.join', (['example_path', 'x'], {}), '(example_path, x)\n', (2159, 2176), True, 'import os.path as osp\n')]
|