code (string, lengths 22–1.05M) | apis (list, lengths 1–3.31k) | extract_api (string, lengths 75–3.25M) |
---|---|---|
from gym.envs.registration import register
register(
id='OdorEnvA-v0',
entry_point='odor_env.odor_env:OdorEnvA'
)
register(
id='OdorEnvB-v0',
entry_point='odor_env.odor_env:OdorEnvB'
)
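# Hypothetical usage sketch (not part of the original file): importing this module
# runs the register() calls above, after which the environments can be created by
# id. Assumes gym and the odor_env package are importable.
if __name__ == "__main__":
    import gym
    env = gym.make("OdorEnvA-v0")  # resolves to odor_env.odor_env:OdorEnvA
    env.reset()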
|
[
"gym.envs.registration.register"
] |
[((44, 112), 'gym.envs.registration.register', 'register', ([], {'id': '"""OdorEnvA-v0"""', 'entry_point': '"""odor_env.odor_env:OdorEnvA"""'}), "(id='OdorEnvA-v0', entry_point='odor_env.odor_env:OdorEnvA')\n", (52, 112), False, 'from gym.envs.registration import register\n'), ((118, 186), 'gym.envs.registration.register', 'register', ([], {'id': '"""OdorEnvB-v0"""', 'entry_point': '"""odor_env.odor_env:OdorEnvB"""'}), "(id='OdorEnvB-v0', entry_point='odor_env.odor_env:OdorEnvB')\n", (126, 186), False, 'from gym.envs.registration import register\n')]
|
from py_cgr.py_cgr_lib.py_cgr_lib import *
import zmq
import time
import sys
import random
import json
import re
import getopt
argumentList = sys.argv[1:]
# Options
options = "hc"
try:
# Parsing argument
arguments, values = getopt.getopt(argumentList, options)
# checking each argument
for currentArgument, currentValue in arguments:
if currentArgument in ("-h"):
print ("Use the option -m to specify the contact plan file location ")
sys.exit(0)
elif currentArgument in ("-c"):
print ("Contact plan file :", sys.argv[2])
except getopt.error as err:
# output error, and return with an error code
print (str(err))
port = "4555"
context = zmq.Context()
socket = context.socket(zmq.PAIR)
socket.bind("tcp://127.0.0.1:%s" % port) #localhost caused error
contact_plan = cp_load(sys.argv[2], 5000)
#contact_plan = cp_load('module/scheduler/src/contactPlan.json', 5000)
curr_time = 0
while True:
msg = socket.recv()
print("message received by server")
splitMessage = re.findall('[0-9]+', msg.decode('utf-8'))
splitMessage = list(filter(None, splitMessage))
sourceId = int(splitMessage[0])
destinationId = int(splitMessage[1])
startTime = int(splitMessage[2])
root_contact = Contact(sourceId, sourceId, 0, sys.maxsize, 100, 1, 0)
root_contact.arrival_time = startTime
route = cgr_dijkstra(root_contact, destinationId, contact_plan)
print("***Here's the route")
print(route)
print("***Sending next hop: " + str(route.next_node))
socket.send_string(str(route.next_node))
time.sleep(1)
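# Hypothetical client sketch (not part of the original script; run it from a
# separate process): the server above binds a PAIR socket on port 4555 and expects
# a message containing three integers (source id, destination id, start time);
# it replies with the id of the next hop on the computed route.
client_context = zmq.Context()
client_socket = client_context.socket(zmq.PAIR)
client_socket.connect("tcp://127.0.0.1:4555")
client_socket.send_string("1 2 0")  # sourceId destinationId startTime
next_hop = client_socket.recv_string()
print("next hop:", next_hop)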
|
[
"sys.exit",
"getopt.getopt",
"zmq.Context",
"time.sleep"
] |
[((722, 735), 'zmq.Context', 'zmq.Context', ([], {}), '()\n', (733, 735), False, 'import zmq\n'), ((236, 272), 'getopt.getopt', 'getopt.getopt', (['argumentList', 'options'], {}), '(argumentList, options)\n', (249, 272), False, 'import getopt\n'), ((1607, 1620), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1617, 1620), False, 'import time\n'), ((489, 500), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (497, 500), False, 'import sys\n')]
|
# -*- coding: iso-8859-1 -*-
# -----------------------------------------------------------------------------
# core.py
# -----------------------------------------------------------------------------
# $Id$
#
# -----------------------------------------------------------------------------
# kaa-Metadata - Media Metadata for Python
# Copyright (C) 2003-2006 <NAME>, <NAME>
#
# First Edition: <NAME> <<EMAIL>>
# Maintainer: <NAME> <<EMAIL>>
#
# Please see the file AUTHORS for a complete list of authors.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MER-
# CHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# -----------------------------------------------------------------------------
# python imports
import re
import logging
from . import fourcc
from . import language
from . import utils
UNPRINTABLE_KEYS = [ 'thumbnail', 'url', 'codec_private' ]
# media type definitions
MEDIA_AUDIO = 'MEDIA_AUDIO'
MEDIA_VIDEO = 'MEDIA_VIDEO'
MEDIA_IMAGE = 'MEDIA_IMAGE'
MEDIA_AV = 'MEDIA_AV'
MEDIA_SUBTITLE = 'MEDIA_SUBTITLE'
MEDIA_CHAPTER = 'MEDIA_CHAPTER'
MEDIA_DIRECTORY = 'MEDIA_DIRECTORY'
MEDIA_DISC = 'MEDIA_DISC'
MEDIA_GAME = 'MEDIA_GAME'
MEDIACORE = ['title', 'caption', 'comment', 'size', 'type', 'subtype', 'timestamp',
'keywords', 'country', 'language', 'langcode', 'url', 'media', 'artist',
'mime', 'datetime', 'tags', 'hash']
EXTENSION_DEVICE = 'device'
EXTENSION_DIRECTORY = 'directory'
EXTENSION_STREAM = 'stream'
# get logging object
log = logging.getLogger('metadata')
class ParseError(Exception):
pass
_features = {
# Guess if a file is a recording of a TV series. It matches names in the
# style of 'series 1x01 episode' and 'series s1e01 episode' where the
# delimiter may not be a space but also point or minus.
'VIDEO_SERIES_PARSER':
[ False, '(.+?)[\. _-]+[sS]?([0-9]|[0-9][0-9])[xeE]([0-9]|[0-9][0-9])[\. _-]+(.+)' ]
}
def enable_feature(var, value=None):
"""
    Enable optional features defined in the _features variable. Some
    features have a value. These values are set to reasonable default
values but can be overwritten by setting the optional parameter
value.
"""
_features[var][0] = True
if value:
_features[var][1] = value
def features():
"""
List all optional features
"""
return list(_features.keys())
def feature_enabled(feature):
"""
    Returns whether a feature was activated
"""
return _features[feature][0]
def feature_config(feature):
"""
Returns the configuration of the given feature
"""
return _features[feature][1]
class Media(object):
media = None
"""
    Media is the base class for all media metadata containers. It defines
    the basic structures that handle metadata. Media and its derivatives
    contain a common set of metadata attributes that is listed in keys.
    Specific derivatives contain additional keys to the Dublin Core set that
    is defined in Media.
"""
_keys = MEDIACORE
table_mapping = {}
def __init__(self, hash=None):
if hash is not None:
# create Media based on dict
for key, value in list(hash.items()):
if isinstance(value, list) and value and isinstance(value[0], dict):
value = [ Media(x) for x in value ]
self._set(key, value)
return
self._keys = self._keys[:]
self.tables = {}
# Tags, unlike tables, are more well-defined dicts whose values are
# either Tag objects, other dicts (for nested tags), or lists of either
# (for multiple instances of the tag, e.g. actor). Where possible,
# parsers should transform tag names to conform to the Official
# Matroska tags defined at http://www.matroska.org/technical/specs/tagging/index.html
# All tag names will be lower-cased.
self.tags = Tags()
for key in self._keys:
if key not in ('media', 'tags'):
setattr(self, key, None)
#
    # unicode and string conversion for debugging
#
def __str__(self):
result = ''
# print normal attributes
lists = []
for key in self._keys:
value = getattr(self, key, None)
if value == None or key == 'url':
continue
if isinstance(value, list):
if not value:
continue
elif isinstance(value[0], str):
# Just a list of strings (keywords?), so don't treat it specially.
value = ', '.join(value)
else:
lists.append((key, value))
continue
elif isinstance(value, dict):
# Tables or tags treated separately.
continue
if key in UNPRINTABLE_KEYS:
value = '<unprintable data, size=%d>' % len(value)
result += '| %10s: %s\n' % (str(key), str(value))
# print tags (recursively, to support nested tags).
def print_tags(tags, suffix, show_label):
result = ''
for n, (name, tag) in enumerate(tags.items()):
result += '| %12s%s%s = ' % ('tags: ' if n == 0 and show_label else '', suffix, name)
if isinstance(tag, list):
# TODO: doesn't support lists/dicts within lists.
result += '%s\n' % ', '.join(subtag.value for subtag in tag)
else:
result += '%s\n' % (tag.value or '')
if isinstance(tag, dict):
result += print_tags(tag, ' ', False)
return result
result += print_tags(self.tags, '', True)
# print lists
for key, l in lists:
for n, item in enumerate(l):
label = '+-- ' + key.rstrip('s').capitalize()
if key not in ('tracks', 'subtitles', 'chapters'):
label += ' Track'
result += '%s #%d\n' % (label, n+1)
result += '| ' + re.sub(r'\n(.)', r'\n| \1', str(item))
# print tables
if log.level >= 10:
for name, table in list(self.tables.items()):
result += '+-- Table %s\n' % str(name)
for key, value in list(table.items()):
try:
value = str(value)
if len(value) > 50:
value = '<unprintable data, size=%d>' % len(value)
except (UnicodeDecodeError, TypeError) as e:
try:
value = '<unprintable data, size=%d>' % len(value)
except AttributeError:
value = '<unprintable data>'
result += '| | %s: %s\n' % (str(key), value)
return result
def __repr__(self):
if hasattr(self, 'url'):
return '<%s %s>' % (str(self.__class__)[8:-2], self.url)
else:
return '<%s>' % (str(self.__class__)[8:-2])
#
# internal functions
#
def _appendtable(self, name, hashmap):
"""
        Append a table of additional metadata to the object.
        If such a table already exists, the items of the given table are
        added to the existing one.
"""
if name not in self.tables:
self.tables[name] = hashmap
else:
# Append to the already existing table
for k in list(hashmap.keys()):
self.tables[name][k] = hashmap[k]
def _set(self, key, value):
"""
Set key to value and add the key to the internal keys list if
missing.
"""
if value is None and getattr(self, key, None) is None:
return
if isinstance(value, str):
value = utils.tostr(value)
setattr(self, key, value)
if not key in self._keys:
self._keys.append(key)
def _set_url(self, url):
"""
Set the URL of the source
"""
self.url = url
def _finalize(self):
"""
Correct same data based on specific rules
"""
# make sure all strings are unicode
for key in self._keys:
if key in UNPRINTABLE_KEYS:
continue
value = getattr(self, key)
if value is None:
continue
if key == 'image':
if isinstance(value, str):
setattr(self, key, utils.tobytes(value))
continue
if isinstance(value, str):
setattr(self, key, utils.tostr(value))
if isinstance(value, str):
setattr(self, key, value.strip().rstrip().replace('\0', ''))
if isinstance(value, list) and value and isinstance(value[0], Media):
for submenu in value:
submenu._finalize()
# copy needed tags from tables
for name, table in list(self.tables.items()):
mapping = self.table_mapping.get(name, {})
for tag, attr in list(mapping.items()):
if self.get(attr):
continue
value = table.get(tag, None)
if value is not None:
if not isinstance(value, str):
value = utils.tostr(str(value))
elif isinstance(value, str):
value = utils.tostr(value)
value = value.strip().rstrip().replace('\0', '')
setattr(self, attr, value)
if 'fourcc' in self._keys and 'codec' in self._keys and self.codec is not None:
# Codec may be a fourcc, in which case we resolve it to its actual
# name and set the fourcc attribute.
self.fourcc, self.codec = fourcc.resolve(self.codec)
if 'language' in self._keys:
self.langcode, self.language = language.resolve(self.language)
#
# data access
#
def __contains__(self, key):
"""
Test if key exists in the dict
"""
return hasattr(self, key)
def get(self, attr, default = None):
"""
Returns the given attribute. If the attribute is not set by
the parser return 'default'.
"""
return getattr(self, attr, default)
def __getitem__(self, attr):
"""
Get the value of the given attribute
"""
return getattr(self, attr, None)
def __setitem__(self, key, value):
"""
Set the value of 'key' to 'value'
"""
setattr(self, key, value)
def has_key(self, key):
"""
Check if the object has an attribute 'key'
"""
return hasattr(self, key)
def convert(self):
"""
Convert Media to dict.
"""
result = {}
for k in self._keys:
value = getattr(self, k, None)
if isinstance(value, list) and value and isinstance(value[0], Media):
value = [ x.convert() for x in value ]
result[k] = value
return result
def keys(self):
"""
Return all keys for the attributes set by the parser.
"""
return self._keys
class Collection(Media):
"""
    Collection of Digital Media like CD, DVD, Directory, Playlist
"""
_keys = Media._keys + [ 'id', 'tracks' ]
def __init__(self):
Media.__init__(self)
self.tracks = []
class Tag(object):
"""
An individual tag, which will be a value stored in a Tags object.
Tag values are strings (for binary data), unicode objects, or datetime
objects for tags that represent dates or times.
"""
def __init__(self, value=None, langcode='und', binary=False):
super(Tag, self).__init__()
self.value = value
self.langcode = langcode
self.binary = binary
def __unicode__(self):
return str(self.value)
def __str__(self):
return str(self.value)
def __repr__(self):
if not self.binary:
return '<Tag object: %s>' % repr(self.value)
else:
return '<Binary Tag object: size=%d>' % len(self.value)
@property
def langcode(self):
return self._langcode
@langcode.setter
def langcode(self, code):
self._langcode, self.language = language.resolve(code)
class Tags(dict, Tag):
"""
A dictionary containing Tag objects. Values can be other Tags objects
(for nested tags), lists, or Tag objects.
A Tags object is more or less a dictionary but it also contains a value.
This is necessary in order to represent this kind of tag specification
(e.g. for Matroska)::
<Simple>
<Name>LAW_RATING</Name>
<String>PG</String>
<Simple>
<Name>COUNTRY</Name>
<String>US</String>
</Simple>
</Simple>
The attribute RATING has a value (PG), but it also has a child tag
COUNTRY that specifies the country code the rating belongs to.
"""
def __init__(self, value=None, langcode='und', binary=False):
super(Tags, self).__init__()
self.value = value
self.langcode = langcode
self.binary = False
|
[
"logging.getLogger"
] |
[((2121, 2150), 'logging.getLogger', 'logging.getLogger', (['"""metadata"""'], {}), "('metadata')\n", (2138, 2150), False, 'import logging\n')]
|
import sys
import os
import numpy as np
import cv2
import json
from collections import defaultdict
import unittest
import torch
sys.path.insert(0, os.path.abspath('')) # Test files from current path rather than installed module
from pymlutil.jsonutil import *
test_config = 'test.yaml'
class Test(unittest.TestCase):
def test_cmd(self):
result, _, _ = cmd('ls -la', check=True, timeout=5)
self.assertEqual(result, 0)
def test_yaml(self):
test = ReadDict(test_config)
assert test is not None
assert 'test_yaml' in test
self.assertEqual(test['test_yaml'][0]['zero'], 0)
self.assertEqual(test['test_yaml'][1]['one'], 1)
self.assertEqual(test['test_yaml'][2]['two'], 2)
if __name__ == '__main__':
unittest.main()
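# Hypothetical contents of test.yaml, inferred from the assertions above (the
# actual file is not part of this listing):
#
# test_yaml:
#   - zero: 0
#   - one: 1
#   - two: 2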
|
[
"unittest.main",
"os.path.abspath"
] |
[((148, 167), 'os.path.abspath', 'os.path.abspath', (['""""""'], {}), "('')\n", (163, 167), False, 'import os\n'), ((774, 789), 'unittest.main', 'unittest.main', ([], {}), '()\n', (787, 789), False, 'import unittest\n')]
|
# -*- coding: utf-8 -*-
"""Test proxy digest authentication
"""
import unittest
import requests
import requests_toolbelt
class TestProxyDigestAuth(unittest.TestCase):
def setUp(self):
self.username = "username"
self.password = "password"
self.auth = requests_toolbelt.auth.HTTPProxyDigestAuth(
self.username, self.password
)
self.auth.last_nonce = "bH3FVAAAAAAg74rL3X8AAI3CyBAAAAAA"
self.auth.chal = {
'nonce': self.auth.last_nonce,
'realm': '<EMAIL>',
'qop': 'auth'
}
self.prepared_request = requests.Request(
'GET',
'http://host.org/index.html'
).prepare()
def test_proxy_digest(self):
"""Test if it will generate Proxy-Authorization header
when nonce presents.
Digest authentication's correctness will not be tested here.
"""
# prepared_request headers should be clear before calling auth
assert not self.prepared_request.headers.get('Proxy-Authorization')
self.auth(self.prepared_request)
assert self.prepared_request.headers.get('Proxy-Authorization')
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"requests_toolbelt.auth.HTTPProxyDigestAuth",
"requests.Request"
] |
[((1210, 1225), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1223, 1225), False, 'import unittest\n'), ((282, 354), 'requests_toolbelt.auth.HTTPProxyDigestAuth', 'requests_toolbelt.auth.HTTPProxyDigestAuth', (['self.username', 'self.password'], {}), '(self.username, self.password)\n', (324, 354), False, 'import requests_toolbelt\n'), ((613, 666), 'requests.Request', 'requests.Request', (['"""GET"""', '"""http://host.org/index.html"""'], {}), "('GET', 'http://host.org/index.html')\n", (629, 666), False, 'import requests\n')]
|
"""
"""
import sys
import re
import shutil
import json
if len(sys.argv) < 3:
print('Not enough input arguments')
exit()
################################################################################
# Options
comment = {'begin':'<!--', 'end':'-->'}
################################################################################
errors = []
in_file_path = sys.argv[1]
out_file_path = in_file_path
data_file_path = sys.argv[2]
if len(sys.argv) >= 4:
out_file_path = sys.argv[3]
else:
shutil.copyfile(out_file_path, out_file_path + '.tpl')
# Data
json1_file = open(data_file_path)
json1_str = json1_file.read()
data = json.loads(json1_str)
in_file = open(in_file_path)
in_lines = in_file.readlines()
out_lines = []
for in_line in in_lines:
if '<REPLACED>' in in_line or '<IGNORE>' in in_line or '<ERROR>' in in_line:
continue
# Find patterns
out_lines.append(in_line)
prog = re.compile(r'<REPLACE:([a-zA-Z0-9_]+)>')
key_list = prog.findall(in_line)
# Find
number_of_elem = 0
is_list = False
is_valid_list = False
for key in key_list:
if key in data and isinstance(data[key], list):
if is_list:
is_valid_list = is_valid_list and (len(data[key])==number_of_elem)
else:
number_of_elem = len(data[key])
is_valid_list = True
is_list = True
number_of_loop = number_of_elem
if number_of_loop == 0:
number_of_loop = 1
if is_list and not is_valid_list:
number_of_loop = 0
error = '<ERROR> Data list length are not consistent.'
errors.append(error)
out_lines.append(comment['begin'] + ' ' + error + comment['end'] + '\n')
for i in range(0,number_of_loop):
out_line = in_line
out_line = re.sub(r'^ *' + comment['begin'] + ' *(.*)' + comment['end'] + ' *', '\g<1>', out_line)
out_line = out_line.replace('\n', '')
for key in key_list:
if key in data:
if isinstance(data[key], list):
value = data[key][i]
else:
value = data[key]
out_line = out_line.replace('<REPLACE:' + key + '>', str(value))
else:
out_line = out_line.replace('<REPLACE:' + key + '>', '')
if len(key_list) > 0:
if key in data:
out_lines.append(out_line + ' ' + comment['begin'] + ' <REPLACED> ' + comment['end'] + '\n')
else:
error = '<ERROR> Key \'' + key + '\' not exiting.';
errors.append(error)
out_lines.append(comment['begin'] + ' ' + error + ' ' + comment['end'] + '\n')
out_file = open(out_file_path, 'w')
for out_line in out_lines:
out_file.write(out_line)
if len(errors) > 0:
print('\n***ERRORS***\n')
print(str(len(errors)) + ' errors in templating process:')
for error in errors:
print('\t' + error)
else:
print('No error in templating process')
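# Worked example (hypothetical values, not part of the script): for a template line
#   '<!-- Name: <REPLACE:name> -->'
# and a data file containing {"name": "Alice"}, the loop above keeps the template
# line and appends the substituted line
#   'Name: Alice <!-- <REPLACED> -->'
# Lines already tagged <REPLACED>, <IGNORE> or <ERROR> are skipped on a re-run, so
# the output file can be fed back through the script with updated data.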
|
[
"shutil.copyfile",
"re.sub",
"json.loads",
"re.compile"
] |
[((644, 665), 'json.loads', 'json.loads', (['json1_str'], {}), '(json1_str)\n', (654, 665), False, 'import json\n'), ((510, 564), 'shutil.copyfile', 'shutil.copyfile', (['out_file_path', "(out_file_path + '.tpl')"], {}), "(out_file_path, out_file_path + '.tpl')\n", (525, 564), False, 'import shutil\n'), ((930, 969), 're.compile', 're.compile', (['"""<REPLACE:([a-zA-Z0-9_]+)>"""'], {}), "('<REPLACE:([a-zA-Z0-9_]+)>')\n", (940, 969), False, 'import re\n'), ((1820, 1911), 're.sub', 're.sub', (["('^ *' + comment['begin'] + ' *(.*)' + comment['end'] + ' *')", '"""\\\\g<1>"""', 'out_line'], {}), "('^ *' + comment['begin'] + ' *(.*)' + comment['end'] + ' *',\n '\\\\g<1>', out_line)\n", (1826, 1911), False, 'import re\n')]
|
from django.db import connection
import numpy as np
def getstudentcoursewisePLO(studentID, courseID):
with connection.cursor() as cursor:
cursor.execute('''
SELECT p.ploNum as plonum,100*(sum(e.obtainedMarks)/sum(a.totalMarks)) as plopercent
FROM app_registration_t r,
app_assessment_t a,
app_evaluation_t e,
app_co_t co,
app_plo_t p
WHERE r.registrationID = e.registration_id
and e.assessment_id = a.assessmentID
and a.co_id=co.coID
and co.plo_id = p.ploID
and r.student_id = '{}'
and co.course_id = '{}'
GROUP BY p.ploID
'''.format(studentID, courseID))
row = cursor.fetchall()
return row
def getcoursewiseavgPLO(courseID):
with connection.cursor() as cursor:
cursor.execute('''
SELECT p.ploNum as plonum, avg(100*e.obtainedMarks/a.totalMarks)
FROM app_registration_t r,
app_assessment_t a,
app_evaluation_t e,
app_co_t co,
app_plo_t p
WHERE r.registrationID = e.registration_id
and e.assessment_id = a.assessmentID
and a.co_id=co.coID
and co.plo_id = p.ploID
and co.course_id = '{}'
GROUP BY p.ploID
'''.format(courseID))
row = cursor.fetchall()
return row
def getcompletedcourses(studentID):
with connection.cursor() as cursor:
cursor.execute(
'''
SELECT distinct s.course_id
FROM app_registration_t r,
app_evaluation_t e,
app_section_t s
WHERE r.registrationID = e.registration_id
and r.section_id = s.sectionID
and r.student_id = '{}'
'''.format(studentID))
row = cursor.fetchall()
return row
def getcorrespondingstudentid(userID):
with connection.cursor() as cursor:
cursor.execute(
'''
SELECT studentID
FROM app_student_t s
WHERE s.user_ptr_id = '{}'
'''.format(userID))
row = cursor.fetchall()
return row
def getstudentprogramwisePLO(studentID):
with connection.cursor() as cursor:
cursor.execute('''
SELECT p.ploNum as plonum,100*(sum(e.obtainedMarks)/sum(a.totalMarks)) as plopercent
FROM app_registration_t r,
app_assessment_t a,
app_evaluation_t e,
app_co_t co,
app_plo_t p,
app_student_t s,
app_program_t pr
WHERE r.registrationID = e.registration_id
and e.assessment_id = a.assessmentID
and a.co_id=co.coID
and co.plo_id = p.ploID
and r.student_id = '{}'
and s.studentID = r.student_id
and s.program_id = pr.programID
GROUP BY p.ploID
'''.format(studentID))
row = cursor.fetchall()
return row
def getprogramwiseavgPLO(programID):
with connection.cursor() as cursor:
cursor.execute('''
SELECT p.ploNum as plonum, avg(100*e.obtainedMarks/a.totalMarks)
FROM app_registration_t r,
app_assessment_t a,
app_evaluation_t e,
app_co_t co,
app_plo_t p
WHERE r.registrationID = e.registration_id
and e.assessment_id = a.assessmentID
and a.co_id=co.coID
and co.plo_id = p.ploID
and p.program_id = '{}'
GROUP BY p.ploID
'''.format(programID))
row = cursor.fetchall()
return row
def getstudentprogramid(studentID):
with connection.cursor() as cursor:
cursor.execute('''
SELECT s.program_id
FROM app_student_t s
WHERE s.studentID = '{}'
'''.format(studentID))
row = cursor.fetchall()
return row
def getstudentallcoursePLO(studentID, category):
with connection.cursor() as cursor:
cursor.execute('''
SELECT p.ploNum as ploNum,co.course_id,sum(e.obtainedMarks),sum(a.totalMarks), derived.Total
FROM app_registration_t r,
app_assessment_t a,
app_evaluation_t e,
app_co_t co,
app_plo_t p,
(
SELECT p.ploNum as ploNum,sum(a.totalMarks) as Total, r.student_id as StudentID
FROM app_registration_t r,
app_assessment_t a,
app_evaluation_t e,
app_co_t co,
app_plo_t p
WHERE r.registrationID = e.registration_id
and e.assessment_id = a.assessmentID
and a.co_id=co.coID
and co.plo_id = p.ploID
and r.student_id = '{}'
GROUP BY r.student_id,p.ploID) derived
WHERE r.student_id = derived.StudentID
and e.registration_id = r.registrationID
and e.assessment_id = a.assessmentID
and a.co_id=co.coID
and co.plo_id = p.ploID
and p.ploNum = derived.ploNum
GROUP BY p.ploID,co.course_id
'''.format(studentID))
row = cursor.fetchall()
table = []
courses = []
for entry in row:
if entry[1] not in courses:
courses.append(entry[1])
courses.sort()
plo = ["PLO1", "PLO2", "PLO3", "PLO4", "PLO5", "PLO6", "PLO7", "PLO8", "PLO9", "PLO10", "PLO11", "PLO12"]
for i in courses:
temptable = []
if category == 'report':
temptable = [i]
for j in plo:
found = False
for k in row:
if j == k[0] and i == k[1]:
if category == 'report':
temptable.append(np.round(100 * k[2] / k[3], 2))
elif category == 'chart':
temptable.append(np.round(100 * k[2] / k[4], 2))
found = True
if not found:
if category == 'report':
temptable.append('N/A')
elif category == 'chart':
temptable.append(0)
table.append(temptable)
return plo, courses, table
def getfacultycoursewisePLO(courseID, semesters):
sem = '';
for semester in semesters:
sem += '"'
sem += semester
sem += '",'
sem = sem[:-1]
with connection.cursor() as cursor:
cursor.execute('''
SELECT f.first_name, f.last_name, f.plonum, COUNT(*) as achieved_cnt
FROM
(
SELECT u.first_name, u.last_name, p.ploNum as plonum, 100*e.obtainedMarks/a.totalMarks as percentage
FROM app_registration_t r,
app_assessment_t a,
app_evaluation_t e,
app_co_t co,
app_plo_t p,
app_section_t s,
accounts_user u,
app_employee_t emp
WHERE r.registrationID = e.registration_id
and e.assessment_id = a.assessmentID
and a.co_id=co.coID
and co.plo_id = p.ploID
and a.section_id = s.sectionID
and s.faculty_id IN
(
SELECT DISTINCT s.faculty_id
FROM app_section_t s
WHERE s.course_id = '{}'
)
and s.semester IN ({})
and s.course_id ='{}'
and s.faculty_id = emp.employeeID
and emp.user_ptr_id = u.id
)f
WHERE f.percentage >= 40
GROUP BY f.first_name, f.plonum;
'''.format(courseID, sem, courseID))
row1 = cursor.fetchall()
cursor.execute('''
SELECT COUNT(*)
FROM
(
SELECT u.first_name, u.last_name, p.ploNum as plonum, 100*e.obtainedMarks/a.totalMarks as percentage
FROM app_registration_t r,
app_assessment_t a,
app_evaluation_t e,
app_co_t co,
app_plo_t p,
app_section_t s,
accounts_user u,
app_employee_t emp
WHERE r.registrationID = e.registration_id
and e.assessment_id = a.assessmentID
and a.co_id=co.coID
and co.plo_id = p.ploID
and a.section_id = s.sectionID
and s.faculty_id IN
(
SELECT DISTINCT s.faculty_id
FROM app_section_t s
WHERE s.course_id = '{}'
)
and s.semester IN ({})
and s.course_id ='{}'
and s.faculty_id = emp.employeeID
and emp.user_ptr_id = u.id
)f
GROUP BY f.first_name, f.plonum;
'''.format(courseID, sem, courseID))
row2 = cursor.fetchall()
faculty = []
plonum = []
plos1 = []
plos2 = []
for record in row1:
faculty.append(record[0]+' '+record[1])
plonum.append(record[2])
plos1.append(record[3])
for record in row2:
plos2.append(record[0])
plos = 100*(np.array(plos1)/np.array(plos2))
plos = plos.tolist()
faculty = list(set(faculty))
plonum = list(set(plonum))
plonum.sort()
plonum.sort(key=len, reverse=False)
plos = np.array(plos)
plos = np.split(plos, len(plos)/len(plonum))
new_plo=[]
for plo in plos:
new_plo.append(plo.tolist())
return faculty, plonum, new_plo
def getsemestercoursewisePLO(courseID, semesters):
sem = '';
for semester in semesters:
sem += '"'
sem += semester
sem += '",'
sem = sem[:-1]
with connection.cursor() as cursor:
cursor.execute('''
SELECT f.semester, f.plonum, COUNT(*) as achieved_cnt
FROM
(
SELECT s.semester, p.ploNum as plonum, s.course_id, 100*e.obtainedMarks/a.totalMarks as percentage
FROM app_registration_t r,
app_assessment_t a,
app_evaluation_t e,
app_co_t co,
app_plo_t p,
app_section_t s
WHERE r.registrationID = e.registration_id
and e.assessment_id = a.assessmentID
and a.co_id=co.coID
and co.plo_id = p.ploID
and a.section_id = s.sectionID
and s.semester IN ({})
and co.course_id ='{}'
and s.course_id = co.course_id
)f
WHERE f.percentage >= 40
GROUP BY f.semester, f.plonum;
'''.format(sem, courseID))
row1 = cursor.fetchall()
cursor.execute('''
SELECT COUNT(*) as all_cnt
FROM
(
SELECT s.semester, p.ploNum as plonum, s.course_id, 100*e.obtainedMarks/a.totalMarks as percentage
FROM app_registration_t r,
app_assessment_t a,
app_evaluation_t e,
app_co_t co,
app_plo_t p,
app_section_t s
WHERE r.registrationID = e.registration_id
and e.assessment_id = a.assessmentID
and a.co_id=co.coID
and co.plo_id = p.ploID
and a.section_id = s.sectionID
and s.semester IN ({})
and co.course_id ='{}'
and s.course_id = co.course_id
)f
GROUP BY f.semester, f.plonum;
'''.format(sem, courseID))
row2 = cursor.fetchall()
semester = []
plonum = []
acheived = []
all_cnt = []
for record in row1:
semester.append(record[0])
plonum.append(record[1])
acheived.append(record[2])
for record in row2:
all_cnt.append(record[0])
acheived_per = 100*(np.array(acheived)/np.array(all_cnt))
semester = list(set(semester))
plonum = list(set(plonum))
failed_per = 100 - acheived_per
acheived_per = np.split(acheived_per, len(acheived_per)/len(semester))
failed_per = np.split(failed_per, len(failed_per)/len(semester))
acheived=[]
for plo in acheived_per:
acheived.append(plo.tolist())
failed=[]
for plo in failed_per:
failed.append(plo.tolist())
return semester, plonum, acheived, failed
def getplowisecoursecomparism(plos, semesters):
sem = '';
for semester in semesters:
sem += '"'
sem += semester
sem += '",'
sem = sem[:-1]
ploo = '';
for plo in plos:
ploo += '"'
ploo += plo
ploo += '",'
ploo = ploo[:-1]
with connection.cursor() as cursor:
cursor.execute('''
SELECT f.course_id, f.ploNum, COUNT(*)
FROM
(
SELECT s.course_id, p.ploNum, 100*e.obtainedMarks/a.totalMarks as percentage
FROM app_registration_t r,
app_assessment_t a,
app_evaluation_t e,
app_co_t co,
app_plo_t p,
app_section_t s
WHERE r.registrationID = e.registration_id
and e.assessment_id = a.assessmentID
and a.co_id=co.coID
and co.plo_id = p.ploID
and p.ploNum in ({})
and a.section_id = s.sectionID
and s.semester IN ({})
)f
WHERE f.percentage >= 40
GROUP BY f.ploNum, f.course_id;
'''.format(ploo, sem))
row1 = cursor.fetchall()
with connection.cursor() as cursor:
cursor.execute('''
SELECT COUNT(*)
FROM
(
SELECT s.course_id, p.ploNum, 100*e.obtainedMarks/a.totalMarks as percentage
FROM app_registration_t r,
app_assessment_t a,
app_evaluation_t e,
app_co_t co,
app_plo_t p,
app_section_t s
WHERE r.registrationID = e.registration_id
and e.assessment_id = a.assessmentID
and a.co_id=co.coID
and co.plo_id = p.ploID
and p.ploNum in ({})
and a.section_id = s.sectionID
and s.semester IN ({})
)f
GROUP BY f.ploNum, f.course_id;
'''.format(ploo, sem))
row2 = cursor.fetchall()
courses = []
plonum = []
acheived = []
all_cnt = []
for record in row1:
courses.append(record[0])
plonum.append(record[1])
acheived.append(record[2])
for record in row2:
all_cnt.append(record[0])
acheived_per = 100*(np.array(acheived)/np.array(all_cnt))
courses = list(set(courses))
plonum = list(set(plonum))
acheived_per = np.split(acheived_per, len(acheived_per)/len(plonum))
acheived=[]
for plo in acheived_per:
acheived.append(plo.tolist())
return courses, plonum, acheived
def getprogramsemesterwiseplocount(program, semesters):
sem = '';
for semester in semesters:
sem += '"'
sem += semester
sem += '",'
sem = sem[:-1]
with connection.cursor() as cursor:
cursor.execute('''
SELECT f.plonum, COUNT(*)
FROM
(
SELECT p.ploNum as plonum, r.student_id, 100*e.obtainedMarks/a.totalMarks as percentage
FROM app_registration_t r,
app_assessment_t a,
app_evaluation_t e,
app_co_t co,
app_plo_t p,
app_section_t s,
app_program_t prog
WHERE r.registrationID = e.registration_id
and e.assessment_id = a.assessmentID
and a.co_id=co.coID
and co.plo_id = p.ploID
and p.program_id = prog.programID
and prog.programName = '{}'
and a.section_id = s.sectionID
and s.semester IN ({})
)f
WHERE f.percentage>=40
GROUP BY f.plonum;
'''.format(program, sem))
row1 = cursor.fetchall()
with connection.cursor() as cursor:
cursor.execute('''
SELECT COUNT(*)
FROM
(
SELECT p.ploNum as plonum, r.student_id, 100*e.obtainedMarks/a.totalMarks as percentage
FROM app_registration_t r,
app_assessment_t a,
app_evaluation_t e,
app_co_t co,
app_plo_t p,
app_section_t s,
app_program_t prog
WHERE r.registrationID = e.registration_id
and e.assessment_id = a.assessmentID
and a.co_id=co.coID
and co.plo_id = p.ploID
and p.program_id = prog.programID
and prog.programName = '{}'
and a.section_id = s.sectionID
and s.semester IN ({})
)f
GROUP BY f.plonum;
'''.format(program, sem))
row2 = cursor.fetchall()
plonum = []
acheived = []
attempted = []
for record in row1:
plonum.append(record[0])
acheived.append(record[1])
for record in row2:
attempted.append(record[0])
plonum = list(set(plonum))
acheived = np.array(acheived)
attempted = np.array(attempted)
new_acheived=[]
for plo in acheived:
new_acheived.append(plo.tolist())
new_attempted=[]
for plo in attempted:
new_attempted.append(plo.tolist())
plonum.sort()
plonum.sort(key=len, reverse=False)
return plonum, new_acheived, new_attempted
def getprogramwiseploandcourses(program, semesters):
sem = '';
for semester in semesters:
sem += '"'
sem += semester
sem += '",'
sem = sem[:-1]
with connection.cursor() as cursor:
cursor.execute('''
SELECT f.ploNum, f.course_id, COUNT(*)
FROM
(
SELECT p.ploNum as plonum, s.course_id, r.student_id, 100*e.obtainedMarks/a.totalMarks as percentage
FROM app_registration_t r,
app_assessment_t a,
app_evaluation_t e,
app_co_t co,
app_plo_t p,
app_section_t s,
app_program_t prog
WHERE r.registrationID = e.registration_id
and e.assessment_id = a.assessmentID
and a.co_id=co.coID
and co.plo_id = p.ploID
and p.program_id = prog.programID
and prog.programName = '{}'
and a.section_id = s.sectionID
and s.semester IN ({})
)f
WHERE f.percentage>=40
GROUP BY f.ploNum, f.course_id
'''.format(program, sem))
row = cursor.fetchall()
plonum = []
courses = []
counts = []
for record in row:
plonum.append(record[0])
courses.append(record[1])
plonum = list(set(plonum))
plonum.sort()
plonum.sort(key=len, reverse=False)
courses = list(set(courses))
courses.sort()
table = np.zeros((len(courses), len(plonum)))
for record in row:
table[courses.index(record[1])][plonum.index(record[0])] += record[2]
table = table.tolist()
return plonum, courses, table
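# Hypothetical variant (not part of the original module): the queries above build
# SQL with str.format, which is open to SQL injection. Django's cursor accepts
# parameter placeholders instead; the same pattern applies to the other helpers.
def getstudentprogramid_safe(studentID):
    with connection.cursor() as cursor:
        cursor.execute('''
            SELECT s.program_id
            FROM app_student_t s
            WHERE s.studentID = %s
            ''', [studentID])
        row = cursor.fetchall()
    return row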
|
[
"numpy.round",
"numpy.array",
"django.db.connection.cursor"
] |
[((112, 131), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (129, 131), False, 'from django.db import connection\n'), ((930, 949), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (947, 949), False, 'from django.db import connection\n'), ((1672, 1691), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (1689, 1691), False, 'from django.db import connection\n'), ((2186, 2205), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (2203, 2205), False, 'from django.db import connection\n'), ((2502, 2521), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (2519, 2521), False, 'from django.db import connection\n'), ((3445, 3464), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (3462, 3464), False, 'from django.db import connection\n'), ((4189, 4208), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (4206, 4208), False, 'from django.db import connection\n'), ((4508, 4527), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (4525, 4527), False, 'from django.db import connection\n'), ((7192, 7211), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (7209, 7211), False, 'from django.db import connection\n'), ((10761, 10775), 'numpy.array', 'np.array', (['plos'], {}), '(plos)\n', (10769, 10775), True, 'import numpy as np\n'), ((11155, 11174), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (11172, 11174), False, 'from django.db import connection\n'), ((14719, 14738), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (14736, 14738), False, 'from django.db import connection\n'), ((15853, 15872), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (15870, 15872), False, 'from django.db import connection\n'), ((17774, 17793), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (17791, 17793), False, 'from django.db import connection\n'), ((18929, 18948), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (18946, 18948), False, 'from django.db import connection\n'), ((20318, 20336), 'numpy.array', 'np.array', (['acheived'], {}), '(acheived)\n', (20326, 20336), True, 'import numpy as np\n'), ((20357, 20376), 'numpy.array', 'np.array', (['attempted'], {}), '(attempted)\n', (20365, 20376), True, 'import numpy as np\n'), ((20896, 20915), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (20913, 20915), False, 'from django.db import connection\n'), ((10543, 10558), 'numpy.array', 'np.array', (['plos1'], {}), '(plos1)\n', (10551, 10558), True, 'import numpy as np\n'), ((10559, 10574), 'numpy.array', 'np.array', (['plos2'], {}), '(plos2)\n', (10567, 10574), True, 'import numpy as np\n'), ((13831, 13849), 'numpy.array', 'np.array', (['acheived'], {}), '(acheived)\n', (13839, 13849), True, 'import numpy as np\n'), ((13850, 13867), 'numpy.array', 'np.array', (['all_cnt'], {}), '(all_cnt)\n', (13858, 13867), True, 'import numpy as np\n'), ((17239, 17257), 'numpy.array', 'np.array', (['acheived'], {}), '(acheived)\n', (17247, 17257), True, 'import numpy as np\n'), ((17258, 17275), 'numpy.array', 'np.array', (['all_cnt'], {}), '(all_cnt)\n', (17266, 17275), True, 'import numpy as np\n'), ((6562, 6592), 'numpy.round', 'np.round', (['(100 * k[2] / k[3])', '(2)'], {}), '(100 * k[2] / k[3], 2)\n', (6570, 6592), True, 'import numpy as np\n'), ((6681, 6711), 'numpy.round', 'np.round', (['(100 * k[2] / k[4])', '(2)'], {}), '(100 * k[2] / k[4], 2)\n', (6689, 6711), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-05-17 15:50
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('taxonomy', '0009_boardprecinct_precinct_map'),
('dashboard', '0008_generalsettings_gatrackingid'),
]
operations = [
migrations.AddField(
model_name='generalsettings',
name='location',
field=models.ForeignKey(blank=True, help_text='<span>Select the primary location for the intity this site represents. This list is managed by the webmaster.</span>', null=True, on_delete=django.db.models.deletion.PROTECT, related_name='dashboard_generalsettings_location', to='taxonomy.Location', verbose_name='Primary Location'),
),
]
|
[
"django.db.models.ForeignKey"
] |
[((512, 854), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'help_text': '"""<span>Select the primary location for the intity this site represents. This list is managed by the webmaster.</span>"""', 'null': '(True)', 'on_delete': 'django.db.models.deletion.PROTECT', 'related_name': '"""dashboard_generalsettings_location"""', 'to': '"""taxonomy.Location"""', 'verbose_name': '"""Primary Location"""'}), "(blank=True, help_text=\n '<span>Select the primary location for the intity this site represents. This list is managed by the webmaster.</span>'\n , null=True, on_delete=django.db.models.deletion.PROTECT, related_name=\n 'dashboard_generalsettings_location', to='taxonomy.Location',\n verbose_name='Primary Location')\n", (529, 854), False, 'from django.db import migrations, models\n')]
|
from pathlib import Path
from argparse import ArgumentParser
from msdsl import MixedSignalModel, VerilogGenerator
from msdsl.expr.extras import if_
def main():
print('Running model generator...')
# parse command line arguments
parser = ArgumentParser()
parser.add_argument('-o', '--output', type=str, default='build')
parser.add_argument('--dt', type=float, default=0.1e-6)
a = parser.parse_args()
# create the model
m = MixedSignalModel('osc', dt=a.dt)
m.add_digital_input('emu_clk')
m.add_digital_input('emu_rst')
m.add_digital_output('dt_req', 32)
m.add_digital_input('emu_dt', 32)
m.add_digital_output('clk_val')
m.add_digital_input('t_lo', 32)
m.add_digital_input('t_hi', 32)
# determine if the request was granted
m.bind_name('req_grant', m.dt_req == m.emu_dt)
# update the clock value
m.add_digital_state('prev_clk_val')
m.set_next_cycle(m.prev_clk_val, m.clk_val, clk=m.emu_clk, rst=m.emu_rst)
m.set_this_cycle(m.clk_val, if_(m.req_grant, ~m.prev_clk_val, m.prev_clk_val))
# determine the next period
m.bind_name('dt_req_next', if_(m.prev_clk_val, m.t_lo, m.t_hi))
# increment the time request
m.bind_name('dt_req_incr', m.dt_req - m.emu_dt)
# determine the next period
m.bind_name('dt_req_imm', if_(m.req_grant, m.dt_req_next, m.dt_req_incr))
m.set_next_cycle(m.dt_req, m.dt_req_imm, clk=m.emu_clk, rst=m.emu_rst, check_format=False)
# determine the output filename
filename = Path(a.output).resolve() / f'{m.module_name}.sv'
print(f'Model will be written to: {filename}')
# generate the model
m.compile_to_file(VerilogGenerator(), filename)
if __name__ == '__main__':
main()
|
[
"msdsl.expr.extras.if_",
"argparse.ArgumentParser",
"msdsl.MixedSignalModel",
"pathlib.Path",
"msdsl.VerilogGenerator"
] |
[((250, 266), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (264, 266), False, 'from argparse import ArgumentParser\n'), ((456, 488), 'msdsl.MixedSignalModel', 'MixedSignalModel', (['"""osc"""'], {'dt': 'a.dt'}), "('osc', dt=a.dt)\n", (472, 488), False, 'from msdsl import MixedSignalModel, VerilogGenerator\n'), ((1019, 1068), 'msdsl.expr.extras.if_', 'if_', (['m.req_grant', '(~m.prev_clk_val)', 'm.prev_clk_val'], {}), '(m.req_grant, ~m.prev_clk_val, m.prev_clk_val)\n', (1022, 1068), False, 'from msdsl.expr.extras import if_\n'), ((1134, 1169), 'msdsl.expr.extras.if_', 'if_', (['m.prev_clk_val', 'm.t_lo', 'm.t_hi'], {}), '(m.prev_clk_val, m.t_lo, m.t_hi)\n', (1137, 1169), False, 'from msdsl.expr.extras import if_\n'), ((1320, 1366), 'msdsl.expr.extras.if_', 'if_', (['m.req_grant', 'm.dt_req_next', 'm.dt_req_incr'], {}), '(m.req_grant, m.dt_req_next, m.dt_req_incr)\n', (1323, 1366), False, 'from msdsl.expr.extras import if_\n'), ((1663, 1681), 'msdsl.VerilogGenerator', 'VerilogGenerator', ([], {}), '()\n', (1679, 1681), False, 'from msdsl import MixedSignalModel, VerilogGenerator\n'), ((1515, 1529), 'pathlib.Path', 'Path', (['a.output'], {}), '(a.output)\n', (1519, 1529), False, 'from pathlib import Path\n')]
|
import random
def partition(arr, left, right):
l, r, tmp = left, right, arr[left]
while l != r:
while l < r and arr[r] >= tmp: r -= 1
while l < r and arr[l] <= tmp: l += 1
arr[l], arr[r] = arr[r], arr[l]
arr[l], arr[left] = arr[left], arr[l]
return l
def quick_sort(arr, left, right):
if left <= right:
pivot = partition(arr, left, right)
quick_sort(arr, left, pivot - 1)
quick_sort(arr, pivot + 1, right)
def main():
num = 20
range_left = 0
range_right = 10000
arr = [random.randint(range_left, range_right) for _ in range(num)]
print('Original array:')
print(arr)
quick_sort(arr, 0, len(arr) - 1)
print('Sorted array:')
print(arr)
if __name__ == '__main__':
main()
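# Optional sanity check (not part of the original script): compare quick_sort
# against Python's built-in sorted() on random inputs.
def _self_test(trials=100, size=50):
    for _ in range(trials):
        data = [random.randint(0, 1000) for _ in range(size)]
        expected = sorted(data)
        quick_sort(data, 0, len(data) - 1)
        assert data == expected, "quick_sort produced an unsorted result"
# _self_test()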
|
[
"random.randint"
] |
[((560, 599), 'random.randint', 'random.randint', (['range_left', 'range_right'], {}), '(range_left, range_right)\n', (574, 599), False, 'import random\n')]
|
# Scrap all the brick mosaics for PHAT
import os
from os.path import join as osjoin
output = "/home/ekoch/bigdata/ekoch/M31/PHAT/"
baseurl = "https://archive.stsci.edu/pub/hlsp/phat/"
# This is easier than webscraping right now.
brick_dict = {1: 12058,
2: 12073,
3: 12109,
4: 12107,
5: 12074,
6: 12105,
7: 12113,
8: 12075,
9: 12057,
10: 12111,
11: 12115,
12: 12071,
13: 12114,
14: 12072,
15: 12056,
16: 12106,
17: 12059,
18: 12108,
19: 12110,
20: 12112,
21: 12055,
22: 12076,
23: 12070}
for i in range(1, 24):
if i < 10:
brickurl = f"{baseurl}/brick0{i}"
acs_475 = f"hlsp_phat_hst_acs-wfc_{brick_dict[i]}-m31-b0{i}_f475w_v1_drz.fits"
acs_814 = f"hlsp_phat_hst_acs-wfc_{brick_dict[i]}-m31-b0{i}_f814w_v1_drz.fits"
wfcir_110 = f"hlsp_phat_hst_wfc3-ir_{brick_dict[i]}-m31-b0{i}_f110w_v1_drz.fits"
wfcir_160 = f"hlsp_phat_hst_wfc3-ir_{brick_dict[i]}-m31-b0{i}_f160w_v1_drz.fits"
wfcuv_275 = f"hlsp_phat_hst_wfc3-uvis_{brick_dict[i]}-m31-b0{i}_f275w_v1_drz.fits"
wfcuv_336 = f"hlsp_phat_hst_wfc3-uvis_{brick_dict[i]}-m31-b0{i}_f336w_v1_drz.fits"
else:
brickurl = f"{baseurl}/brick{i}"
acs_475 = f"hlsp_phat_hst_acs-wfc_{brick_dict[i]}-m31-b{i}_f475w_v1_drz.fits"
acs_814 = f"hlsp_phat_hst_acs-wfc_{brick_dict[i]}-m31-b{i}_f814w_v1_drz.fits"
wfcir_110 = f"hlsp_phat_hst_wfc3-ir_{brick_dict[i]}-m31-b{i}_f110w_v1_drz.fits"
wfcir_160 = f"hlsp_phat_hst_wfc3-ir_{brick_dict[i]}-m31-b{i}_f160w_v1_drz.fits"
wfcuv_275 = f"hlsp_phat_hst_wfc3-uvis_{brick_dict[i]}-m31-b{i}_f275w_v1_drz.fits"
wfcuv_336 = f"hlsp_phat_hst_wfc3-uvis_{brick_dict[i]}-m31-b{i}_f336w_v1_drz.fits"
print(f"Downloading brick {i}")
brick_path = osjoin(output, f"brick{i}")
if not os.path.exists(brick_path):
os.mkdir(brick_path)
os.chdir(brick_path)
for file in [acs_475, acs_814, wfcir_110, wfcir_160, wfcuv_275, wfcuv_336]:
# Check if we need to download again
if os.path.exists(file):
continue
os.system(f"wget {osjoin(brickurl, file)}")
# os.system(f"wget {osjoin(brickurl, acs_814)}")
# os.system(f"wget {osjoin(brickurl, wfcir_110)}")
# os.system(f"wget {osjoin(brickurl, wfcir_160)}")
# os.system(f"wget {osjoin(brickurl, wfcuv_275)}")
# os.system(f"wget {osjoin(brickurl, wfcuv_336)}")
|
[
"os.mkdir",
"os.path.join",
"os.chdir",
"os.path.exists"
] |
[((2053, 2080), 'os.path.join', 'osjoin', (['output', 'f"""brick{i}"""'], {}), "(output, f'brick{i}')\n", (2059, 2080), True, 'from os.path import join as osjoin\n'), ((2154, 2174), 'os.chdir', 'os.chdir', (['brick_path'], {}), '(brick_path)\n', (2162, 2174), False, 'import os\n'), ((2092, 2118), 'os.path.exists', 'os.path.exists', (['brick_path'], {}), '(brick_path)\n', (2106, 2118), False, 'import os\n'), ((2128, 2148), 'os.mkdir', 'os.mkdir', (['brick_path'], {}), '(brick_path)\n', (2136, 2148), False, 'import os\n'), ((2313, 2333), 'os.path.exists', 'os.path.exists', (['file'], {}), '(file)\n', (2327, 2333), False, 'import os\n'), ((2383, 2405), 'os.path.join', 'osjoin', (['brickurl', 'file'], {}), '(brickurl, file)\n', (2389, 2405), True, 'from os.path import join as osjoin\n')]
|
#Color dict
import numpy as np
# import wandb
import cv2
import torch
import os
colors = {
'0':[(128, 64, 128), (244, 35, 232), (0, 0, 230), (220, 190, 40), (70, 70, 70), (70, 130, 180), (0, 0, 0)],
'1':[(128, 64, 128), (250, 170, 160), (244, 35, 232), (230, 150, 140), (220, 20, 60), (255, 0, 0), (0, 0, 230), (255, 204, 54), (0, 0, 70), (220, 190, 40), (190, 153, 153), (174, 64, 67), (153, 153, 153), (70, 70, 70), (107, 142, 35), (70, 130, 180)],
'2':[(128, 64, 128), (250, 170, 160), (244, 35, 232), (230, 150, 140), (220, 20, 60), (255, 0, 0), (0, 0, 230), (119, 11, 32), (255, 204, 54), (0, 0, 142), (0, 0, 70), (0, 60, 100), (0, 0, 90), (220, 190, 40), (102, 102, 156), (190, 153, 153), (180, 165, 180), (174, 64, 67), (220, 220, 0), (250, 170, 30), (153, 153, 153), (169, 187, 214), (70, 70, 70), (150, 100, 100), (107, 142, 35), (70, 130, 180)],
'3':[(128, 64, 128), (250, 170, 160), (81, 0, 81), (244, 35, 232), (230, 150, 140), (152, 251, 152), (220, 20, 60), (246, 198, 145), (255, 0, 0), (0, 0, 230), (119, 11, 32), (255, 204, 54), (0, 0, 142), (0, 0, 70), (0, 60, 100), (0, 0, 90), (0, 0, 110), (0, 80, 100), (136, 143, 153), (220, 190, 40), (102, 102, 156), (190, 153, 153), (180, 165, 180), (174, 64, 67), (220, 220, 0), (250, 170, 30), (153, 153, 153), (153, 153, 153), (169, 187, 214), (70, 70, 70), (150, 100, 100), (150, 120, 90), (107, 142, 35), (70, 130, 180), (169, 187, 214), (0, 0, 142)]
}
def visualize(mask,n_classes,ignore_label,gt = None):
if(n_classes<len(colors['0'])):
id = 0
elif(n_classes<len(colors['1'])):
id = 1
elif(n_classes<len(colors['2'])):
id = 2
else:
id = 3
out_mask = np.zeros((mask.shape[0],mask.shape[1],3))
for i in range(n_classes):
out_mask[mask == i] = colors[str(id)][i]
if(gt is not None):
out_mask[gt == ignore_label] = (255,255,255)
out_mask[np.where((out_mask == [0, 0, 0]).all(axis=2))] = (255,255,255)
return out_mask
def error_map(pred,gt,cfg):
canvas = pred.copy()
canvas[canvas == gt] = 255
canvas[gt == cfg.Loss.ignore_label] = 255
return canvas
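# Hypothetical demo (not part of the original module): colour a random label map.
# Assumes 7 classes and an ignore label of 255; real values come from the config.
if __name__ == "__main__":
    demo_mask = np.random.randint(0, 7, size=(64, 64))
    demo_rgb = visualize(demo_mask, n_classes=7, ignore_label=255)
    print(demo_rgb.shape)  # (64, 64, 3)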
# def segmentation_validation_visualization(epoch,sample,pred,batch_size,class_labels,wandb_image,cfg):
# os.makedirs(os.path.join(cfg.train.output_dir,'Visualization',str(epoch)),exist_ok = True)
# input = sample['image'].permute(0,2,3,1).detach().cpu().numpy()
# label = sample['label'].detach().cpu().numpy().astype(np.uint8)
# pred = torch.argmax(pred[0],dim = 1).detach().cpu().numpy().astype(np.uint8)
# for i in range(batch_size):
# errormap = error_map(pred[i],label[i],cfg)
# wandb_image.append(wandb.Image(cv2.resize(cv2.cvtColor(input[i], cv2.COLOR_BGR2RGB),(cfg.dataset.width//4,cfg.dataset.height//4)), masks={
# "predictions" : {
# "mask_data" : cv2.resize(pred[i],(cfg.dataset.width//4,cfg.dataset.height//4)),
# "class_labels" : class_labels
# },
# "ground_truth" : {
# "mask_data" : cv2.resize(label[i],(cfg.dataset.width//4,cfg.dataset.height//4)),
# "class_labels" : class_labels
# }
# ,
# "error_map" : {
# "mask_data" : cv2.resize(errormap,(cfg.dataset.width//4,cfg.dataset.height//4)),
# "class_labels" : class_labels
# }
# }))
# if(cfg.valid.write):
# prediction = visualize(pred[i],cfg.model.n_classes,cfg.Loss.ignore_label,gt = label[i])
# mask = visualize(label[i],cfg.model.n_classes,cfg.Loss.ignore_label,gt = label[i])
# out = np.concatenate([((input[i]* np.array(cfg.dataset.mean) + np.array(cfg.dataset.std))*255).astype(int),mask,prediction,visualize(errormap,cfg.model.n_classes,cfg.Loss.ignore_label,label[i])],axis = 1)
# cv2.imwrite(os.path.join(cfg.train.output_dir,'Visualization',str(epoch),sample['img_name'][i]),out)
# return wandb_image
|
[
"numpy.zeros"
] |
[((1692, 1735), 'numpy.zeros', 'np.zeros', (['(mask.shape[0], mask.shape[1], 3)'], {}), '((mask.shape[0], mask.shape[1], 3))\n', (1700, 1735), True, 'import numpy as np\n')]
|
'''
Contains the NetSnmp() class
Typical contents of file /proc/net/snmp::
Ip: Forwarding DefaultTTL InReceives InHdrErrors InAddrErrors ForwDatagrams
InUnknownProtos InDiscards InDelivers OutRequests OutDiscards OutNoRoutes
ReasmTimeout ReasmReqds ReasmOKs ReasmFails FragOKs FragFails FragCreates
Ip: 1 64 2354322 0 0 0 0 0 2282006 2066446 0 0 0 0 0 0 0 0 0
Icmp: InMsgs InErrors InDestUnreachs InTimeExcds InParmProbs InSrcQuenchs
InRedirects InEchos InEchoReps InTimestamps InTimestampReps InAddrMasks
InAddrMaskReps OutMsgs OutErrors OutDestUnreachs OutTimeExcds
OutParmProbs OutSrcQuenchs OutRedirects OutEchos OutEchoReps
OutTimestamps OutTimestampReps OutAddrMasks OutAddrMaskReps
Icmp: 172 0 91 0 0 0 0 81 0 0 0 0 0 168 0 87 0 0 0 0 0 81 0 0 0 0
IcmpMsg: InType3 InType8 OutType0 OutType3
IcmpMsg: 91 81 81 87
Tcp: RtoAlgorithm RtoMin RtoMax MaxConn ActiveOpens PassiveOpens AttemptFails
EstabResets CurrEstab InSegs OutSegs RetransSegs InErrs OutRsts
Tcp: 1 200 120000 -1 70054 4198 337 2847 43 1880045 1741596 7213 0 3044
Udp: InDatagrams NoPorts InErrors OutDatagrams RcvbufErrors SndbufErrors
Udp: 344291 8 376 317708 0 0
UdpLite: InDatagrams NoPorts InErrors OutDatagrams RcvbufErrors SndbufErrors
UdpLite: 0 0 0 0 0 0
'''
from logging import getLogger
from os import path as ospath
from .readfile import ReadFile
LOGGER = getLogger(__name__)
class NetSnmp(ReadFile):
'''
NetSnmp handling
'''
FILENAME = ospath.join('proc', 'net', 'snmp')
KEY = 'netsnmp'
def normalize(self):
'''
Translates data into dictionary
The net/snmp file is a series of records keyed on subcategories
'''
LOGGER.debug("Normalize")
lines = self.lines
ret = {}
fkey = ''
fvals = []
for i, line in enumerate(lines):
top, tail = line.split(':')
key = top.lstrip()
vals = tail.lstrip().split()
if i % 2:
if fkey == key:
ret[key] = dict(
zip(
fvals,
[int(val) for val in vals]
)
)
else:
fkey = key
fvals = vals
return ret
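# Standalone sketch (not part of the module) of the header/value pairing that
# normalize() performs, using two literal lines in /proc/net/snmp format taken
# from the docstring above.
if __name__ == "__main__":
    header = "Udp: InDatagrams NoPorts InErrors OutDatagrams RcvbufErrors SndbufErrors"
    values = "Udp: 344291 8 376 317708 0 0"
    fkey, fvals = header.split(':')[0], header.split(':')[1].split()
    key, vals = values.split(':')[0], values.split(':')[1].split()
    if key == fkey:
        print({key: dict(zip(fvals, [int(v) for v in vals]))})
    # -> {'Udp': {'InDatagrams': 344291, 'NoPorts': 8, 'InErrors': 376, ...}}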
|
[
"os.path.join",
"logging.getLogger"
] |
[((1416, 1435), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (1425, 1435), False, 'from logging import getLogger\n'), ((1515, 1549), 'os.path.join', 'ospath.join', (['"""proc"""', '"""net"""', '"""snmp"""'], {}), "('proc', 'net', 'snmp')\n", (1526, 1549), True, 'from os import path as ospath\n')]
|
"""
@author: <NAME>, Energy Information Networks & Systems @ TU Darmstadt
"""
import numpy as np
import tensorflow as tf
from models.igc import ImplicitGenerativeCopula, GMMNCopula
from models.utils import cdf_interpolator
import pyvinecopulib as pv
from models import mv_copulas
import matplotlib.pyplot as plt
class CopulaAutoEncoder(object):
def __init__(self, x, ae_model):
if isinstance(ae_model, str):
ae_model = tf.keras.models.load_model(ae_model)
self.encoder_model = ae_model.encoder
self.decoder_model = ae_model.decoder
self.z = self._encode(x)
self.margins = self._fit_margins(self.z)
self.u = self._cdf(self.z)
def _encode(self, x):
# encode images to latent space
return self.encoder_model(x).numpy()
def _decode(self, z):
# decode latent space samples to images
return self.decoder_model(z).numpy()
def _cdf(self, z):
# get pseudo obs
u = np.zeros_like(z)
for i in range(u.shape[1]):
u[:,i] = self.margins[i].cdf(z[:,i])
return u
def _ppf(self, u):
# inverse marginal cdf
z = np.zeros_like(u)
for i in range(z.shape[1]):
z[:,i] = self.margins[i].ppf(u[:,i])
return z
def _fit_margins(self, z):
# get the marginal distributions via ecdf interpolation
margins = []
for i in range(z.shape[1]):
margins.append(cdf_interpolator(z[:,i],
kind="linear",
x_min=np.min(z[:,i])-np.diff(np.sort(z[:,i])[0:2])[0],
x_max=np.max(z[:,i])+np.diff(np.sort(z[:,i])[-2:])[0]))
return margins
def _sample_u(self, n_samples=1):
# sample from copula
return self.copula.simulate(n_samples)
def _sample_z(self, n_samples=1, u=None):
# sample from latent space
if u is None:
return self._ppf(self._sample_u(n_samples))
else:
return self._ppf(u)
def sample_images(self, n_samples=1, z=None):
# sample an image
if z is None:
return self._decode(self._sample_z(n_samples))
else:
return self._decode(z)
def show_images(self, n=5, imgs=None, cmap="gray", title=None):
if imgs is None:
imgs = self.sample_images(n)
plt.figure(figsize=(16, 3))
for i in range(n):
ax = plt.subplot(1, n, i+1)
plt.imshow(np.squeeze(imgs[i]*255), vmin=0, vmax=255, cmap=cmap)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.suptitle(title)
plt.tight_layout()
class IGCAutoEncoder(CopulaAutoEncoder):
""" Copula Auto Encoder with Implicit Generative Copula """
def fit(self, epochs=100, batch_size=100, n_samples_train=200, regen_noise=1000000, validation_split=0.0, validation_data=None):
if validation_data is not None:
u_test = self._cdf((self._encode(validation_data)))
else:
u_test = None
#self.copula = ImplicitGenerativeCopula(dim_out = self.z.shape[1], n_samples_train=n_samples_train, dim_latent=self.z.shape[1]*2)
self.copula = ImplicitGenerativeCopula(dim_out = self.z.shape[1], n_samples_train=n_samples_train, dim_latent=self.z.shape[1]*2, n_layers=3, n_neurons=200)
hist = self.copula.fit(self.u, epochs=epochs, batch_size=batch_size, validation_data=u_test, regen_noise=regen_noise, validation_split=0.0)
return hist
def save_copula_model(self, path):
self.copula.save_model(path)
def load_copula_model(self, path, n_samples_train=200):
self.copula = ImplicitGenerativeCopula(dim_out = self.z.shape[1], n_samples_train=n_samples_train, dim_latent=self.z.shape[1]*2)
self.copula.load_model(path)
print("Loaded saved copula model.")
class GMMNCopulaAutoEncoder(CopulaAutoEncoder):
""" Copula Auto Encoder with GMMN Copula """
def fit(self, epochs=100, batch_size=100, n_samples_train=200, regen_noise=10000000, validation_split=0.0, validation_data=None):
if validation_data is not None:
u_test = self._cdf((self._encode(validation_data)))
else:
u_test = None
#self.copula = GMMNCopula(dim_out = self.z.shape[1], n_samples_train=n_samples_train, dim_latent=self.z.shape[1]*2)
self.copula = GMMNCopula(dim_out = self.z.shape[1], n_samples_train=n_samples_train, dim_latent=self.z.shape[1]*2, n_layers=3, n_neurons=200)
hist = self.copula.fit(self.u, epochs=epochs, batch_size=batch_size, validation_data=u_test, regen_noise=regen_noise, validation_split=0.0)
return hist
def save_copula_model(self, path):
self.copula.save_model(path)
def load_copula_model(self, path, n_samples_train=200):
self.copula = GMMNCopula(dim_out = self.z.shape[1], n_samples_train=n_samples_train, dim_latent=self.z.shape[1]*2)
self.copula.load_model(path)
print("Loaded saved copula model.")
class VineCopulaAutoEncoder(CopulaAutoEncoder):
""" Copula Auto Encoder with Vine Copula """
def fit(self, families="nonparametric", show_trace=False, trunc_lvl=18446744073709551615):
if families == "nonparametric":
controls = pv.FitControlsVinecop(family_set=[pv.BicopFamily.tll], trunc_lvl=trunc_lvl, show_trace=show_trace)
elif families == "parametric":
controls = pv.FitControlsVinecop(family_set=[pv.BicopFamily.indep,
pv.BicopFamily.gaussian,
pv.BicopFamily.student,
pv.BicopFamily.clayton,
pv.BicopFamily.gumbel,
pv.BicopFamily.frank,
pv.BicopFamily.joe,
pv.BicopFamily.bb1,
pv.BicopFamily.bb6,
pv.BicopFamily.bb7,
pv.BicopFamily.bb8],
trunc_lvl=trunc_lvl,
show_trace=show_trace)
else:
controls = pv.FitControlsVinecop(trunc_lvl=trunc_lvl, show_trace=show_trace)
self.copula = pv.Vinecop(data=self.u, controls=controls)
def save_model(self, path):
self.copula.to_json(path)
print(f"Saved vine copula model to {path}.")
def load_model(self, path):
self.copula = pv.Vinecop(filename=path)
print("Loaded vine copula model.")
class GaussianCopulaAutoEncoder(CopulaAutoEncoder):
""" Copula Auto Encoder with Gaussian Copula """
def fit(self):
self.copula = mv_copulas.GaussianCopula()
self.copula.fit(self.u)
class IndependenceCopulaCopulaAutoEncoder(CopulaAutoEncoder):
""" Copula Auto Encoder with Independence Copula """
def fit(self):
pass
def _sample_u(self, n_samples):
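        # under the independence copula, pseudo-observations are i.i.d. Uniform(0, 1)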
return np.random.uniform(0.0, 1.0, size=(n_samples, self.u.shape[1]))
class VariationalAutoEncoder(object):
def __init__(self, decoder_model="models/autoencoder/VAE_decoder_fashion_mnist_100epochs", latent_dim=25):
if isinstance(decoder_model, str):
self.decoder_model = tf.keras.models.load_model(decoder_model)
else:
self.decoder_model = decoder_model
self.decoder_model.compile()
        self.latent_dim = latent_dim
def _sample_z(self, n_samples):
# sample from latent space
return np.random.normal(loc=0.0, scale=1.0, size=(n_samples, self.latent_dim))
def _decode(self,z):
return self.decoder_model.predict(z)
def fit(self):
pass
def sample_images(self, n_samples):
# sample an image
return self._decode(self._sample_z(n_samples))
def show_images(self, n=5, imgs=None, cmap="gray", title=None):
if imgs is None:
imgs = self.sample_images(n)
plt.figure(figsize=(16, 3))
for i in range(n):
ax = plt.subplot(1, n, i+1)
plt.imshow(np.squeeze(imgs[i]*255), vmin=0, vmax=255, cmap=cmap)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.suptitle(title)
plt.tight_layout()
|
[
"models.mv_copulas.GaussianCopula",
"numpy.random.uniform",
"matplotlib.pyplot.subplot",
"numpy.zeros_like",
"tensorflow.keras.models.load_model",
"models.igc.GMMNCopula",
"pyvinecopulib.FitControlsVinecop",
"matplotlib.pyplot.suptitle",
"models.igc.ImplicitGenerativeCopula",
"numpy.sort",
"matplotlib.pyplot.figure",
"pyvinecopulib.Vinecop",
"numpy.min",
"numpy.max",
"numpy.random.normal",
"numpy.squeeze",
"matplotlib.pyplot.tight_layout"
] |
[((1004, 1020), 'numpy.zeros_like', 'np.zeros_like', (['z'], {}), '(z)\n', (1017, 1020), True, 'import numpy as np\n'), ((1203, 1219), 'numpy.zeros_like', 'np.zeros_like', (['u'], {}), '(u)\n', (1216, 1219), True, 'import numpy as np\n'), ((2538, 2565), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 3)'}), '(figsize=(16, 3))\n', (2548, 2565), True, 'import matplotlib.pyplot as plt\n'), ((2810, 2829), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['title'], {}), '(title)\n', (2822, 2829), True, 'import matplotlib.pyplot as plt\n'), ((2838, 2856), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2854, 2856), True, 'import matplotlib.pyplot as plt\n'), ((3406, 3552), 'models.igc.ImplicitGenerativeCopula', 'ImplicitGenerativeCopula', ([], {'dim_out': 'self.z.shape[1]', 'n_samples_train': 'n_samples_train', 'dim_latent': '(self.z.shape[1] * 2)', 'n_layers': '(3)', 'n_neurons': '(200)'}), '(dim_out=self.z.shape[1], n_samples_train=\n n_samples_train, dim_latent=self.z.shape[1] * 2, n_layers=3, n_neurons=200)\n', (3430, 3552), False, 'from models.igc import ImplicitGenerativeCopula, GMMNCopula\n'), ((3879, 3998), 'models.igc.ImplicitGenerativeCopula', 'ImplicitGenerativeCopula', ([], {'dim_out': 'self.z.shape[1]', 'n_samples_train': 'n_samples_train', 'dim_latent': '(self.z.shape[1] * 2)'}), '(dim_out=self.z.shape[1], n_samples_train=\n n_samples_train, dim_latent=self.z.shape[1] * 2)\n', (3903, 3998), False, 'from models.igc import ImplicitGenerativeCopula, GMMNCopula\n'), ((4603, 4734), 'models.igc.GMMNCopula', 'GMMNCopula', ([], {'dim_out': 'self.z.shape[1]', 'n_samples_train': 'n_samples_train', 'dim_latent': '(self.z.shape[1] * 2)', 'n_layers': '(3)', 'n_neurons': '(200)'}), '(dim_out=self.z.shape[1], n_samples_train=n_samples_train,\n dim_latent=self.z.shape[1] * 2, n_layers=3, n_neurons=200)\n', (4613, 4734), False, 'from models.igc import ImplicitGenerativeCopula, GMMNCopula\n'), ((5062, 5166), 'models.igc.GMMNCopula', 'GMMNCopula', ([], {'dim_out': 'self.z.shape[1]', 'n_samples_train': 'n_samples_train', 'dim_latent': '(self.z.shape[1] * 2)'}), '(dim_out=self.z.shape[1], n_samples_train=n_samples_train,\n dim_latent=self.z.shape[1] * 2)\n', (5072, 5166), False, 'from models.igc import ImplicitGenerativeCopula, GMMNCopula\n'), ((6804, 6846), 'pyvinecopulib.Vinecop', 'pv.Vinecop', ([], {'data': 'self.u', 'controls': 'controls'}), '(data=self.u, controls=controls)\n', (6814, 6846), True, 'import pyvinecopulib as pv\n'), ((7022, 7047), 'pyvinecopulib.Vinecop', 'pv.Vinecop', ([], {'filename': 'path'}), '(filename=path)\n', (7032, 7047), True, 'import pyvinecopulib as pv\n'), ((7241, 7268), 'models.mv_copulas.GaussianCopula', 'mv_copulas.GaussianCopula', ([], {}), '()\n', (7266, 7268), False, 'from models import mv_copulas\n'), ((7507, 7569), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(1.0)'], {'size': '(n_samples, self.u.shape[1])'}), '(0.0, 1.0, size=(n_samples, self.u.shape[1]))\n', (7524, 7569), True, 'import numpy as np\n'), ((8062, 8133), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': '(1.0)', 'size': '(n_samples, self.latent_dim)'}), '(loc=0.0, scale=1.0, size=(n_samples, self.latent_dim))\n', (8078, 8133), True, 'import numpy as np\n'), ((8516, 8543), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 3)'}), '(figsize=(16, 3))\n', (8526, 8543), True, 'import matplotlib.pyplot as plt\n'), ((8788, 8807), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['title'], {}), '(title)\n', (8800, 
8807), True, 'import matplotlib.pyplot as plt\n'), ((8816, 8834), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (8832, 8834), True, 'import matplotlib.pyplot as plt\n'), ((446, 482), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['ae_model'], {}), '(ae_model)\n', (472, 482), True, 'import tensorflow as tf\n'), ((2610, 2634), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', 'n', '(i + 1)'], {}), '(1, n, i + 1)\n', (2621, 2634), True, 'import matplotlib.pyplot as plt\n'), ((5504, 5606), 'pyvinecopulib.FitControlsVinecop', 'pv.FitControlsVinecop', ([], {'family_set': '[pv.BicopFamily.tll]', 'trunc_lvl': 'trunc_lvl', 'show_trace': 'show_trace'}), '(family_set=[pv.BicopFamily.tll], trunc_lvl=trunc_lvl,\n show_trace=show_trace)\n', (5525, 5606), True, 'import pyvinecopulib as pv\n'), ((7798, 7839), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['decoder_model'], {}), '(decoder_model)\n', (7824, 7839), True, 'import tensorflow as tf\n'), ((8588, 8612), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', 'n', '(i + 1)'], {}), '(1, n, i + 1)\n', (8599, 8612), True, 'import matplotlib.pyplot as plt\n'), ((2656, 2681), 'numpy.squeeze', 'np.squeeze', (['(imgs[i] * 255)'], {}), '(imgs[i] * 255)\n', (2666, 2681), True, 'import numpy as np\n'), ((5665, 6003), 'pyvinecopulib.FitControlsVinecop', 'pv.FitControlsVinecop', ([], {'family_set': '[pv.BicopFamily.indep, pv.BicopFamily.gaussian, pv.BicopFamily.student, pv.\n BicopFamily.clayton, pv.BicopFamily.gumbel, pv.BicopFamily.frank, pv.\n BicopFamily.joe, pv.BicopFamily.bb1, pv.BicopFamily.bb6, pv.BicopFamily\n .bb7, pv.BicopFamily.bb8]', 'trunc_lvl': 'trunc_lvl', 'show_trace': 'show_trace'}), '(family_set=[pv.BicopFamily.indep, pv.BicopFamily.\n gaussian, pv.BicopFamily.student, pv.BicopFamily.clayton, pv.\n BicopFamily.gumbel, pv.BicopFamily.frank, pv.BicopFamily.joe, pv.\n BicopFamily.bb1, pv.BicopFamily.bb6, pv.BicopFamily.bb7, pv.BicopFamily\n .bb8], trunc_lvl=trunc_lvl, show_trace=show_trace)\n', (5686, 6003), True, 'import pyvinecopulib as pv\n'), ((6715, 6780), 'pyvinecopulib.FitControlsVinecop', 'pv.FitControlsVinecop', ([], {'trunc_lvl': 'trunc_lvl', 'show_trace': 'show_trace'}), '(trunc_lvl=trunc_lvl, show_trace=show_trace)\n', (6736, 6780), True, 'import pyvinecopulib as pv\n'), ((8634, 8659), 'numpy.squeeze', 'np.squeeze', (['(imgs[i] * 255)'], {}), '(imgs[i] * 255)\n', (8644, 8659), True, 'import numpy as np\n'), ((1647, 1662), 'numpy.min', 'np.min', (['z[:, i]'], {}), '(z[:, i])\n', (1653, 1662), True, 'import numpy as np\n'), ((1747, 1762), 'numpy.max', 'np.max', (['z[:, i]'], {}), '(z[:, i])\n', (1753, 1762), True, 'import numpy as np\n'), ((1670, 1686), 'numpy.sort', 'np.sort', (['z[:, i]'], {}), '(z[:, i])\n', (1677, 1686), True, 'import numpy as np\n'), ((1770, 1786), 'numpy.sort', 'np.sort', (['z[:, i]'], {}), '(z[:, i])\n', (1777, 1786), True, 'import numpy as np\n')]
|
import socket
import Constants
# Create a TCP/IP socket
socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
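# note: this rebinds the name 'socket' from the module to the server socket object,
# so the bind/listen/accept calls below are made on the instance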
socket.bind(('', 1339))
socket.listen(1)
while True:
connection, client_address = socket.accept()
print('connection from', client_address)
while True:
# noinspection PyBroadException
try:
data = connection.recv(16)
msg = str(data, "utf8")
msg = msg.replace("#", "")
print(msg)
connection.sendall(bytes(Constants.ANSWER_POSITIVE, "utf8"))
except:
break
|
[
"socket.bind",
"socket.accept",
"socket.socket",
"socket.listen"
] |
[((66, 115), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (79, 115), False, 'import socket\n'), ((116, 139), 'socket.bind', 'socket.bind', (["('', 1339)"], {}), "(('', 1339))\n", (127, 139), False, 'import socket\n'), ((141, 157), 'socket.listen', 'socket.listen', (['(1)'], {}), '(1)\n', (154, 157), False, 'import socket\n'), ((204, 219), 'socket.accept', 'socket.accept', ([], {}), '()\n', (217, 219), False, 'import socket\n')]
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Readqc report: record stat key-value in readqc-stats.txt
### JGI_Analysis_Utility_Illumina::illumina_read_level_report
Created: Jul 24 2013
sulsj (<EMAIL>)
"""
import os
import sys
## custom libs in "../lib/"
srcDir = os.path.dirname(__file__)
sys.path.append(os.path.join(srcDir, 'tools')) ## ./tools
sys.path.append(os.path.join(srcDir, '../lib')) ## rqc-pipeline/lib
sys.path.append(os.path.join(srcDir, '../tools')) ## rqc-pipeline/tools
from readqc_constants import RQCReadQcConfig, ReadqcStats
from rqc_constants import RQCExitCodes
from os_utility import run_sh_command
from common import append_rqc_stats, append_rqc_file
statsFile = RQCReadQcConfig.CFG["stats_file"]
filesFile = RQCReadQcConfig.CFG["files_file"]
"""
Title : read_megablast_hits
Function : This function generates tophit list of megablast against different databases.
Usage : read_megablast_hits(db_name, log)
Args : blast db name or full path
Returns : SUCCESS
FAILURE
Comments :
"""
def read_megablast_hits(db, log):
currentDir = RQCReadQcConfig.CFG["output_path"]
megablastDir = "megablast"
megablastPath = os.path.join(currentDir, megablastDir)
statsFile = RQCReadQcConfig.CFG["stats_file"]
filesFile = RQCReadQcConfig.CFG["files_file"]
##
## Process blast output files
##
matchings = 0
hitCount = 0
parsedFile = os.path.join(megablastPath, "megablast.*.%s*.parsed" % (db))
matchings, _, exitCode = run_sh_command("grep -v '^#' %s 2>/dev/null | wc -l " % (parsedFile), True, log)
if exitCode == 0: ## if parsed file found.
t = matchings.split()
if len(t) == 1 and t[0].isdigit():
hitCount = int(t[0])
append_rqc_stats(statsFile, ReadqcStats.ILLUMINA_READ_MATCHING_HITS + " " + db, hitCount, log)
##
## add .parsed file
##
parsedFileFound, _, exitCode = run_sh_command("ls %s" % (parsedFile), True, log)
if parsedFileFound:
parsedFileFound = parsedFileFound.strip()
append_rqc_file(filesFile, ReadqcStats.ILLUMINA_READ_PARSED_FILE + " " + db, os.path.join(megablastPath, parsedFileFound), log)
else:
log.error("- Failed to add megablast parsed file of %s." % (db))
return RQCExitCodes.JGI_FAILURE
##
## wc the top hits
##
topHit = 0
tophitFile = os.path.join(megablastPath, "megablast.*.%s*.parsed.tophit" % (db))
tophits, _, exitCode = run_sh_command("grep -v '^#' %s 2>/dev/null | wc -l " % (tophitFile), True, log)
t = tophits.split()
if len(t) == 1 and t[0].isdigit():
topHit = int(t[0])
append_rqc_stats(statsFile, ReadqcStats.ILLUMINA_READ_TOP_HITS + " " + db, topHit, log)
##
## wc the taxonomic species
##
spe = 0
taxlistFile = os.path.join(megablastPath, "megablast.*.%s*.parsed.taxlist" % (db))
species, _, exitCode = run_sh_command("grep -v '^#' %s 2>/dev/null | wc -l " % (taxlistFile), True, log)
t = species.split()
if len(t) == 1 and t[0].isdigit():
spe = int(t[0])
append_rqc_stats(statsFile, ReadqcStats.ILLUMINA_READ_TAX_SPECIES + " " + db, spe, log)
##
## wc the top 100 hit
##
top100hits = 0
top100hitFile = os.path.join(megablastPath, "megablast.*.%s*.parsed.top100hit" % (db))
species, _, exitCode = run_sh_command("grep -v '^#' %s 2>/dev/null | wc -l " % (top100hitFile), True, log)
t = species.split()
if len(t) == 1 and t[0].isdigit():
top100hits = int(t[0])
append_rqc_stats(statsFile, ReadqcStats.ILLUMINA_READ_TOP_100HITS + " " + db, top100hits, log)
##
## Find and add taxlist file
##
taxListFound, _, exitCode = run_sh_command("ls %s" % (taxlistFile), True, log)
taxListFound = taxListFound.strip()
if taxListFound:
append_rqc_file(filesFile, ReadqcStats.ILLUMINA_READ_TAXLIST_FILE + " " + db, os.path.join(megablastPath, taxListFound), log)
else:
log.error("- Failed to add megablast taxlist file of %s." % (db))
return RQCExitCodes.JGI_FAILURE
##
## Find and add tophit file
##
tophitFound, _, exitCode = run_sh_command("ls %s" % (tophitFile), True, log)
tophitFound = tophitFound.strip()
if tophitFound:
append_rqc_file(filesFile, ReadqcStats.ILLUMINA_READ_TOPHIT_FILE + " " + db, os.path.join(megablastPath, tophitFound), log)
else:
log.error("- Failed to add megablast tophit file of %s." % (db))
return RQCExitCodes.JGI_FAILURE
##
## Find and add top100hit file
##
top100hitFound, _, exitCode = run_sh_command("ls %s" % (top100hitFile), True, log)
top100hitFound = top100hitFound.strip()
if top100hitFound:
append_rqc_file(filesFile, ReadqcStats.ILLUMINA_READ_TOP100HIT_FILE + " " + db, os.path.join(megablastPath, top100hitFound), log)
else:
log.error("- Failed to add megablast top100hit file of %s." % (db))
return RQCExitCodes.JGI_FAILURE
else:
log.info("- No blast hits for %s." % (db))
return RQCExitCodes.JGI_SUCCESS
"""
Title : read_level_mer_sampling
Function : Generate qual scores and plots of read level 20mer sampling
Usage : read_level_mer_sampling($analysis, $summary_file_dir)
Args : 1) A reference to an JGI_Analysis object
2) current working folder wkdir/uniqueness
Returns : JGI_SUCCESS: Illumina read level report could be successfully generated.
JGI_FAILURE: Illumina read level report could not be generated.
Comments : This function is intended to be called at the very end of the illumina read level data processing script.
"""
def read_level_mer_sampling(dataToRecordDict, dataFile, log):
retCode = RQCExitCodes.JGI_FAILURE
## Old data
#nSeq nStartUniMer fracStartUniMer nRandUniMer fracRandUniMer
## 0 1 2 3 4
##25000 2500 0.1 9704 0.3882
## New data
#count first rand first_cnt rand_cnt
# 0 1 2 3 4
#25000 66.400 76.088 16600 19022
#50000 52.148 59.480 13037 14870
#75000 46.592 53.444 11648 13361
#100000 43.072 49.184 10768 12296 ...
if os.path.isfile(dataFile):
with open(dataFile, "r") as merFH:
lines = merFH.readlines()
## last line
t = lines[-1].split('\t')
# breaks 2016-09-07
#assert len(t) == 5
totalMers = int(t[0])
## new by bbcountunique
uniqStartMerPer = float("%.2f" % (float(t[1])))
uniqRandtMerPer = float("%.2f" % (float(t[2])))
dataToRecordDict[ReadqcStats.ILLUMINA_READ_20MER_SAMPLE_SIZE] = totalMers
dataToRecordDict[ReadqcStats.ILLUMINA_READ_20MER_PERCENTAGE_STARTING_MERS] = uniqStartMerPer
dataToRecordDict[ReadqcStats.ILLUMINA_READ_20MER_PERCENTAGE_RANDOM_MERS] = uniqRandtMerPer
retCode = RQCExitCodes.JGI_SUCCESS
else:
log.error("- qhist file not found: %s" % (dataFile))
return retCode
"""
Title : base_level_qual_stats
Function : Generate qual scores and plots of read level QC
Usage : base_level_qual_stats($analysis, $)
Args : 1) A reference to an JGI_Analysis object
2) current working folder wkdir/qual
Returns : JGI_SUCCESS: Illumina read level report could be successfully generated.
JGI_FAILURE: Illumina read level report could not be generated.
Comments : This function is intended to be called at the very end of the illumina base level data processing script.
"""
def base_level_qual_stats(dataToRecordDict, reformatObqhistFile, log):
cummlatPer = 0
cummlatBase = 0
statsPerc = {30:0, 25:0, 20:0, 15:0, 10:0, 5:0}
statsBase = {30:0, 25:0, 20:0, 15:0, 10:0, 5:0}
Q30_seen = 0
Q25_seen = 0
Q20_seen = 0
Q15_seen = 0
Q10_seen = 0
Q5_seen = 0
## New format
##Median 38
##Mean 37.061
##STDev 4.631
##Mean_30 37.823
##STDev_30 1.699
##Quality bases fraction
#0 159 0.00008
#1 0 0.00000
#2 12175 0.00593
#3 0 0.00000
#4 0 0.00000
#5 0 0.00000
#6 0 0.00000
allLines = open(reformatObqhistFile).readlines()
for l in allLines[::-1]:
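        # the obqhist file lists quality scores in ascending order, so iterating in
        # reverse accumulates the fraction and count of bases at or above each Q cutoff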
l = l.strip()
##
## obqhist file format example
##
# #Median 36
# #Mean 33.298
# #STDev 5.890
# #Mean_30 35.303
# #STDev_30 1.517
# #Quality bases fraction
# 0 77098 0.00043
# 1 0 0.00000
# 2 0 0.00000
# 3 0 0.00000
# 4 0 0.00000
# 5 0 0.00000
# 6 0 0.00000
if len(l) > 0:
if l.startswith("#"):
if l.startswith("#Mean_30"):
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q30_SCORE_MEAN] = l.split('\t')[1]
elif l.startswith("#STDev_30"):
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q30_SCORE_STD] = l.split('\t')[1]
elif l.startswith("#Mean"):
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_OVERALL_BASES_Q_SCORE_MEAN] = l.split('\t')[1]
elif l.startswith("#STDev"):
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_OVERALL_BASES_Q_SCORE_STD] = l.split('\t')[1]
continue
qavg = None
nbase = None
percent = None
t = l.split()
try:
qavg = int(t[0])
nbase = int(t[1])
percent = float(t[2])
except IndexError:
log.warn("parse error in base_level_qual_stats: %s %s %s %s" % (l, qavg, nbase, percent))
continue
log.debug("base_level_qual_stats(): qavg and nbase and percent: %s %s %s" % (qavg, nbase, percent))
cummlatPer += percent * 100.0
cummlatPer = float("%.f" % (cummlatPer))
if cummlatPer > 100:
cummlatPer = 100.0 ## RQC-621
cummlatBase += nbase
if qavg == 30:
Q30_seen = 1
statsPerc[30] = cummlatPer
statsBase[30] = cummlatBase
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q30] = cummlatPer
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_C30] = cummlatBase
elif qavg == 25:
Q25_seen = 1
statsPerc[25] = cummlatPer
statsBase[25] = cummlatBase
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q25] = cummlatPer
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_C25] = cummlatBase
elif qavg == 20:
Q20_seen = 1
statsPerc[20] = cummlatPer
statsBase[20] = cummlatBase
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q20] = cummlatPer
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_C20] = cummlatBase
elif qavg == 15:
Q15_seen = 1
statsPerc[15] = cummlatPer
statsBase[15] = cummlatBase
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q15] = cummlatPer
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_C15] = cummlatBase
elif qavg == 10:
Q10_seen = 1
statsPerc[10] = cummlatPer
statsBase[10] = cummlatBase
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q10] = cummlatPer
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_C10] = cummlatBase
elif qavg == 5:
Q5_seen = 1
statsPerc[5] = cummlatPer
statsBase[5] = cummlatBase
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q5] = cummlatPer
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_C5] = cummlatBase
## Double check that no value is missing.
if Q25_seen == 0 and Q30_seen != 0:
Q25_seen = 1
statsPerc[25] = statsPerc[30]
statsBase[25] = statsBase[30]
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q25] = cummlatPer
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_C25] = cummlatBase
if Q20_seen == 0 and Q25_seen != 0:
Q20_seen = 1
statsPerc[20] = statsPerc[25]
statsBase[20] = statsBase[25]
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q20] = cummlatPer
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_C20] = cummlatBase
if Q15_seen == 0 and Q20_seen != 0:
Q15_seen = 1
statsPerc[15] = statsPerc[20]
statsBase[15] = statsBase[20]
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q15] = cummlatPer
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_C15] = cummlatBase
if Q10_seen == 0 and Q15_seen != 0:
Q10_seen = 1
statsPerc[10] = statsPerc[15]
statsBase[10] = statsBase[15]
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q10] = cummlatPer
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_C10] = cummlatBase
if Q5_seen == 0 and Q10_seen != 0:
Q5_seen = 1
statsPerc[5] = statsPerc[10]
statsBase[5] = statsBase[10]
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q5] = cummlatPer
dataToRecordDict[ReadqcStats.ILLUMINA_BASE_C5] = cummlatBase
if Q30_seen == 0:
log.error("Q30 is 0. Base quality values are ZERO.")
log.debug("Q and C values: %s" % (dataToRecordDict))
return RQCExitCodes.JGI_SUCCESS
"""
Title : q20_score
Function : this method returns Q20 using a qrpt file as input
Usage : JGI_QC_Utility::qc20_score($qrpt)
Args : $_[0] : qrpt file.
Returns : a number of Q20 score
Comments :
"""
# def q20_score(qrpt, log):
# log.debug("qrpt file %s" % (qrpt))
#
# q20 = None
# num = 0
#
# if os.path.isfile(qrpt):
# with open(qrpt, "r") as qrptFH:
# for l in qrptFH:
# num += 1
#
# if num == 1:
# continue
#
# ##############
# ## Old format
# ## READ1.qrpt
# ## column count min max sum mean Q1 med Q3 IQR lW rW A_Count C_Count G_Count T_Count N_Count Max_count
# ## 1 378701 2 34 12447306 32.87 31 34 34 3 27 34 108573 83917 81999 104127 85 378701
# ## 2 378701 2 34 12515957 33.05 33 34 34 1 32 34 112178 83555 84449 98519 0 378701
# ## 3 378701 2 34 12519460 33.06 33 34 34 1 32 34 104668 72341 80992 120700 0 378701
# ## 4 378701 2 37 13807944 36.46 37 37 37 0 37 37 96935 95322 83958 102440 46 378701
# ## 5 378701 2 37 13790443 36.42 37 37 37 0 37 37 114586 68297 78020 117740 58 378701
# ##
# ## or
# ##
# ## READ2.qrpt
# ## column count min max sum mean Q1 med Q3 IQR lW rW A_Count C_Count G_Count T_Count N_Count Max_count
# ## 1 378701 2 34 8875097 23.44 25 26 28 3 21 32 106904 84046 81795 105956 0 378701
# ## 2 378701 2 34 6543224 17.28 15 16 26 11 2 34 107573 77148 97953 88998 7029 378701
# ## 3 378701 2 34 7131741 18.83 16 16 26 10 2 34 96452 83003 107891 91355 0 378701
# ## 4 378701 2 37 9686653 25.58 19 32 33 14 2 37 97835 78304 87944 114618 0 378701
# ## 5 378701 2 37 10208226 26.96 25 33 35 10 10 37 98021 90611 89040 101029 0 378701
#
# pos = None
# mean = None
# t = l.split("\t")
# assert len(t) > 6
# pos = int(t[0])
# mean = float(t[5])
#
# if mean and pos:
# if mean < 20:
# return pos - 1
# else:
# q20 = pos
#
# else:
# log.error("- qhist file not found: %s" % (qrpt))
# return None
#
#
# return q20
def q20_score_new(bqHist, readNum, log):
log.debug("q20_score_new(): bqHist file = %s" % (bqHist))
q20 = None
if os.path.isfile(bqHist):
with open(bqHist, "r") as qrptFH:
for l in qrptFH:
if l.startswith('#'):
continue
## New data
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18
##BaseNum count_1 min_1 max_1 mean_1 Q1_1 med_1 Q3_1 LW_1 RW_1 count_2 min_2 max_2 mean_2 Q1_2 med_2 Q3_2 LW_2 RW_2
# 0 6900 0 36 33.48 33 34 34 29 36 6900 0 36 33.48 33 34 34 29 36
pos = None
mean = None
t = l.split("\t")
pos = int(t[0]) + 1
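                # per the header above, column 4 is mean_1 (read 1) and column 13 is
                # mean_2 (read 2)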
if readNum == 1:
mean = float(t[4])
else:
mean = float(t[13])
if mean and pos:
if mean < 20:
return pos - 1
else:
q20 = pos
else:
log.error("- bqHist file not found: %s" % (bqHist))
return None
return q20
"""
Title : read_level_qual_stats
Function : Generate qual scores and plots of read level QC
Usage : read_level_qual_stats($analysis, $)
Args : 1) A reference to an JGI_Analysis object
2) current working folder wkdir/qual
Returns : JGI_SUCCESS: Illumina read level report could be successfully generated.
JGI_FAILURE: Illumina read level report could not be generated.
Comments : This function is intended to be called at the very end of the illumina read level data processing script.
"""
def read_level_qual_stats(dataToRecordDict, qhistTxtFullPath, log):
retCode = RQCExitCodes.JGI_FAILURE
cummlatPer = 0.0
Q30_seen = 0
Q25_seen = 0
Q20_seen = 0
Q15_seen = 0
Q10_seen = 0
Q5_seen = 0
if os.path.isfile(qhistTxtFullPath):
stats = {30:0, 25:0, 20:0, 15:0, 10:0, 5:0}
allLines = open(qhistTxtFullPath).readlines()
for l in allLines[::-1]:
if not l:
break
if l.startswith('#'):
continue
t = l.split()
assert len(t) == 3
qavg = int(t[0])
percent = float(t[2]) * 100.0 ## 20140826 Changed for bbtools
cummlatPer = cummlatPer + percent
cummlatPer = float("%.2f" % cummlatPer)
if qavg <= 30 and qavg > 25 and Q30_seen == 0:
Q30_seen = 1
stats[30] = cummlatPer
dataToRecordDict[ReadqcStats.ILLUMINA_READ_Q30] = cummlatPer
elif qavg <= 25 and qavg > 20 and Q25_seen == 0:
Q25_seen = 1
stats[25] = cummlatPer
dataToRecordDict[ReadqcStats.ILLUMINA_READ_Q25] = cummlatPer
elif qavg <= 20 and qavg > 15 and Q20_seen == 0:
Q20_seen = 1
stats[20] = cummlatPer
dataToRecordDict[ReadqcStats.ILLUMINA_READ_Q20] = cummlatPer
elif qavg <= 15 and qavg > 10 and Q15_seen == 0:
Q15_seen = 1
stats[15] = cummlatPer
dataToRecordDict[ReadqcStats.ILLUMINA_READ_Q15] = cummlatPer
elif qavg <= 10 and qavg > 5 and Q10_seen == 0:
Q10_seen = 1
stats[10] = cummlatPer
dataToRecordDict[ReadqcStats.ILLUMINA_READ_Q10] = cummlatPer
elif qavg <= 5 and Q5_seen == 0:
Q5_seen = 1
stats[5] = cummlatPer
dataToRecordDict[ReadqcStats.ILLUMINA_READ_Q5] = cummlatPer
### Double check that no value is missing.
if Q25_seen == 0 and Q30_seen != 0:
Q25_seen = 1
stats[25] = stats[30]
dataToRecordDict[ReadqcStats.ILLUMINA_READ_Q25] = cummlatPer
if Q20_seen == 0 and Q25_seen != 0:
Q20_seen = 1
stats[20] = stats[25]
dataToRecordDict[ReadqcStats.ILLUMINA_READ_Q20] = cummlatPer
if Q15_seen == 0 and Q20_seen != 0:
Q15_seen = 1
stats[15] = stats[20]
dataToRecordDict[ReadqcStats.ILLUMINA_READ_Q15] = cummlatPer
if Q10_seen == 0 and Q15_seen != 0:
Q10_seen = 1
stats[10] = stats[15]
dataToRecordDict[ReadqcStats.ILLUMINA_READ_Q10] = cummlatPer
if Q5_seen == 0 and Q10_seen != 0:
Q5_seen = 1
stats[5] = stats[10]
dataToRecordDict[ReadqcStats.ILLUMINA_READ_Q5] = cummlatPer
if Q30_seen == 0:
log.error("Q30 is 0 . Read quality values are ZERO.")
log.debug("Q30 %s, Q25 %s, Q20 %s, Q15 %s, Q10 %s, Q5 %s" % \
(stats[30], stats[25], stats[20], stats[15], stats[10], stats[5]))
retCode = RQCExitCodes.JGI_SUCCESS
else:
log.error("- qhist file not found: %s" % (qhistTxtFullPath))
return retCode
"""
Title : read_gc_mean
Function : This function generates average GC content % and its standard deviation and put them into database.
Usage : read_gc_mean($analysis)
Args : 1) A reference to an JGI_Analysis object
Returns : JGI_SUCCESS:
JGI_FAILURE:
Comments :
"""
def read_gc_mean(histFile, log):
mean = 0.0
stdev = 0.0
retCode = RQCExitCodes.JGI_FAILURE
if os.path.isfile(histFile):
with open(histFile, "r") as histFH:
line = histFH.readline() ## we only need the first line
# Ex) #Found 1086 total values totalling 420.3971. <0.387106 +/- 0.112691>
if len(line) == 0 or not line.startswith("#Found"):
log.error("- GC content hist text file does not contains right results: %s, %s" % (histFile, line))
retCode = RQCExitCodes.JGI_FAILURE
else:
toks = line.split()
assert len(toks) == 9
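                # toks[6] is e.g. '<0.387106' and toks[8] is e.g. '0.112691>': strip the
                # angle brackets and convert the fractions to percentages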
mean = float(toks[6][1:]) * 100.0
stdev = float(toks[8][:-1]) * 100.0
log.debug("mean, stdev = %.2f, %.2f" % (mean, stdev))
retCode = RQCExitCodes.JGI_SUCCESS
else:
log.error("- gc hist file not found: %s" % (histFile))
return retCode, mean, stdev
if __name__ == "__main__":
exit(0)
## EOF
|
[
"common.append_rqc_stats",
"os.path.dirname",
"os.path.isfile",
"os_utility.run_sh_command",
"os.path.join"
] |
[((281, 306), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (296, 306), False, 'import os\n'), ((323, 352), 'os.path.join', 'os.path.join', (['srcDir', '"""tools"""'], {}), "(srcDir, 'tools')\n", (335, 352), False, 'import os\n'), ((385, 415), 'os.path.join', 'os.path.join', (['srcDir', '"""../lib"""'], {}), "(srcDir, '../lib')\n", (397, 415), False, 'import os\n'), ((456, 488), 'os.path.join', 'os.path.join', (['srcDir', '"""../tools"""'], {}), "(srcDir, '../tools')\n", (468, 488), False, 'import os\n'), ((1217, 1255), 'os.path.join', 'os.path.join', (['currentDir', 'megablastDir'], {}), '(currentDir, megablastDir)\n', (1229, 1255), False, 'import os\n'), ((1459, 1517), 'os.path.join', 'os.path.join', (['megablastPath', "('megablast.*.%s*.parsed' % db)"], {}), "(megablastPath, 'megablast.*.%s*.parsed' % db)\n", (1471, 1517), False, 'import os\n'), ((1549, 1627), 'os_utility.run_sh_command', 'run_sh_command', (['("grep -v \'^#\' %s 2>/dev/null | wc -l " % parsedFile)', '(True)', 'log'], {}), '("grep -v \'^#\' %s 2>/dev/null | wc -l " % parsedFile, True, log)\n', (1563, 1627), False, 'from os_utility import run_sh_command\n'), ((6610, 6634), 'os.path.isfile', 'os.path.isfile', (['dataFile'], {}), '(dataFile)\n', (6624, 6634), False, 'import os\n'), ((16911, 16933), 'os.path.isfile', 'os.path.isfile', (['bqHist'], {}), '(bqHist)\n', (16925, 16933), False, 'import os\n'), ((18958, 18990), 'os.path.isfile', 'os.path.isfile', (['qhistTxtFullPath'], {}), '(qhistTxtFullPath)\n', (18972, 18990), False, 'import os\n'), ((22468, 22492), 'os.path.isfile', 'os.path.isfile', (['histFile'], {}), '(histFile)\n', (22482, 22492), False, 'import os\n'), ((1794, 1892), 'common.append_rqc_stats', 'append_rqc_stats', (['statsFile', "(ReadqcStats.ILLUMINA_READ_MATCHING_HITS + ' ' + db)", 'hitCount', 'log'], {}), "(statsFile, ReadqcStats.ILLUMINA_READ_MATCHING_HITS + ' ' +\n db, hitCount, log)\n", (1810, 1892), False, 'from common import append_rqc_stats, append_rqc_file\n'), ((1979, 2026), 'os_utility.run_sh_command', 'run_sh_command', (["('ls %s' % parsedFile)", '(True)', 'log'], {}), "('ls %s' % parsedFile, True, log)\n", (1993, 2026), False, 'from os_utility import run_sh_command\n'), ((2477, 2542), 'os.path.join', 'os.path.join', (['megablastPath', "('megablast.*.%s*.parsed.tophit' % db)"], {}), "(megablastPath, 'megablast.*.%s*.parsed.tophit' % db)\n", (2489, 2542), False, 'import os\n'), ((2576, 2654), 'os_utility.run_sh_command', 'run_sh_command', (['("grep -v \'^#\' %s 2>/dev/null | wc -l " % tophitFile)', '(True)', 'log'], {}), '("grep -v \'^#\' %s 2>/dev/null | wc -l " % tophitFile, True, log)\n', (2590, 2654), False, 'from os_utility import run_sh_command\n'), ((2770, 2861), 'common.append_rqc_stats', 'append_rqc_stats', (['statsFile', "(ReadqcStats.ILLUMINA_READ_TOP_HITS + ' ' + db)", 'topHit', 'log'], {}), "(statsFile, ReadqcStats.ILLUMINA_READ_TOP_HITS + ' ' + db,\n topHit, log)\n", (2786, 2861), False, 'from common import append_rqc_stats, append_rqc_file\n'), ((2955, 3021), 'os.path.join', 'os.path.join', (['megablastPath', "('megablast.*.%s*.parsed.taxlist' % db)"], {}), "(megablastPath, 'megablast.*.%s*.parsed.taxlist' % db)\n", (2967, 3021), False, 'import os\n'), ((3055, 3134), 'os_utility.run_sh_command', 'run_sh_command', (['("grep -v \'^#\' %s 2>/dev/null | wc -l " % taxlistFile)', '(True)', 'log'], {}), '("grep -v \'^#\' %s 2>/dev/null | wc -l " % taxlistFile, True, log)\n', (3069, 3134), False, 'from os_utility import run_sh_command\n'), ((3247, 3338), 
'common.append_rqc_stats', 'append_rqc_stats', (['statsFile', "(ReadqcStats.ILLUMINA_READ_TAX_SPECIES + ' ' + db)", 'spe', 'log'], {}), "(statsFile, ReadqcStats.ILLUMINA_READ_TAX_SPECIES + ' ' +\n db, spe, log)\n", (3263, 3338), False, 'from common import append_rqc_stats, append_rqc_file\n'), ((3435, 3503), 'os.path.join', 'os.path.join', (['megablastPath', "('megablast.*.%s*.parsed.top100hit' % db)"], {}), "(megablastPath, 'megablast.*.%s*.parsed.top100hit' % db)\n", (3447, 3503), False, 'import os\n'), ((3537, 3622), 'os_utility.run_sh_command', 'run_sh_command', (['("grep -v \'^#\' %s 2>/dev/null | wc -l " % top100hitFile)', '(True)', 'log'], {}), '("grep -v \'^#\' %s 2>/dev/null | wc -l " % top100hitFile, True,\n log)\n', (3551, 3622), False, 'from os_utility import run_sh_command\n'), ((3738, 3836), 'common.append_rqc_stats', 'append_rqc_stats', (['statsFile', "(ReadqcStats.ILLUMINA_READ_TOP_100HITS + ' ' + db)", 'top100hits', 'log'], {}), "(statsFile, ReadqcStats.ILLUMINA_READ_TOP_100HITS + ' ' +\n db, top100hits, log)\n", (3754, 3836), False, 'from common import append_rqc_stats, append_rqc_file\n'), ((3929, 3977), 'os_utility.run_sh_command', 'run_sh_command', (["('ls %s' % taxlistFile)", '(True)', 'log'], {}), "('ls %s' % taxlistFile, True, log)\n", (3943, 3977), False, 'from os_utility import run_sh_command\n'), ((4418, 4465), 'os_utility.run_sh_command', 'run_sh_command', (["('ls %s' % tophitFile)", '(True)', 'log'], {}), "('ls %s' % tophitFile, True, log)\n", (4432, 4465), False, 'from os_utility import run_sh_command\n'), ((4906, 4956), 'os_utility.run_sh_command', 'run_sh_command', (["('ls %s' % top100hitFile)", '(True)', 'log'], {}), "('ls %s' % top100hitFile, True, log)\n", (4920, 4956), False, 'from os_utility import run_sh_command\n'), ((2201, 2245), 'os.path.join', 'os.path.join', (['megablastPath', 'parsedFileFound'], {}), '(megablastPath, parsedFileFound)\n', (2213, 2245), False, 'import os\n'), ((4140, 4181), 'os.path.join', 'os.path.join', (['megablastPath', 'taxListFound'], {}), '(megablastPath, taxListFound)\n', (4152, 4181), False, 'import os\n'), ((4624, 4664), 'os.path.join', 'os.path.join', (['megablastPath', 'tophitFound'], {}), '(megablastPath, tophitFound)\n', (4636, 4664), False, 'import os\n'), ((5127, 5170), 'os.path.join', 'os.path.join', (['megablastPath', 'top100hitFound'], {}), '(megablastPath, top100hitFound)\n', (5139, 5170), False, 'import os\n')]
|
# Copyright (C) 2014 Universidad Politecnica de Madrid
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
from mox import IsA # noqa
from django import http
from django.core.urlresolvers import reverse
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from openstack_dashboard.dashboards.idm import tests as idm_tests
from openstack_dashboard.dashboards.idm.organizations \
import forms as organizations_forms
INDEX_URL = reverse('horizon:idm:organizations:index')
CREATE_URL = reverse('horizon:idm:organizations:create')
class BaseOrganizationsTests(idm_tests.BaseTestCase):
def _get_project_info(self, project):
project_info = {
"name": unicode(project.name),
"description": unicode(project.description),
"enabled": project.enabled,
"domain": IsA(api.base.APIDictWrapper),
"city": '',
"email": '',
"img":IsA(str),
# '/static/dashboard/img/logos/small/group.png',
"website" : ''
}
return project_info
class IndexTests(BaseOrganizationsTests):
@test.create_stubs({api.keystone: ('tenant_list',)})
def test_index(self):
user_organizations = self.list_organizations()
# Owned organizations mockup
# Only calls the default/first tab, no need to mock the others tab
api.keystone.tenant_list(IsA(http.HttpRequest),
user=self.user.id,
admin=False).AndReturn((user_organizations, False))
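        # ReplayAll switches mox from record to replay mode; the stubbed keystone call
        # above must now occur exactly as recorded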
self.mox.ReplayAll()
response = self.client.get(INDEX_URL)
self.assertTemplateUsed(response, 'idm/organizations/index.html')
self.assertItemsEqual(response.context['table'].data, user_organizations)
self.assertNoMessages()
@test.create_stubs({api.keystone: ('tenant_list',)})
def test_other_organizations_tab(self):
all_organizations = self.list_organizations()
user_organizations = all_organizations[len(all_organizations)/2:]
other_organizations = all_organizations[:len(all_organizations)/2]
# Other organizations mockup
api.keystone.tenant_list(IsA(http.HttpRequest),
admin=False).AndReturn((all_organizations, False))
api.keystone.tenant_list(IsA(http.HttpRequest),
user=self.user.id,
admin=False).AndReturn((user_organizations, False))
self.mox.ReplayAll()
response = self.client.get(INDEX_URL + '?tab=panel_tabs__organizations_tab')
self.assertTemplateUsed(response, 'idm/organizations/index.html')
self.assertItemsEqual(response.context['table'].data, other_organizations)
self.assertNoMessages()
class DetailTests(BaseOrganizationsTests):
@test.create_stubs({
api.keystone: (
'tenant_get',
'user_list',
)
})
def test_detail(self):
project = self.get_organization()
users = self.users.list()
api.keystone.user_list(IsA(http.HttpRequest),
project=project.id,
filters={'enabled':True}).AndReturn(users)
api.keystone.tenant_get(IsA(http.HttpRequest),
project.id,
admin=True).AndReturn(project)
self.mox.ReplayAll()
url = reverse('horizon:idm:organizations:detail', args=[project.id])
response = self.client.get(url)
self.assertTemplateUsed(response, 'idm/organizations/detail.html')
self.assertItemsEqual(response.context['members_table'].data, users)
self.assertNoMessages()
class CreateTests(BaseOrganizationsTests):
@test.create_stubs({api.keystone: ('tenant_create',)})
def test_create_organization(self):
project = self.get_organization()
project_details = self._get_project_info(project)
api.keystone.tenant_create(IsA(http.HttpRequest),
**project_details).AndReturn(project)
self.mox.ReplayAll()
form_data = {
'method': 'CreateOrganizationForm',
'name': project._info["name"],
'description': project._info["description"],
}
response = self.client.post(CREATE_URL, form_data)
self.assertNoFormErrors(response)
def test_create_organization_required_fields(self):
form_data = {
'method': 'CreateOrganizationForm',
'name': '',
'description': '',
}
response = self.client.post(CREATE_URL, form_data)
self.assertFormError(response, 'form', 'name', ['This field is required.'])
self.assertNoMessages()
class UpdateInfoTests(BaseOrganizationsTests):
@test.create_stubs({
api.keystone: (
'tenant_update',
'tenant_get',
),
})
def test_update_info(self):
project = self.get_organization()
updated_project = {"name": 'Updated organization',
"description": 'updated organization',
"enabled": True,
"city": 'Madrid'}
api.keystone.tenant_get(IsA(http.HttpRequest), project.id).AndReturn(project)
api.keystone.tenant_update(IsA(http.HttpRequest),
project.id,
**updated_project).AndReturn(project)
self.mox.ReplayAll()
form_data = {
'method': 'InfoForm',
'orgID':project.id,
'name': updated_project["name"],
'description': updated_project["description"],
'city':updated_project["city"],
}
url = reverse('horizon:idm:organizations:info', args=[project.id])
response = self.client.post(url, form_data)
self.assertNoFormErrors(response)
@unittest.skip('not ready')
@test.create_stubs({api.keystone: ('tenant_get',)})
def test_update_info_required_fields(self):
project = self.get_organization()
api.keystone.tenant_get(IsA(http.HttpRequest), project.id).AndReturn(project)
self.mox.ReplayAll()
form_data = {
'method': 'InfoForm',
'orgID': project.id,
'name': '',
'description': '',
'city': '',
}
url = reverse('horizon:idm:organizations:info', args=[project.id])
response = self.client.post(url, form_data)
# FIXME(garcianavalon) form contains the last form in forms, not the one
        # we want to test. The world is tough for multiforms :(
self.assertFormError(response, 'form', 'name', ['This field is required.'])
self.assertFormError(response, 'form', 'description', ['This field is required.'])
self.assertNoMessages()
class UpdateContactTests(BaseOrganizationsTests):
@test.create_stubs({
api.keystone: (
'tenant_update',
'tenant_get',
),
})
def test_update_contact(self):
project = self.get_organization()
updated_project = {
"email": '<EMAIL>',
"website": 'http://www.organization.com/',
}
api.keystone.tenant_get(IsA(http.HttpRequest), project.id).AndReturn(project)
api.keystone.tenant_update(IsA(http.HttpRequest),
project.id,
**updated_project).AndReturn(project)
self.mox.ReplayAll()
form_data = {
'method': 'ContactForm',
'orgID':project.id,
'email': updated_project["email"],
'website': updated_project["website"],
}
url = reverse('horizon:idm:organizations:contact', args=[project.id])
response = self.client.post(url, form_data)
self.assertNoFormErrors(response)
@test.create_stubs({api.keystone: ('tenant_get',)})
def test_update_contact_required_fields(self):
project = self.get_organization()
api.keystone.tenant_get(IsA(http.HttpRequest), project.id).AndReturn(project)
self.mox.ReplayAll()
form_data = {
'method': 'ContactForm',
'orgID':project.id,
'email': '',
'website': '',
}
url = reverse('horizon:idm:organizations:contact', args=[project.id])
response = self.client.post(url, form_data)
self.assertNoMessages()
class DeleteTests(BaseOrganizationsTests):
@test.create_stubs({
api.keystone: (
'tenant_delete',
'tenant_get',
),
})
def test_delete_organization(self):
project = self.get_organization()
api.keystone.tenant_get(IsA(http.HttpRequest), project.id).AndReturn(project)
api.keystone.tenant_delete(IsA(http.HttpRequest), project).AndReturn(None)
self.mox.ReplayAll()
form_data = {
'method': 'CancelForm',
'orgID': project.id,
}
url = reverse('horizon:idm:organizations:cancel', args=[project.id])
response = self.client.post(url, form_data)
self.assertNoFormErrors(response)
class UpdateAvatarTests(BaseOrganizationsTests):
# https://docs.djangoproject.com/en/1.7/topics/testing/tools/#django.test.Client.post
# https://code.google.com/p/pymox/wiki/MoxDocumentation
@unittest.skip('not ready')
@test.create_stubs({
api.keystone: (
'tenant_update',
),
})
def test_update_avatar(self):
project = self.get_organization()
mock_file = self.mox.CreateMock(file)
updated_project = {"image": 'image',}
api.keystone.tenant_update(IsA(http.HttpRequest),
project.id,
**updated_project).AndReturn(project)
self.mox.ReplayAll()
form_data = {
'method': 'AvatarForm',
'orgID':project.id,
'image': updated_project["image"],
}
url = reverse('horizon:idm:organizations:avatar', args=[project.id])
response = self.client.post(url, form_data)
self.assertNoFormErrors(response)
|
[
"unittest.skip",
"openstack_dashboard.test.helpers.create_stubs",
"django.core.urlresolvers.reverse",
"mox.IsA"
] |
[((984, 1026), 'django.core.urlresolvers.reverse', 'reverse', (['"""horizon:idm:organizations:index"""'], {}), "('horizon:idm:organizations:index')\n", (991, 1026), False, 'from django.core.urlresolvers import reverse\n'), ((1040, 1083), 'django.core.urlresolvers.reverse', 'reverse', (['"""horizon:idm:organizations:create"""'], {}), "('horizon:idm:organizations:create')\n", (1047, 1083), False, 'from django.core.urlresolvers import reverse\n'), ((1652, 1703), 'openstack_dashboard.test.helpers.create_stubs', 'test.create_stubs', (["{api.keystone: ('tenant_list',)}"], {}), "({api.keystone: ('tenant_list',)})\n", (1669, 1703), True, 'from openstack_dashboard.test import helpers as test\n'), ((2360, 2411), 'openstack_dashboard.test.helpers.create_stubs', 'test.create_stubs', (["{api.keystone: ('tenant_list',)}"], {}), "({api.keystone: ('tenant_list',)})\n", (2377, 2411), True, 'from openstack_dashboard.test import helpers as test\n'), ((3382, 3444), 'openstack_dashboard.test.helpers.create_stubs', 'test.create_stubs', (["{api.keystone: ('tenant_get', 'user_list')}"], {}), "({api.keystone: ('tenant_get', 'user_list')})\n", (3399, 3444), True, 'from openstack_dashboard.test import helpers as test\n'), ((4326, 4379), 'openstack_dashboard.test.helpers.create_stubs', 'test.create_stubs', (["{api.keystone: ('tenant_create',)}"], {}), "({api.keystone: ('tenant_create',)})\n", (4343, 4379), True, 'from openstack_dashboard.test import helpers as test\n'), ((5395, 5461), 'openstack_dashboard.test.helpers.create_stubs', 'test.create_stubs', (["{api.keystone: ('tenant_update', 'tenant_get')}"], {}), "({api.keystone: ('tenant_update', 'tenant_get')})\n", (5412, 5461), True, 'from openstack_dashboard.test import helpers as test\n'), ((6523, 6549), 'unittest.skip', 'unittest.skip', (['"""not ready"""'], {}), "('not ready')\n", (6536, 6549), False, 'import unittest\n'), ((6555, 6605), 'openstack_dashboard.test.helpers.create_stubs', 'test.create_stubs', (["{api.keystone: ('tenant_get',)}"], {}), "({api.keystone: ('tenant_get',)})\n", (6572, 6605), True, 'from openstack_dashboard.test import helpers as test\n'), ((7533, 7599), 'openstack_dashboard.test.helpers.create_stubs', 'test.create_stubs', (["{api.keystone: ('tenant_update', 'tenant_get')}"], {}), "({api.keystone: ('tenant_update', 'tenant_get')})\n", (7550, 7599), True, 'from openstack_dashboard.test import helpers as test\n'), ((8530, 8580), 'openstack_dashboard.test.helpers.create_stubs', 'test.create_stubs', (["{api.keystone: ('tenant_get',)}"], {}), "({api.keystone: ('tenant_get',)})\n", (8547, 8580), True, 'from openstack_dashboard.test import helpers as test\n'), ((9157, 9223), 'openstack_dashboard.test.helpers.create_stubs', 'test.create_stubs', (["{api.keystone: ('tenant_delete', 'tenant_get')}"], {}), "({api.keystone: ('tenant_delete', 'tenant_get')})\n", (9174, 9223), True, 'from openstack_dashboard.test import helpers as test\n'), ((10034, 10060), 'unittest.skip', 'unittest.skip', (['"""not ready"""'], {}), "('not ready')\n", (10047, 10060), False, 'import unittest\n'), ((10066, 10119), 'openstack_dashboard.test.helpers.create_stubs', 'test.create_stubs', (["{api.keystone: ('tenant_update',)}"], {}), "({api.keystone: ('tenant_update',)})\n", (10083, 10119), True, 'from openstack_dashboard.test import helpers as test\n'), ((3987, 4049), 'django.core.urlresolvers.reverse', 'reverse', (['"""horizon:idm:organizations:detail"""'], {'args': '[project.id]'}), "('horizon:idm:organizations:detail', args=[project.id])\n", (3994, 4049), False, 'from 
django.core.urlresolvers import reverse\n'), ((6362, 6422), 'django.core.urlresolvers.reverse', 'reverse', (['"""horizon:idm:organizations:info"""'], {'args': '[project.id]'}), "('horizon:idm:organizations:info', args=[project.id])\n", (6369, 6422), False, 'from django.core.urlresolvers import reverse\n'), ((7006, 7066), 'django.core.urlresolvers.reverse', 'reverse', (['"""horizon:idm:organizations:info"""'], {'args': '[project.id]'}), "('horizon:idm:organizations:info', args=[project.id])\n", (7013, 7066), False, 'from django.core.urlresolvers import reverse\n'), ((8366, 8429), 'django.core.urlresolvers.reverse', 'reverse', (['"""horizon:idm:organizations:contact"""'], {'args': '[project.id]'}), "('horizon:idm:organizations:contact', args=[project.id])\n", (8373, 8429), False, 'from django.core.urlresolvers import reverse\n'), ((8958, 9021), 'django.core.urlresolvers.reverse', 'reverse', (['"""horizon:idm:organizations:contact"""'], {'args': '[project.id]'}), "('horizon:idm:organizations:contact', args=[project.id])\n", (8965, 9021), False, 'from django.core.urlresolvers import reverse\n'), ((9672, 9734), 'django.core.urlresolvers.reverse', 'reverse', (['"""horizon:idm:organizations:cancel"""'], {'args': '[project.id]'}), "('horizon:idm:organizations:cancel', args=[project.id])\n", (9679, 9734), False, 'from django.core.urlresolvers import reverse\n'), ((10705, 10767), 'django.core.urlresolvers.reverse', 'reverse', (['"""horizon:idm:organizations:avatar"""'], {'args': '[project.id]'}), "('horizon:idm:organizations:avatar', args=[project.id])\n", (10712, 10767), False, 'from django.core.urlresolvers import reverse\n'), ((1369, 1397), 'mox.IsA', 'IsA', (['api.base.APIDictWrapper'], {}), '(api.base.APIDictWrapper)\n', (1372, 1397), False, 'from mox import IsA\n'), ((1466, 1474), 'mox.IsA', 'IsA', (['str'], {}), '(str)\n', (1469, 1474), False, 'from mox import IsA\n'), ((1931, 1952), 'mox.IsA', 'IsA', (['http.HttpRequest'], {}), '(http.HttpRequest)\n', (1934, 1952), False, 'from mox import IsA\n'), ((2729, 2750), 'mox.IsA', 'IsA', (['http.HttpRequest'], {}), '(http.HttpRequest)\n', (2732, 2750), False, 'from mox import IsA\n'), ((2868, 2889), 'mox.IsA', 'IsA', (['http.HttpRequest'], {}), '(http.HttpRequest)\n', (2871, 2889), False, 'from mox import IsA\n'), ((3629, 3650), 'mox.IsA', 'IsA', (['http.HttpRequest'], {}), '(http.HttpRequest)\n', (3632, 3650), False, 'from mox import IsA\n'), ((3812, 3833), 'mox.IsA', 'IsA', (['http.HttpRequest'], {}), '(http.HttpRequest)\n', (3815, 3833), False, 'from mox import IsA\n'), ((4556, 4577), 'mox.IsA', 'IsA', (['http.HttpRequest'], {}), '(http.HttpRequest)\n', (4559, 4577), False, 'from mox import IsA\n'), ((5834, 5855), 'mox.IsA', 'IsA', (['http.HttpRequest'], {}), '(http.HttpRequest)\n', (5837, 5855), False, 'from mox import IsA\n'), ((5923, 5944), 'mox.IsA', 'IsA', (['http.HttpRequest'], {}), '(http.HttpRequest)\n', (5926, 5944), False, 'from mox import IsA\n'), ((6729, 6750), 'mox.IsA', 'IsA', (['http.HttpRequest'], {}), '(http.HttpRequest)\n', (6732, 6750), False, 'from mox import IsA\n'), ((7885, 7906), 'mox.IsA', 'IsA', (['http.HttpRequest'], {}), '(http.HttpRequest)\n', (7888, 7906), False, 'from mox import IsA\n'), ((7974, 7995), 'mox.IsA', 'IsA', (['http.HttpRequest'], {}), '(http.HttpRequest)\n', (7977, 7995), False, 'from mox import IsA\n'), ((8706, 8727), 'mox.IsA', 'IsA', (['http.HttpRequest'], {}), '(http.HttpRequest)\n', (8709, 8727), False, 'from mox import IsA\n'), ((9389, 9410), 'mox.IsA', 'IsA', (['http.HttpRequest'], {}), 
'(http.HttpRequest)\n', (9392, 9410), False, 'from mox import IsA\n'), ((9478, 9499), 'mox.IsA', 'IsA', (['http.HttpRequest'], {}), '(http.HttpRequest)\n', (9481, 9499), False, 'from mox import IsA\n'), ((10363, 10384), 'mox.IsA', 'IsA', (['http.HttpRequest'], {}), '(http.HttpRequest)\n', (10366, 10384), False, 'from mox import IsA\n')]
|
import sys
from sys import argv, exit
import os
from os import linesep
from getopt import getopt, GetoptError
import re
import socks
from getpass import getuser
ERROR_ILLEGAL_ARGUMENTS = 2
def usage(error_message=None):
if error_message:
sys.stderr.write('ERROR: ' + error_message + linesep)
sys.stdout.write(linesep.join([
'Usage:',
' sftpsync.py [OPTION]... SOURCE DESTINATION',
'Pull:',
' sftpsync.py [OPTION]... [s]ftp://[user[:password]@]host[:port][/path] /path/to/local/copy',
'Push:',
' sftpsync.py [OPTION]... /path/to/local/copy [s]ftp://[user[:password]@]host[:port][/path]',
'',
'Defaults:',
' user: anonymous',
' password: <PASSWORD>',
' port: 22',
' path: /',
'',
'Options:',
'-f/--force Force the synchronization regardless of files\' presence or timestamps.',
'-F config_file Specifies an alternative per-user configuration file.',
' If a configuration file is given on the command line, the system-wide configuration file (/etc/ssh/ssh_config) will be ignored.',
' The default for the per-user configuration file is ~/.ssh/config.',
'-h/--help Prints this!',
'-i/--identity identity_file',
' Selects the file from which the identity (private key) for public key authentication is read.',
'-o ssh_option',
' Can be used to pass options to ssh in the format used in ssh_config(5). This is useful for specifying options for which there is no separate sftpsync command-line flag.',
' For full details of the options listed below, and their possible values, see ssh_config(5).',
' ProxyCommand',
'-p/--preserve: Preserves modification times, access times, and modes from the original file.',
'--proxy [user[:password]@]host[:port]',
' SOCKS proxy to use. If not provided, port will be defaulted to 1080.',
'--proxy-version SOCKS4|SOCKS5',
' Version of the SOCKS protocol to use. Default is SOCKS5.',
'-q/--quiet: Quiet mode: disables the progress meter as well as warning and diagnostic messages from ssh(1).',
'-r/--recursive: Recursively synchronize entire directories.',
'-v/--verbose: Verbose mode. Causes sftpsync to print debugging messages about their progress. This is helpful in debugging connection, authentication, and configuration problems.',
''
]))
def configure(argv):
try:
# Default configuration:
config = {
'force': False,
'preserve': False,
'quiet': False,
'recursive': False,
'verbose': False,
'private_key': None,
'proxy': None,
'proxy_version': socks.SOCKS5,
'ssh_config' : '~/.ssh/config',
'ssh_options': {},
}
opts, args = getopt(argv, 'fF:hi:o:pqrv', ['force', 'help', 'identity=', 'preserve', 'proxy=', 'proxy-version=', 'quiet', 'recursive', 'verbose'])
for opt, value in opts:
if opt in ('-h', '--help'):
usage()
exit()
if opt in ('-f', '--force'):
config['force'] = True
if opt in ('-p', '--preserve'):
config['preserve'] = True
if opt in ('-q', '--quiet'):
config['quiet'] = True
if opt in ('-r', '--recursive'):
config['recursive'] = True
if opt in ('-v', '--verbose'):
config['verbose'] = True
if opt in ('-i', '--identity'):
config['private_key'] = _validate_private_key_path(value)
if opt == '--proxy':
config['proxy'] = _validate_and_parse_socks_proxy(value)
if opt == '--proxy-version':
config['proxy_version'] = _validate_and_parse_socks_proxy_version(value)
if opt == '-F':
config['ssh_config'] = _validate_ssh_config_path(value)
if opt == '-o':
k, v = _validate_ssh_option(value)
config['ssh_options'][k] = v
if config['verbose'] and config['quiet']:
raise ValueError('Please provide either -q/--quiet OR -v/--verbose, but NOT both at the same time.')
if len(args) != 2:
raise ValueError('Please provide a source and a destination. Expected 2 arguments but got %s: %s.' % (len(args), args))
(source, destination) = args
config['source'] = _validate_source(source)
config['destination'] = _validate_destination(destination)
return config
except GetoptError as e:
usage(str(e))
exit(ERROR_ILLEGAL_ARGUMENTS)
except ValueError as e:
usage(str(e))
exit(ERROR_ILLEGAL_ARGUMENTS)
def _validate_private_key_path(path):
if not path:
raise ValueError('Invalid path: "%s". Please provide a valid path to your private key.' % path)
if not os.path.exists(path):
raise ValueError('Invalid path: "%s". Provided path does NOT exist. Please provide a valid path to your private key.' % path)
return path
def _validate_ssh_config_path(path):
if not path:
raise ValueError('Invalid path: "%s". Please provide a valid path to your SSH configuration.' % path)
if not os.path.exists(path):
raise ValueError('Invalid path: "%s". Provided path does NOT exist. Please provide a valid path to your SSH configuration.' % path)
return path
def _validate_ssh_option(option, white_list=['ProxyCommand']):
key_value = option.split('=', 1) if '=' in option else option.split(' ', 1)
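    # accept both 'Key=value' and 'Key value' forms, splitting only on the first separator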
if not key_value or not len(key_value) == 2:
raise ValueError('Invalid SSH option: "%s".' % option)
key, value = key_value
if not key or not value:
raise ValueError('Invalid SSH option: "%s".' % option)
if key not in white_list:
raise ValueError('Unsupported SSH option: "%s". Only the following SSH options are currently supported: %s.' % (key, ', '.join(white_list)))
return key, value
_USER = 'user'
_PASS = '<PASSWORD>'
_HOST = 'host'
_PORT = 'port'
_PATH = 'path'
_DRIVE = 'drive'
_FILEPATH = 'filepath'
_PATTERNS = {
_USER: r'.+?',
_PASS: r'.+?',
_HOST: r'[\w\-\.]{3,}?',
_PORT: r'|\d{1,4}|6553[0-5]|655[0-2]\d|65[0-4]\d{2}|6[0-4]\d{3}|[0-5]\d{4}',
_PATH: r'/.*',
_DRIVE: r'[a-zA-Z]{1}:',
_FILEPATH: r'.*?',
}
def _group(name, patterns=_PATTERNS):
return '(?P<%s>%s)' % (name, patterns[name])
_PROXY_PATTERN = '^(%s(:%s)?@)?%s(:%s)?$' % (_group(_USER), _group(_PASS), _group(_HOST), _group(_PORT))
_SFTP_PATTERN = '^s?ftp://(%s(:%s)?@)?%s(:%s)?%s?$' % (_group(_USER), _group(_PASS), _group(_HOST), _group(_PORT), _group(_PATH))
_PATH_PATTERN = '^%s?%s$' % (_group(_DRIVE), _group(_FILEPATH))
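# example (illustrative values): 'sftp://alice:secret@host.example.com:2222/data'
# matches _SFTP_PATTERN, and groupdict() yields the user, pass, host, port and path groups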
def _validate_and_parse_socks_proxy(proxy):
return _validate_and_parse_connection_string(proxy, _PROXY_PATTERN, 'Invalid proxy: "%s".' % proxy)
def _validate_and_parse_sftp(sftp):
return _validate_and_parse_connection_string(sftp, _SFTP_PATTERN, 'Invalid SFTP connection details: "%s".' % sftp)
def _validate_and_parse_connection_string(connection_string, pattern, error_message):
'''
Parses the provided connection string against the provided pattern into a dictionary, if there is a match,
or raises exception if no match.
'''
match = re.search(pattern, connection_string)
if not match:
raise ValueError(error_message)
return dict((key, value) for (key, value) in match.groupdict().items() if value)
def _validate_and_parse_socks_proxy_version(socks_version, white_list=['SOCKS4', 'SOCKS5']):
if socks_version not in white_list:
raise ValueError('Invalid SOCKS proxy version: "%s". Please choose one of the following values: { %s }.' % (socks_version, ', '.join(white_list)))
    return getattr(socks, socks_version)
def _validate_source(source):
if _is_sftp(source):
return _validate_and_parse_sftp(source)
if _is_path(source):
return _validate_is_readable_path(source)
raise ValueError('Invalid source. Please provide either SFTP connection details or a path to a local, existing and readable folder: %s.' % source)
def _validate_destination(destination):
if _is_sftp(destination):
return _validate_and_parse_sftp(destination)
if _is_path(destination):
return _validate_is_writable_path(destination)
raise ValueError('Invalid destination. Please provide either SFTP connection details or a path to a local, existing and writable folder: %s.' % destination)
def _is_sftp(sftp):
return re.search(_SFTP_PATTERN, sftp)
def _is_path(path):
return re.search(_PATH_PATTERN, path)
def _validate_is_readable_path(path):
if not os.path.exists(path):
raise ValueError('Invalid path. "%s" does NOT exist.' % path)
if not os.access(os.path.abspath(path), os.R_OK):
raise ValueError('Invalid path. "%s" exists but user "%s" does NOT have read access.' % (path, getuser()))
return path
def _validate_is_writable_path(path):
if not os.path.exists(path):
raise ValueError('Invalid path. "%s" does NOT exist.' % path)
if not os.access(os.path.abspath(path), os.W_OK):
raise ValueError('Invalid path. "%s" exists but user "%s" does NOT have write access.' % (path, getuser()))
return path
|
[
"os.path.abspath",
"getpass.getuser",
"getopt.getopt",
"os.path.exists",
"os.linesep.join",
"sys.stderr.write",
"re.search",
"sys.exit"
] |
[((7751, 7788), 're.search', 're.search', (['pattern', 'connection_string'], {}), '(pattern, connection_string)\n', (7760, 7788), False, 'import re\n'), ((8997, 9027), 're.search', 're.search', (['_SFTP_PATTERN', 'sftp'], {}), '(_SFTP_PATTERN, sftp)\n', (9006, 9027), False, 'import re\n'), ((9060, 9090), 're.search', 're.search', (['_PATH_PATTERN', 'path'], {}), '(_PATH_PATTERN, path)\n', (9069, 9090), False, 'import re\n'), ((253, 306), 'sys.stderr.write', 'sys.stderr.write', (["('ERROR: ' + error_message + linesep)"], {}), "('ERROR: ' + error_message + linesep)\n", (269, 306), False, 'import sys\n'), ((329, 2494), 'os.linesep.join', 'linesep.join', (['[\'Usage:\', \' sftpsync.py [OPTION]... SOURCE DESTINATION\', \'Pull:\',\n \' sftpsync.py [OPTION]... [s]ftp://[user[:password]@]host[:port][/path] /path/to/local/copy\'\n , \'Push:\',\n \' sftpsync.py [OPTION]... /path/to/local/copy [s]ftp://[user[:password]@]host[:port][/path]\'\n , \'\', \'Defaults:\', \' user: anonymous\',\n \' password: <PASSWORD>\', \' port: 22\', \' path: /\', \'\',\n \'Options:\',\n "-f/--force Force the synchronization regardless of files\' presence or timestamps."\n ,\n \'-F config_file Specifies an alternative per-user configuration file.\',\n \' If a configuration file is given on the command line, the system-wide configuration file (/etc/ssh/ssh_config) will be ignored.\'\n ,\n \' The default for the per-user configuration file is ~/.ssh/config.\'\n , \'-h/--help Prints this!\', \'-i/--identity identity_file\',\n \' Selects the file from which the identity (private key) for public key authentication is read.\'\n , \'-o ssh_option\',\n \' Can be used to pass options to ssh in the format used in ssh_config(5). This is useful for specifying options for which there is no separate sftpsync command-line flag.\'\n ,\n \' For full details of the options listed below, and their possible values, see ssh_config(5).\'\n , \' ProxyCommand\',\n \'-p/--preserve: Preserves modification times, access times, and modes from the original file.\'\n , \'--proxy [user[:password]@]host[:port]\',\n \' SOCKS proxy to use. If not provided, port will be defaulted to 1080.\'\n , \'--proxy-version SOCKS4|SOCKS5\',\n \' Version of the SOCKS protocol to use. Default is SOCKS5.\',\n \'-q/--quiet: Quiet mode: disables the progress meter as well as warning and diagnostic messages from ssh(1).\'\n , \'-r/--recursive: Recursively synchronize entire directories.\',\n \'-v/--verbose: Verbose mode. Causes sftpsync to print debugging messages about their progress. This is helpful in debugging connection, authentication, and configuration problems.\'\n , \'\']'], {}), '([\'Usage:\', \' sftpsync.py [OPTION]... SOURCE DESTINATION\',\n \'Pull:\',\n \' sftpsync.py [OPTION]... [s]ftp://[user[:password]@]host[:port][/path] /path/to/local/copy\'\n , \'Push:\',\n \' sftpsync.py [OPTION]... 
/path/to/local/copy [s]ftp://[user[:password]@]host[:port][/path]\'\n , \'\', \'Defaults:\', \' user: anonymous\',\n \' password: <PASSWORD>\', \' port: 22\', \' path: /\', \'\',\n \'Options:\',\n "-f/--force Force the synchronization regardless of files\' presence or timestamps."\n ,\n \'-F config_file Specifies an alternative per-user configuration file.\',\n \' If a configuration file is given on the command line, the system-wide configuration file (/etc/ssh/ssh_config) will be ignored.\'\n ,\n \' The default for the per-user configuration file is ~/.ssh/config.\'\n , \'-h/--help Prints this!\', \'-i/--identity identity_file\',\n \' Selects the file from which the identity (private key) for public key authentication is read.\'\n , \'-o ssh_option\',\n \' Can be used to pass options to ssh in the format used in ssh_config(5). This is useful for specifying options for which there is no separate sftpsync command-line flag.\'\n ,\n \' For full details of the options listed below, and their possible values, see ssh_config(5).\'\n , \' ProxyCommand\',\n \'-p/--preserve: Preserves modification times, access times, and modes from the original file.\'\n , \'--proxy [user[:password]@]host[:port]\',\n \' SOCKS proxy to use. If not provided, port will be defaulted to 1080.\'\n , \'--proxy-version SOCKS4|SOCKS5\',\n \' Version of the SOCKS protocol to use. Default is SOCKS5.\',\n \'-q/--quiet: Quiet mode: disables the progress meter as well as warning and diagnostic messages from ssh(1).\'\n , \'-r/--recursive: Recursively synchronize entire directories.\',\n \'-v/--verbose: Verbose mode. Causes sftpsync to print debugging messages about their progress. This is helpful in debugging connection, authentication, and configuration problems.\'\n , \'\'])\n', (341, 2494), False, 'from os import linesep\n'), ((3114, 3251), 'getopt.getopt', 'getopt', (['argv', '"""fF:hi:o:pqrv"""', "['force', 'help', 'identity=', 'preserve', 'proxy=', 'proxy-version=',\n 'quiet', 'recursive', 'verbose']"], {}), "(argv, 'fF:hi:o:pqrv', ['force', 'help', 'identity=', 'preserve',\n 'proxy=', 'proxy-version=', 'quiet', 'recursive', 'verbose'])\n", (3120, 3251), False, 'from getopt import getopt, GetoptError\n'), ((5251, 5271), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (5265, 5271), False, 'import os\n'), ((5599, 5619), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (5613, 5619), False, 'import os\n'), ((9141, 9161), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (9155, 9161), False, 'import os\n'), ((9468, 9488), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (9482, 9488), False, 'import os\n'), ((4962, 4991), 'sys.exit', 'exit', (['ERROR_ILLEGAL_ARGUMENTS'], {}), '(ERROR_ILLEGAL_ARGUMENTS)\n', (4966, 4991), False, 'from sys import argv, exit\n'), ((5050, 5079), 'sys.exit', 'exit', (['ERROR_ILLEGAL_ARGUMENTS'], {}), '(ERROR_ILLEGAL_ARGUMENTS)\n', (5054, 5079), False, 'from sys import argv, exit\n'), ((9254, 9275), 'os.path.abspath', 'os.path.abspath', (['path'], {}), '(path)\n', (9269, 9275), False, 'import os\n'), ((9581, 9602), 'os.path.abspath', 'os.path.abspath', (['path'], {}), '(path)\n', (9596, 9602), False, 'import os\n'), ((3360, 3366), 'sys.exit', 'exit', ([], {}), '()\n', (3364, 3366), False, 'from sys import argv, exit\n'), ((9390, 9399), 'getpass.getuser', 'getuser', ([], {}), '()\n', (9397, 9399), False, 'from getpass import getuser\n'), ((9718, 9727), 'getpass.getuser', 'getuser', ([], {}), '()\n', (9725, 9727), False, 'from getpass import 
getuser\n')]
|
# -*- coding: utf-8 -*-
from flexipy import Faktura
from flexipy import config
import requests
import json
class TestFaktura:
def setup(self):
self.conf = config.TestingConfig()
server_settings = self.conf.get_server_config()
self.username = str(server_settings['username'])
self.password = str(server_settings['password'])
self.url = str(server_settings['url'])
self.faktura = Faktura(self.conf)
def test_get_all_vydane_faktury(self):
r = requests.get(self.url+'faktura-vydana.json' ,auth=(self.username,self.password), verify=False)
d = r.json()
		if len(d['winstrom']['faktura-vydana']) == 1:
list_of_invoices_expected = d['winstrom']['faktura-vydana'][0]
else:
list_of_invoices_expected = d['winstrom']['faktura-vydana']
list_of_invoices_actual = self.faktura.get_all_vydane_faktury()
assert list_of_invoices_expected == list_of_invoices_actual
def test_get_all_prijate_faktury(self):
r = requests.get(self.url+'faktura-prijata.json' ,auth=(self.username,self.password), verify=False)
d = r.json()
if(len(d['winstrom']['faktura-prijata']) == 1):
list_of_invoices_expected = d['winstrom']['faktura-prijata'][0]
else:
list_of_invoices_expected = d['winstrom']['faktura-prijata']
list_of_invoices_actual = self.faktura.get_all_prijate_faktury()
assert list_of_invoices_expected == list_of_invoices_actual
def test_create_vydana_faktura(self):
expected_data = {'kod':'flex11','typDokl':'code:FAKTURA','firma':'code:201','popis':'Flexipy test invoice', 'sumDphZakl':'0.0','bezPolozek':'true', 'varSym':'11235484','zdrojProSkl':'false'}
dalsi_param = {'popis':'Flexipy test invoice','firma':'code:201'}
result = self.faktura.create_vydana_faktura(kod='flex11', var_sym='11235484', datum_vyst='2013-02-28', zdroj_pro_sklad=False, typ_dokl=self.conf.get_typy_faktury_vydane()[0], dalsi_param=dalsi_param)
assert result[0] == True #expected True
id = result[1]
actualData = self.faktura.get_vydana_faktura(id, detail='full')
assert actualData['kod'].lower() == expected_data['kod'].lower()
assert actualData['typDokl'] == expected_data['typDokl']
assert actualData['firma'] == expected_data['firma']
assert actualData['popis'] == expected_data['popis']
assert actualData['sumDphZakl'] == expected_data['sumDphZakl']
#uklid po sobe
self.faktura.delete_vydana_faktura(id)
def test_create_vydana_faktura_polozky(self):
polozky = [{'typPolozkyK':self.conf.get_typ_polozky_vydane()[0],'zdrojProSkl':False,'nazev':'vypujceni auta','ucetni':True,'cenaMj':'4815.0'}]
expected_data = {'kod':'flex12','typDokl':'code:FAKTURA','firma':'code:201','popis':'Flexipy test invoice',
'varSym':'11235484','zdrojProSkl':'false','polozkyFaktury':polozky}
expected_polozky = [{'typPolozkyK':'typPolozky.obecny','zdrojProSkl':'false','nazev':'vypujceni auta','ucetni':'true','cenaMj':'4815.0'}]
dalsi_param = {'popis':'Flexipy test invoice','firma':'code:201','typUcOp':u'code:TRŽBA SLUŽBY'}
result = self.faktura.create_vydana_faktura(kod='flex12', var_sym='11235484', datum_vyst='2013-02-28',
zdroj_pro_sklad=False, typ_dokl=self.conf.get_typy_faktury_vydane()[0], dalsi_param=dalsi_param, polozky_faktury=polozky)
assert result[0] == True #expected True
id = result[1]
actualData = self.faktura.get_vydana_faktura(id, detail='full')
assert actualData['kod'].lower() == expected_data['kod'].lower()
assert actualData['typDokl'] == expected_data['typDokl']
assert actualData['firma'] == expected_data['firma']
assert actualData['popis'] == expected_data['popis']
#pocet polozek se musi rovnat
assert len(actualData['polozkyFaktury']) == len(expected_polozky)
actual_polozky = actualData['polozkyFaktury'][0]
assert actual_polozky['typPolozkyK'] == expected_polozky[0]['typPolozkyK']
assert actual_polozky['nazev'] == expected_polozky[0]['nazev']
assert actual_polozky['cenaMj'] == expected_polozky[0]['cenaMj']
#uklid po sobe
self.faktura.delete_vydana_faktura(id)
|
[
"flexipy.Faktura",
"flexipy.config.TestingConfig",
"requests.get"
] |
[((161, 183), 'flexipy.config.TestingConfig', 'config.TestingConfig', ([], {}), '()\n', (181, 183), False, 'from flexipy import config\n'), ((394, 412), 'flexipy.Faktura', 'Faktura', (['self.conf'], {}), '(self.conf)\n', (401, 412), False, 'from flexipy import Faktura\n'), ((460, 562), 'requests.get', 'requests.get', (["(self.url + 'faktura-vydana.json')"], {'auth': '(self.username, self.password)', 'verify': '(False)'}), "(self.url + 'faktura-vydana.json', auth=(self.username, self.\n password), verify=False)\n", (472, 562), False, 'import requests\n'), ((926, 1029), 'requests.get', 'requests.get', (["(self.url + 'faktura-prijata.json')"], {'auth': '(self.username, self.password)', 'verify': '(False)'}), "(self.url + 'faktura-prijata.json', auth=(self.username, self.\n password), verify=False)\n", (938, 1029), False, 'import requests\n')]
|
"""
This module contains methods/objects that facilitate
basic operations.
"""
# std pkgs
import numpy as np
import random
from typing import Dict, List, Optional, Union
from pathlib import Path
import pickle
# non-std pkgs
import matplotlib.pyplot as plt
def hamming_dist(k1, k2):
val = 0
for ind, char in enumerate(k1):
if char != k2[ind]: val += 1
return val
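# Example (illustrative): hamming_dist("ACGT", "ACGA") == 1; assumes k1 and k2 have the same length.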
def clean_input_sequences(input_seq):
"""
This method cleans all input sequences to ensure they will
be compatible with the precomputed hash table.
"""
seq_list = []
for aa in input_seq:
if aa not in ["A", "R", "N", "D", "C", "Q", "E", "G", "H", "I", "L", "K", "M", "F", "P", "S", "T", "W", "Y", "V"]:
print(aa)
if aa == "*":
amino_chosen = "G"
elif aa == "B":
amino_chosen = np.random.choice(["N", "D"], 1, p=[0.5, 0.5])[0]
elif aa == "Z":
amino_chosen = np.random.choice(["Q", "E"], 1, p=[0.5, 0.5])[0]
elif aa == "J":
amino_chosen = np.random.choice(["L", "I"], 1, p=[0.5, 0.5])[0]
elif aa == "X":
amino_chosen = random.choice(["A", "R", "N", "D", "C",
"Q", "E", "G", "H", "I",
"L", "K", "M", "F", "P",
"S", "T", "W", "Y", "V"])[0]
else:
amino_chosen = aa
seq_list.append(amino_chosen)
return ''.join(seq_list) #+ input_seq[kmer_size+1:]
def readFasta(fasta_file_path: Union[str, Path]):
"""
This function reads a fasta file
Parameters
----------
fasta file path: string OR Path
Returns
-------
proteins : array of protein sequence (ordered)
protein_names : array of protein names (ordered)
"""
proteins, protein_names = [], []
with open(fasta_file_path) as fasta_file:
fasta_file_array = fasta_file.readlines()
for line_count, fasta_line in enumerate(fasta_file_array):
if (fasta_line[0] == ">"):
name = fasta_line.strip("\n")
protein_names.append(name)
proteins.append(protein_seq) if line_count > 0 else None
protein_seq = "" # renew sequence everytime fasta name is added.
else:
protein_seq += fasta_line.strip("\n")
proteins.append(protein_seq)
return proteins, protein_names
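# Illustrative: for a FASTA file containing the lines ">seq1", "MKV", "LLA", ">seq2", "GGH",
# readFasta returns (["MKVLLA", "GGH"], [">seq1", ">seq2"]).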
def get_kmer_size(hash_table) -> int:
"""
This function extracts the kmer size from
the hash table.
"""
kmer_size = 0
with open(hash_table, "rb") as hash_tb:
hash = pickle.load(hash_tb)
kmer_size = len(list(hash.keys())[0])
return kmer_size
|
[
"pickle.load",
"random.choice",
"numpy.random.choice"
] |
[((2668, 2688), 'pickle.load', 'pickle.load', (['hash_tb'], {}), '(hash_tb)\n', (2679, 2688), False, 'import pickle\n'), ((856, 901), 'numpy.random.choice', 'np.random.choice', (["['N', 'D']", '(1)'], {'p': '[0.5, 0.5]'}), "(['N', 'D'], 1, p=[0.5, 0.5])\n", (872, 901), True, 'import numpy as np\n'), ((956, 1001), 'numpy.random.choice', 'np.random.choice', (["['Q', 'E']", '(1)'], {'p': '[0.5, 0.5]'}), "(['Q', 'E'], 1, p=[0.5, 0.5])\n", (972, 1001), True, 'import numpy as np\n'), ((1056, 1101), 'numpy.random.choice', 'np.random.choice', (["['L', 'I']", '(1)'], {'p': '[0.5, 0.5]'}), "(['L', 'I'], 1, p=[0.5, 0.5])\n", (1072, 1101), True, 'import numpy as np\n'), ((1156, 1275), 'random.choice', 'random.choice', (["['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M', 'F', 'P',\n 'S', 'T', 'W', 'Y', 'V']"], {}), "(['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K',\n 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V'])\n", (1169, 1275), False, 'import random\n')]
|
import asyncio
import logging
import uuid
import ssl
from gmqtt import Client as MQTTClient
from .exceptions import ConnectionFailed, DestinationNotAvailable
from .base import BaseConnector
logger = logging.getLogger(__name__)
class GMQTTConnector(BaseConnector):
"""GMQTTConnector uses gmqtt library for connectors
running over MQTT.
"""
def __init__(self, host, port, subscribe_topic, publish_topic, **kwargs):
self.host = host
self.port = port
# topics
self.subscribe_topic = subscribe_topic
self.publish_topic = publish_topic
# connection
self.connection_id = uuid.uuid4().hex[:8]
self.is_connected = False
self.client = MQTTClient(self.connection_id)
# callbacks
self.client.on_connect = self.on_connect
self.client.on_message = self.on_message
self.client.on_disconnect = self.on_disconnect
self.client.on_subscribe = self.on_subscribe
self.STOP = asyncio.Event()
# options
self.ack_topic = kwargs.get('ack_topic')
self.enable_ssl = kwargs.get('enable_ssl', False)
self.enable_auth = kwargs.get('enable_auth', False)
self.username = kwargs.get('username')
self.password = kwargs.get('password')
self.client_cert = kwargs.get('client_cert')
self.client_key = kwargs.get('client_key')
self.qos = kwargs.get('qos', 2)
def get_connection_details(self):
"""get_connection_details returns the details
about the current MQTT connection.
"""
return dict(
connection_id=self.connection_id,
host=self.host,
port=self.port,
is_connected=self.is_connected,
subscribe_topic=self.subscribe_topic,
publish_topic=self.publish_topic
)
def on_connect(self, *args):
"""on_connect is a callback that gets exectued after the
connection is made.
Arguments:
client {MQTTClient} -- gmqtt.MQTTClient
flags {int} -- connection flags
rc {int} -- connection result code
properties {dict} -- config of the current connection
"""
logger.info("Connected with result code %s", str(args[2]))
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
# client.subscribe("$SYS/#", qos=0)
if isinstance(self.subscribe_topic, str):
self.client.subscribe(self.subscribe_topic, qos=self.qos)
elif isinstance(self.subscribe_topic, list):
for topic in self.subscribe_topic:
self.client.subscribe(topic, qos=self.qos)
else:
logger.warning('subscribe_topic is either None or an unknown data type.'
' Currently subscribed to 0 topics.')
async def on_message(self, *args):
"""on_message callback gets executed when the connection receives
a message.
Arguments:
client {MQTTClient} -- gmqtt.MQTTClient
topic {string} -- topic from which message was received
payload {bytes} -- actual message bytes received
qos {string} -- message QOS level (0,1,2)
properties {dict} -- message properties
"""
logger.info("%s %s", args[1], str(args[2]))
return 0
@staticmethod
def on_disconnect(*args):
"""on_disconnect is a callback that gets executed
after a disconnection occurs"""
logger.info('Disconnected')
@staticmethod
def on_subscribe(*args):
"""on_subscribe is a callback that gets executed
after a subscription is succesful"""
logger.info('Subscribed')
def ask_exit(self):
"""sets the STOP variable so that a signal gets sent
to disconnect the client
"""
self.STOP.set()
async def start(self):
"""starts initiates the connnection with the broker
Raises:
DestinationNotAvailable: If broker is not available
ConnectionFailed: If connection failed due to any other reason
"""
try:
conn_kwargs = dict(host=self.host, port=self.port)
if self.enable_auth:
self.client.set_auth_credentials(self.username, self.password)
if self.enable_ssl:
assert self.client_cert and self.client_key, \
"Cannot enable ssl without specifying client_cert and client_key"
ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
ssl_context.load_cert_chain(self.client_cert,
keyfile=self.client_key)
conn_kwargs.update(dict(ssl=ssl_context))
await self.client.connect(**conn_kwargs)
self.is_connected = True
except ConnectionRefusedError as e:
# raising from None suppresses the exception chain
raise DestinationNotAvailable(
f'Connection Failed: Error connecting to'
f' {self.host}:{self.port} - {e}'
) from None
except Exception as e:
raise ConnectionFailed(e)
async def publish(self, *args, **kwargs):
"""publishes the message to the topic using client.publish"""
self.client.publish(*args, **kwargs)
async def stop(self):
"""force stop the connection with the MQTT broker."""
await self.client.disconnect()
self.is_connected = False
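# Minimal usage sketch (broker address, topics and payload below are placeholders, not part of this module):
#
#   async def demo():
#       connector = GMQTTConnector('broker.example.com', 1883,
#                                  subscribe_topic='devices/+/up',
#                                  publish_topic='devices/down')
#       await connector.start()
#       await connector.publish('devices/down', b'ping', qos=connector.qos)
#       await connector.stop()
#
#   # asyncio.run(demo())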
|
[
"ssl.SSLContext",
"uuid.uuid4",
"gmqtt.Client",
"asyncio.Event",
"logging.getLogger"
] |
[((203, 230), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (220, 230), False, 'import logging\n'), ((722, 752), 'gmqtt.Client', 'MQTTClient', (['self.connection_id'], {}), '(self.connection_id)\n', (732, 752), True, 'from gmqtt import Client as MQTTClient\n'), ((1000, 1015), 'asyncio.Event', 'asyncio.Event', ([], {}), '()\n', (1013, 1015), False, 'import asyncio\n'), ((645, 657), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (655, 657), False, 'import uuid\n'), ((4616, 4652), 'ssl.SSLContext', 'ssl.SSLContext', (['ssl.PROTOCOL_TLSv1_2'], {}), '(ssl.PROTOCOL_TLSv1_2)\n', (4630, 4652), False, 'import ssl\n')]
|
import matplotlib.pyplot as plt
from hyperion.model import ModelOutput
from hyperion.util.constants import pc
mo = ModelOutput('pure_scattering.rtout')
image_fnu = mo.get_image(inclination=0, units='MJy/sr', distance=300. * pc)
image_pol = mo.get_image(inclination=0, stokes='linpol')
fig = plt.figure(figsize=(8, 8))
# Make total intensity sub-plot
ax = fig.add_axes([0.1, 0.3, 0.4, 0.4])
ax.imshow(image_fnu.val[:, :, 0], extent=[-13, 13, -13, 13],
interpolation='none', cmap=plt.cm.gist_heat,
origin='lower', vmin=0., vmax=4e9)
ax.set_xlim(-13., 13.)
ax.set_ylim(-13., 13.)
ax.set_xlabel("x (solar radii)")
ax.set_ylabel("y (solar radii)")
ax.set_title("Surface brightness")
# Make linear polarization sub-plot
ax = fig.add_axes([0.51, 0.3, 0.4, 0.4])
im = ax.imshow(image_pol.val[:, :, 0] * 100., extent=[-13, 13, -13, 13],
interpolation='none', cmap=plt.cm.gist_heat,
origin='lower', vmin=0., vmax=100.)
ax.set_xlim(-13., 13.)
ax.set_ylim(-13., 13.)
ax.set_xlabel("x (solar radii)")
ax.set_title("Linear Polarization")
ax.set_yticklabels('')
axcb = fig.add_axes([0.92, 0.3, 0.02, 0.4])
cb=plt.colorbar(im, cax=axcb)
cb.set_label('%')
fig.savefig('pure_scattering_inner_disk.png', bbox_inches='tight')
|
[
"hyperion.model.ModelOutput",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.colorbar"
] |
[((116, 152), 'hyperion.model.ModelOutput', 'ModelOutput', (['"""pure_scattering.rtout"""'], {}), "('pure_scattering.rtout')\n", (127, 152), False, 'from hyperion.model import ModelOutput\n'), ((294, 320), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (304, 320), True, 'import matplotlib.pyplot as plt\n'), ((1152, 1178), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['im'], {'cax': 'axcb'}), '(im, cax=axcb)\n', (1164, 1178), True, 'import matplotlib.pyplot as plt\n')]
|
"""Day 4: Giant Squid"""
import sys, os, inspect
# necessary to import the aoc2021/utils.py module
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(1, os.path.join(sys.path[0], '..'))
import utils
exampledata = ['0,9 -> 5,9', '8,0 -> 0,8', '9,4 -> 3,4', '2,2 -> 2,1', '7,0 -> 7,4', '6,4 -> 2,0', '0,9 -> 2,9', '3,4 -> 1,4', '0,0 -> 8,8', '5,5 -> 8,2']
def input_parser(inputdata):
"""Given the input of the puzzle represent it with a list of tuples
Parameters
----------
inputdata : list
A list of strings, each being a line of the raw input file.
Returns
----------
inputdata : list
A list of strings, each being a line of the raw input file.
max_x : int
Max x coordinate value from all points
max_y : int
Max y coordinate value from all points
"""
res = []
max_x = 0
max_y = 0
for line in inputdata:
pointpair = ()
for strpoint in line.split('->'):
strpoint = strpoint.strip()
point = ()
for indexcoord, strcoord in enumerate(strpoint.split(',')):
valuecoord = int(strcoord)
point += (valuecoord,)
if(indexcoord==0 and max_x<valuecoord):
max_x = valuecoord
elif(0<indexcoord and max_y<valuecoord):
max_y = valuecoord
pointpair += (point,)
res.append(pointpair)
# return a list of points-pair (x1,y1) and (x2,y2)
# each point is a pair x,y coordinates
return res, max_x, max_y
def closedrange(start, stop):
"Return all values in the interval [start,stop] no matter which one is greater"
step = 1 if (start<=stop) else -1
return range(start, stop + step, step)
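# e.g. closedrange(2, 5) -> 2, 3, 4, 5 and closedrange(5, 2) -> 5, 4, 3, 2 (both endpoints included)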
def vent_mapper(inputdata, include_diagonal_lines=False):
"""Given the already parsed input data from puzzle aoc2021day05 return the final solution
Parameters
----------
inputdata : list
A list of tuples, each tuple representing a pair of points. Each point itself is a tuple (int,int).
include_diagonal_lines : bool (Default=False)
If points describe a diagonal line, include them in the mapping.
        The default behavior is to only include horizontal or vertical lines
"""
ventpointpairs, max_x, max_y = input_parser(inputdata)
ventmap = [[0]*(max_x+1) for i in range(max_y+1)] # to index the ventmap: ventmap[y][x]
for ventsegment in ventpointpairs:
x1,y1 = ventsegment[0]
x2,y2 = ventsegment[1]
# only horizontal and vertical lines
if(x1 == x2):
for y in closedrange(y1, y2):
ventmap[y][x1] += 1
elif(y1 == y2):
for x in closedrange(x1, x2):
ventmap[y1][x] += 1
# diagonal line at exactly 45 degrees
elif (include_diagonal_lines):
for x,y in closedrange_diag( (x1,y1), (x2,y2) ):
ventmap[y][x] += 1
return vent_counter(ventmap,2)
def closedrange_diag(start, stop):
"Return all points (x,y) from a 45º diagonal line from (x1,y1) to (x2,y2)"
x1, y1 = start
x2, y2 = stop
return zip(closedrange(x1, x2), closedrange(y1, y2))
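# e.g. closedrange_diag((1, 1), (3, 3)) yields (1, 1), (2, 2), (3, 3)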
def vent_counter(ventmap, overlap):
res = 0
for ventrow in ventmap:
for ventelem in ventrow:
if (overlap <= ventelem):
res +=1
return res
def main():
inputdata = []
# run script with arguments: load the input file
if(2 <= len(sys.argv)):
inputdata = utils.loadinput(sys.argv[1])
# run script with no arguments: load example data
else:
inputdata = exampledata
print(f"Puzzle input (example)")
print(f"{exampledata}\n")
print(f"Answer (part 1): {vent_mapper(inputdata)}\n") # Correct example answer: 5
print(f"Answer (part 2): {vent_mapper(inputdata, True)}") # Correct example answer: 12
pass
if __name__ == "__main__":
main()
|
[
"os.path.dirname",
"os.path.join",
"utils.loadinput",
"inspect.currentframe"
] |
[((199, 226), 'os.path.dirname', 'os.path.dirname', (['currentdir'], {}), '(currentdir)\n', (214, 226), False, 'import sys, os, inspect\n'), ((246, 277), 'os.path.join', 'os.path.join', (['sys.path[0]', '""".."""'], {}), "(sys.path[0], '..')\n", (258, 277), False, 'import sys, os, inspect\n'), ((3679, 3707), 'utils.loadinput', 'utils.loadinput', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (3694, 3707), False, 'import utils\n'), ((161, 183), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (181, 183), False, 'import sys, os, inspect\n')]
|
import numpy as np
import cv2
import os
from glob import glob
from tqdm import tqdm
img_h, img_w = 256, 256
means, stdevs = [], []
img_list = []
TRAIN_DATASET_PATH = 'data/Real/subset/train/B'
image_fns = glob(os.path.join(TRAIN_DATASET_PATH, '*.*'))
for single_img_path in tqdm(image_fns):
img = cv2.imread(single_img_path)
img = cv2.resize(img, (img_w, img_h))
img = img[:, :, :, np.newaxis]
img_list.append(img)
imgs = np.concatenate(img_list, axis=3)
imgs = imgs.astype(np.float32) / 255.
for i in range(3):
    pixels = imgs[:, :, i, :].ravel()  # flatten into a 1-D array
means.append(np.mean(pixels))
stdevs.append(np.std(pixels))
# BGR --> RGB: images read with OpenCV need this conversion, images read with PIL do not
means.reverse()
stdevs.reverse()
print("normMean = {}".format(means))
print("normStd = {}".format(stdevs))
# normMean = [0.35389897, 0.39104056, 0.34307468]
# normStd = [0.2158508, 0.23398565, 0.20874721]
# normMean1 = [0.47324282, 0.498616, 0.46873462]
# normStd1 = [0.2431127, 0.2601882, 0.25678185]
# [0.413570895, 0.44482827999999996, 0.40590465]
# [0.22948174999999998, 0.24708692499999999, 0.23276452999999997]
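# The means/stdevs computed above are typically fed into a normalization step, e.g. with torchvision (illustrative):
#   transforms.Normalize(mean=means, std=stdevs)  # assumes RGB images already scaled to [0, 1]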
|
[
"tqdm.tqdm",
"numpy.concatenate",
"numpy.std",
"cv2.imread",
"numpy.mean",
"os.path.join",
"cv2.resize"
] |
[((278, 293), 'tqdm.tqdm', 'tqdm', (['image_fns'], {}), '(image_fns)\n', (282, 293), False, 'from tqdm import tqdm\n'), ((444, 476), 'numpy.concatenate', 'np.concatenate', (['img_list'], {'axis': '(3)'}), '(img_list, axis=3)\n', (458, 476), True, 'import numpy as np\n'), ((213, 252), 'os.path.join', 'os.path.join', (['TRAIN_DATASET_PATH', '"""*.*"""'], {}), "(TRAIN_DATASET_PATH, '*.*')\n", (225, 252), False, 'import os\n'), ((305, 332), 'cv2.imread', 'cv2.imread', (['single_img_path'], {}), '(single_img_path)\n', (315, 332), False, 'import cv2\n'), ((343, 374), 'cv2.resize', 'cv2.resize', (['img', '(img_w, img_h)'], {}), '(img, (img_w, img_h))\n', (353, 374), False, 'import cv2\n'), ((598, 613), 'numpy.mean', 'np.mean', (['pixels'], {}), '(pixels)\n', (605, 613), True, 'import numpy as np\n'), ((633, 647), 'numpy.std', 'np.std', (['pixels'], {}), '(pixels)\n', (639, 647), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
import tensorflow as tf
# [Used at test time] Returns a loop function that takes the previous output, finds the corresponding argmax index, and looks up that word's embedding.
def extract_argmax_and_embed(embedding, output_projection=None):
"""
Get a loop_function that extracts the previous symbol and embeds it. Used by decoder.
:param embedding: embedding tensor for symbol
:param output_projection: None or a pair (W, B). If provided, each fed previous output will
first be multiplied by W and added B.
:return: A loop function
"""
def loop_function(prev, _):
if output_projection is not None:
prev = tf.matmul(prev, output_projection[0]) + output_projection[1]
        prev_symbol = tf.argmax(prev, 1)  # get the index of the most likely word
        emb_prev = tf.gather(embedding, prev_symbol)  # look up the embedding for this index
return emb_prev
return loop_function
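# Illustrative use (W_proj/b_proj are placeholder names): at inference time build
#   loop_fn = extract_argmax_and_embed(embedding, output_projection=(W_proj, b_proj))
# and pass it as loop_function to rnn_decoder_with_attention below; pass loop_function=None during training.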
# RNN decoder part.
# During training, use the training data as input; at test time, feed the output at time t as the input at time t+1.
def rnn_decoder_with_attention(decoder_inputs, initial_state, cell, loop_function,attention_states,scope=None):#3D Tensor [batch_size x attn_length x attn_size]
"""RNN decoder for the sequence-to-sequence model.
Args:
      decoder_inputs: A list of 2D Tensors [batch_size x input_size]. It is the decoder input.
      initial_state: 2D Tensor with shape [batch_size x cell.state_size]. It is the encoded vector of the input sentences, which represents the 'thought vector'.
cell: core_rnn_cell.RNNCell defining the cell function and size.
loop_function: If not None, this function will be applied to the i-th output
in order to generate the i+1-st input, and decoder_inputs will be ignored,
except for the first element ("GO" symbol). This can be used for decoding,
but also for training to emulate http://arxiv.org/abs/1506.03099.
Signature -- loop_function(prev, i) = next
* prev is a 2D Tensor of shape [batch_size x output_size],
* i is an integer, the step number (when advanced control is needed),
* next is a 2D Tensor of shape [batch_size x input_size].
      attention_states: 3D Tensor [batch_size x attn_length x attn_size]. It represents the encoder input X.
scope: VariableScope for the created subgraph; defaults to "rnn_decoder".
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x output_size] containing generated outputs.
state: The state of each cell at the final time-step.
It is a 2D Tensor of shape [batch_size x cell.state_size].
(Note that in some cases, like basic RNN cell or GRU cell, outputs and
states can be the same. They are different for LSTM cells though.)
"""
with tf.variable_scope(scope or "rnn_decoder"):
print("rnn_decoder_with_attention started...")
state = initial_state #[batch_size x cell.state_size].
_, hidden_size = state.get_shape().as_list() #200
attention_states_original=attention_states
batch_size,sequence_length,_=attention_states.get_shape().as_list()
outputs = []
prev = None
#################################################
        for i, inp in enumerate(decoder_inputs):  # loop over the decoder inputs, e.g. sentence_length tensors of [batch_size x input_size]
            # During training, use the training data as input; at test time, feed the output at time t as the input at time t+1
            if loop_function is not None and prev is not None:  # at test time: if loop_function and the previous output exist, use the previous output as the RNN input
with tf.variable_scope("loop_function", reuse=True):
inp = loop_function(prev, i)
if i > 0:
tf.get_variable_scope().reuse_variables()
##ATTENTION#################################################################################################################################################
# 1.get logits of attention for each encoder input. attention_states:[batch_size x attn_length x attn_size]; query=state:[batch_size x cell.state_size]
query=state
W_a = tf.get_variable("W_a", shape=[hidden_size, hidden_size],initializer=tf.random_normal_initializer(stddev=0.1))
query=tf.matmul(query, W_a) #[batch_size,hidden_size]
query=tf.expand_dims(query,axis=1) #[batch_size, 1, hidden_size]
U_a = tf.get_variable("U_a", shape=[hidden_size, hidden_size],initializer=tf.random_normal_initializer(stddev=0.1))
U_aa = tf.get_variable("U_aa", shape=[ hidden_size])
attention_states=tf.reshape(attention_states,shape=(-1,hidden_size)) #[batch_size*sentence_length,hidden_size]
attention_states=tf.matmul(attention_states, U_a) #[batch_size*sentence_length,hidden_size]
#print("batch_size",batch_size," ;sequence_length:",sequence_length," ;hidden_size:",hidden_size) #print("attention_states:", attention_states) #(?, 200)
attention_states=tf.reshape(attention_states,shape=(-1,sequence_length,hidden_size)) # TODO [batch_size,sentence_length,hidden_size]
#query_expanded: [batch_size,1, hidden_size]
#attention_states_reshaped: [batch_size,sentence_length,hidden_size]
attention_logits=tf.nn.tanh(query+attention_states+U_aa) #[batch_size,sentence_length,hidden_size]. additive style
# 2.get possibility of attention
attention_logits=tf.reshape(attention_logits,shape=(-1,hidden_size)) #batch_size*sequence_length [batch_size*sentence_length,hidden_size]
V_a = tf.get_variable("V_a", shape=[hidden_size,1],initializer=tf.random_normal_initializer(stddev=0.1)) #[hidden_size,1]
            attention_logits=tf.matmul(attention_logits,V_a) # final shape needed: [batch_size*sentence_length,1] <----- [batch_size*sentence_length,hidden_size] x [hidden_size,1]
attention_logits=tf.reshape(attention_logits,shape=(-1,sequence_length)) #attention_logits:[batch_size,sequence_length]
##########################################################################################################################################################
#attention_logits=tf.reduce_sum(attention_logits,2) #[batch_size x attn_length]
attention_logits_max=tf.reduce_max(attention_logits,axis=1,keep_dims=True) #[batch_size x 1]
# possibility distribution for each encoder input.it means how much attention or focus for each encoder input
p_attention=tf.nn.softmax(attention_logits-attention_logits_max)#[batch_size x attn_length]
# 3.get weighted sum of hidden state for each encoder input as attention state
p_attention=tf.expand_dims(p_attention,axis=2) #[batch_size x attn_length x 1]
# attention_states:[batch_size x attn_length x attn_size]; p_attention:[batch_size x attn_length];
attention_final=tf.multiply(attention_states_original,p_attention) #[batch_size x attn_length x attn_size]
context_vector=tf.reduce_sum(attention_final,axis=1) #[batch_size x attn_size]
############################################################################################################################################################
#inp:[batch_size x input_size].it is decoder input; attention_final:[batch_size x attn_size]
            output, state = cell(inp, state,context_vector)  # take one RNN step using the attention context vector
            outputs.append(output)  # append this step's output to the result list
if loop_function is not None:
prev = output
print("rnn_decoder_with_attention ended...")
return outputs, state
|
[
"tensorflow.nn.softmax",
"tensorflow.reduce_sum",
"tensorflow.nn.tanh",
"tensorflow.argmax",
"tensorflow.gather",
"tensorflow.reshape",
"tensorflow.get_variable_scope",
"tensorflow.variable_scope",
"tensorflow.matmul",
"tensorflow.multiply",
"tensorflow.random_normal_initializer",
"tensorflow.reduce_max",
"tensorflow.expand_dims",
"tensorflow.get_variable"
] |
[((671, 689), 'tensorflow.argmax', 'tf.argmax', (['prev', '(1)'], {}), '(prev, 1)\n', (680, 689), True, 'import tensorflow as tf\n'), ((721, 754), 'tensorflow.gather', 'tf.gather', (['embedding', 'prev_symbol'], {}), '(embedding, prev_symbol)\n', (730, 754), True, 'import tensorflow as tf\n'), ((2771, 2812), 'tensorflow.variable_scope', 'tf.variable_scope', (["(scope or 'rnn_decoder')"], {}), "(scope or 'rnn_decoder')\n", (2788, 2812), True, 'import tensorflow as tf\n'), ((4197, 4218), 'tensorflow.matmul', 'tf.matmul', (['query', 'W_a'], {}), '(query, W_a)\n', (4206, 4218), True, 'import tensorflow as tf\n'), ((4263, 4292), 'tensorflow.expand_dims', 'tf.expand_dims', (['query'], {'axis': '(1)'}), '(query, axis=1)\n', (4277, 4292), True, 'import tensorflow as tf\n'), ((4469, 4513), 'tensorflow.get_variable', 'tf.get_variable', (['"""U_aa"""'], {'shape': '[hidden_size]'}), "('U_aa', shape=[hidden_size])\n", (4484, 4513), True, 'import tensorflow as tf\n'), ((4544, 4597), 'tensorflow.reshape', 'tf.reshape', (['attention_states'], {'shape': '(-1, hidden_size)'}), '(attention_states, shape=(-1, hidden_size))\n', (4554, 4597), True, 'import tensorflow as tf\n'), ((4667, 4699), 'tensorflow.matmul', 'tf.matmul', (['attention_states', 'U_a'], {}), '(attention_states, U_a)\n', (4676, 4699), True, 'import tensorflow as tf\n'), ((4937, 5007), 'tensorflow.reshape', 'tf.reshape', (['attention_states'], {'shape': '(-1, sequence_length, hidden_size)'}), '(attention_states, shape=(-1, sequence_length, hidden_size))\n', (4947, 5007), True, 'import tensorflow as tf\n'), ((5243, 5286), 'tensorflow.nn.tanh', 'tf.nn.tanh', (['(query + attention_states + U_aa)'], {}), '(query + attention_states + U_aa)\n', (5253, 5286), True, 'import tensorflow as tf\n'), ((5416, 5469), 'tensorflow.reshape', 'tf.reshape', (['attention_logits'], {'shape': '(-1, hidden_size)'}), '(attention_logits, shape=(-1, hidden_size))\n', (5426, 5469), True, 'import tensorflow as tf\n'), ((5700, 5732), 'tensorflow.matmul', 'tf.matmul', (['attention_logits', 'V_a'], {}), '(attention_logits, V_a)\n', (5709, 5732), True, 'import tensorflow as tf\n'), ((5861, 5918), 'tensorflow.reshape', 'tf.reshape', (['attention_logits'], {'shape': '(-1, sequence_length)'}), '(attention_logits, shape=(-1, sequence_length))\n', (5871, 5918), True, 'import tensorflow as tf\n'), ((6263, 6318), 'tensorflow.reduce_max', 'tf.reduce_max', (['attention_logits'], {'axis': '(1)', 'keep_dims': '(True)'}), '(attention_logits, axis=1, keep_dims=True)\n', (6276, 6318), True, 'import tensorflow as tf\n'), ((6481, 6535), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['(attention_logits - attention_logits_max)'], {}), '(attention_logits - attention_logits_max)\n', (6494, 6535), True, 'import tensorflow as tf\n'), ((6677, 6712), 'tensorflow.expand_dims', 'tf.expand_dims', (['p_attention'], {'axis': '(2)'}), '(p_attention, axis=2)\n', (6691, 6712), True, 'import tensorflow as tf\n'), ((6894, 6945), 'tensorflow.multiply', 'tf.multiply', (['attention_states_original', 'p_attention'], {}), '(attention_states_original, p_attention)\n', (6905, 6945), True, 'import tensorflow as tf\n'), ((7012, 7050), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['attention_final'], {'axis': '(1)'}), '(attention_final, axis=1)\n', (7025, 7050), True, 'import tensorflow as tf\n'), ((588, 625), 'tensorflow.matmul', 'tf.matmul', (['prev', 'output_projection[0]'], {}), '(prev, output_projection[0])\n', (597, 625), True, 'import tensorflow as tf\n'), ((3517, 3563), 'tensorflow.variable_scope', 
'tf.variable_scope', (['"""loop_function"""'], {'reuse': '(True)'}), "('loop_function', reuse=True)\n", (3534, 3563), True, 'import tensorflow as tf\n'), ((4137, 4177), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': '(0.1)'}), '(stddev=0.1)\n', (4165, 4177), True, 'import tensorflow as tf\n'), ((4408, 4448), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': '(0.1)'}), '(stddev=0.1)\n', (4436, 4448), True, 'import tensorflow as tf\n'), ((5612, 5652), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': '(0.1)'}), '(stddev=0.1)\n', (5640, 5652), True, 'import tensorflow as tf\n'), ((3652, 3675), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (3673, 3675), True, 'import tensorflow as tf\n')]
|
# Generated by Django 2.2.14 on 2020-09-26 06:38
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0024_auto_20200914_0433'),
]
operations = [
migrations.AlterModelOptions(
name='profile',
options={'permissions': (('view_karma_points', 'Can view karma points'), ('deactivate_users', 'Can deactivate users'))},
),
]
|
[
"django.db.migrations.AlterModelOptions"
] |
[((226, 400), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""profile"""', 'options': "{'permissions': (('view_karma_points', 'Can view karma points'), (\n 'deactivate_users', 'Can deactivate users'))}"}), "(name='profile', options={'permissions': ((\n 'view_karma_points', 'Can view karma points'), ('deactivate_users',\n 'Can deactivate users'))})\n", (254, 400), False, 'from django.db import migrations\n')]
|
# ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
from .. import QtCore, QtGui
from . import AbstractMPLDataView
from .. import AbstractDataView2D
import logging
logger = logging.getLogger(__name__)
class ContourView(AbstractDataView2D, AbstractMPLDataView):
"""
The ContourView provides a UI widget for viewing a number of 1-D
data sets as a contour plot, starting from dataset 0 at y = 0
"""
def __init__(self, fig, data_list=None, cmap=None, norm=None, *args,
**kwargs):
"""
__init__ docstring
Parameters
----------
fig : figure to draw the artists on
        data_list : data sets to plot, forwarded to AbstractDataView2D
cmap : colormap that matplotlib understands
norm : mpl.colors.Normalize
"""
# set some defaults
# no defaults yet
# call the parent constructors
super(ContourView, self).__init__(data_list=data_list, fig=fig,
cmap=cmap, norm=norm, *args,
**kwargs)
# create the matplotlib axes
self._ax = self._fig.add_subplot(1, 1, 1)
self._ax.set_aspect('equal')
# plot the data
self.replot()
def replot(self):
"""
Override
Replot the data after modifying a display parameter (e.g.,
offset or autoscaling) or adding new data
"""
# TODO: This class was originally written to convert a 1-D stack into a
# 2-D contour. Rewrite this replot method
# get the keys from the dict
keys = list(six.iterkeys(self._data))
# number of datasets in the data dict
num_keys = len(keys)
# cannot plot data if there are no keys
if num_keys < 1:
return
# set the local counter
counter = num_keys - 1
# @tacaswell Should it be required that all datasets are the same
# length?
num_coords = len(self._data[keys[0]][0])
# declare the array
self._data_arr = np.zeros((num_keys, num_coords))
# add the data to the main axes
for key in self._data.keys():
# get the (x,y) data from the dictionary
(x, y) = self._data[key]
# add the data to the array
self._data_arr[counter] = y
# decrement the counter
counter -= 1
# get the first dataset to get the x axis and number of y datasets
x, y = self._data[keys[0]]
y = np.arange(len(keys))
# TODO: Colormap initialization is not working properly.
self._ax.contourf(x, y, self._data_arr) # , cmap=colors.Colormap(self._cmap))
|
[
"numpy.zeros",
"logging.getLogger",
"six.iterkeys"
] |
[((2744, 2771), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2761, 2771), False, 'import logging\n'), ((4791, 4823), 'numpy.zeros', 'np.zeros', (['(num_keys, num_coords)'], {}), '((num_keys, num_coords))\n', (4799, 4823), True, 'import numpy as np\n'), ((4341, 4365), 'six.iterkeys', 'six.iterkeys', (['self._data'], {}), '(self._data)\n', (4353, 4365), False, 'import six\n')]
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pathlib import Path
import re
import io
loss = Path.cwd().parent.joinpath('savefiles', 'checkpoints', 'loss.log').read_text()
loss = re.sub(r'[\]\[]', '', loss)
df = pd.read_csv(io.StringIO(loss), names=['epoch', 'iteration', 'cls_loss', 'box_loss', 'run_loss'])
def avg_loss(period):
_df = df.groupby(df.index // period).mean()
x = np.array(list(_df.index))
y_cls = np.array(_df['cls_loss'].to_list())
y_box = np.array(_df['box_loss'].to_list())
plt.plot(x, y_cls, y_box)
plt.show()
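# Example call (illustrative): avg_loss(100) averages the logged losses over 100-row windows and plots
# the classification and box losses against the window index.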
|
[
"io.StringIO",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"pathlib.Path.cwd",
"re.sub"
] |
[((211, 239), 're.sub', 're.sub', (['"""[\\\\]\\\\[]"""', '""""""', 'loss'], {}), "('[\\\\]\\\\[]', '', loss)\n", (217, 239), False, 'import re\n'), ((256, 273), 'io.StringIO', 'io.StringIO', (['loss'], {}), '(loss)\n', (267, 273), False, 'import io\n'), ((547, 572), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_cls', 'y_box'], {}), '(x, y_cls, y_box)\n', (555, 572), True, 'import matplotlib.pyplot as plt\n'), ((577, 587), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (585, 587), True, 'import matplotlib.pyplot as plt\n'), ((125, 135), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (133, 135), False, 'from pathlib import Path\n')]
|
import math
import numpy as np
from uam_simulator import my_utils
from uam_simulator import pathPlanning
from uam_simulator import orca
import gurobipy as grb
from gurobipy import GRB
import time as python_time
class Flightplan:
def __init__(self, t0, dt, positions, times=None):
self.start_time = t0
self.positions = positions
self.time_step = dt
self.times = None
if times is not None:
self.time_step = None
self.times = np.array(times)
else:
self.times = np.array([self.start_time + i * self.time_step for i in range(0, len(self.positions))])
if self.time_step is not None:
self.end_time = self.start_time + (len(self.positions) - 1) * self.time_step
else:
self.end_time=self.times[-1]
def get_planned_position_at(self, time, return_velocity=False, ignore_timed_out=False, debug=False):
""" Interpolates between the flight points """
if ignore_timed_out and time > self.end_time:
if return_velocity:
return None, None
else:
return None
n = len(self.positions)
if self.time_step is not None:
idx_float = (float(time) - float(self.start_time)) / float(self.time_step)
idx_low = min(math.floor(idx_float), n - 1)
idx_high = min(math.ceil(idx_float), n - 1)
if idx_low == idx_high:
# Indices are equal because idx_float is an int
if return_velocity:
if idx_high == n-1:
velocity = np.array([0, 0])
else:
velocity = (self.positions[idx_high+1]-self.positions[idx_high])/(self.times[idx_high+1]-self.times[idx_high])
return self.positions[idx_high], velocity
else:
return self.positions[idx_high]
else:
if time > self.times[-1]:
return np.copy(self.positions[-1])
idx_high = np.searchsorted(self.times, time)
idx_low = max(0, idx_high - 1)
if self.times[idx_high] == time or idx_low == idx_high:
if return_velocity:
if idx_high == n-1:
velocity = np.array([0, 0])
else:
velocity = (self.positions[idx_high+1]-self.positions[idx_high])/(self.times[idx_high+1]-self.times[idx_high])
return self.positions[idx_high], velocity
else:
return self.positions[idx_high]
idx_float = idx_low + (time - self.times[idx_low]) / (self.times[idx_high] - self.times[idx_low])
pos_high = self.positions[idx_high]
pos_low = self.positions[idx_low] # if time is exactly integer then returns the exact pos
if debug:
print(idx_float)
print(pos_high)
print(pos_low)
if return_velocity:
return pos_low + (pos_high - pos_low) * (idx_float - idx_low), (pos_high-pos_low)/(self.times[idx_high]-self.times[idx_low])
else:
return pos_low + (pos_high - pos_low) * (idx_float - idx_low)
def get_planned_trajectory_between(self, start_time, end_time, debug=False):
""" Returns trajectory between start_time and end_time"""
if (start_time - self.end_time) >= -1e-4 or (end_time - self.start_time) <= 1e-4:
return None, None
trajectory_end_time = min(end_time, self.end_time)
trajectory_start_time = max(start_time, self.start_time)
trajectory = []
times = []
if debug:
print('time step is '+str(self.time_step))
print('start_time is '+str(start_time))
print('end_time '+str(end_time))
print('positions '+str(self.positions))
print('times '+str(self.times))
print(start_time-self.end_time)
if self.time_step is None:
# self.times is sorted
[start_index, end_index] = np.searchsorted(self.times, [trajectory_start_time, trajectory_end_time])
temp = self.times[start_index]
if abs(self.times[start_index]-trajectory_start_time) > 1e-4:
# requires interpolation
# Since we already now the index we could avoid a second call to search sorted
trajectory.append(self.get_planned_position_at(trajectory_start_time))
times.append(trajectory_start_time)
for i in range(start_index, end_index):
trajectory.append(self.positions[i])
times.append(self.times[i])
# trajectory_end_time <= times[end_index]
if abs(self.times[end_index]-trajectory_end_time) > 1e-4:
# requires interpolation
trajectory.append(self.get_planned_position_at(trajectory_end_time))
times.append(trajectory_end_time)
else:
trajectory.append(self.positions[end_index])
times.append(trajectory_end_time)
else:
start_index_float = float((trajectory_start_time - self.start_time) / self.time_step)
end_index_float = float((trajectory_end_time - self.start_time) / self.time_step)
lower = math.ceil(start_index_float)
upper = min(math.floor(end_index_float), len(self.positions) - 1)
if lower != start_index_float:
pos_0 = self.get_planned_position_at(start_time)
trajectory.append(np.copy(pos_0))
times.append(trajectory_start_time)
for index in range(lower, upper + 1):
trajectory.append(self.positions[index])
# times.append(self.start_time+index*self.time_step)
times.append(self.times[index])
if upper != end_index_float:
pos_end = self.get_planned_position_at(end_time)
trajectory.append(pos_end)
times.append(trajectory_end_time)
return trajectory, times
def get_end_time(self):
if self.time_step is not None:
return self.start_time + (len(self.positions) - 1) * self.time_step
else:
return self.times[-1]
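# Illustrative example of a fixed-time-step plan (waypoints are placeholders):
#   fp = Flightplan(t0=0.0, dt=1.0, positions=[np.array([0.0, 0.0]), np.array([1.0, 0.0]), np.array([2.0, 0.0])])
#   fp.get_planned_position_at(0.5)  # -> array([0.5, 0.0]), linear interpolation between the first two waypoints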
class Agent:
def __init__(self, env, radius, max_speed, start=None, end=None, start_time=0, agent_logic='dumb',
centralized_manager=None, algo_type=None, agent_dynamics=None, id=0, sensing_radius=10000,
flight_leg='initial'):
self.id = id
self.environment = env
self.centralized_manager = centralized_manager
self.agent_dynamics = agent_dynamics
if agent_logic == 'dumb':
protected_area = self.environment.get_protected_area()
else:
# All other agents can wait in place
protected_area = None
# Can't have random start and not random end (or vice versa)
if start is None or end is None:
self.start, self.goal = self.environment.get_random_start_and_end(protected_area_start=protected_area)
if np.linalg.norm(self.start - self.goal) < 10:
# Play one more time
# print('agent start and goal are close, redrawing at random')
self.start, self.goal = self.environment.get_random_start_and_end(protected_area_start=protected_area)
if np.linalg.norm(self.start - self.goal) < 10:
print('unlikely, agent start and goal are still close')
else:
self.start = start
self.goal = end
self.position = np.copy(self.start) # Passed by reference
self.new_position = np.copy(self.start)
self.radius = radius
self.orientation = 0
self.minSpeed = 0.0
self.maxSpeed = max_speed
self.sensing_radius = sensing_radius
self.desired_start_time = start_time
self.start_time = start_time # actual start time if a ground delay is planned
if np.linalg.norm(self.goal - self.start) == 0:
print(agent_logic)
print(start)
print(end)
print(np.linalg.norm(self.goal - self.start))
self.velocity = self.maxSpeed * (self.goal - self.start) / (np.linalg.norm(self.goal - self.start))
self.new_velocity = self.velocity
self.trajectory = []
self.trajectory_times = []
self.collision_avoidance_time = []
self.preflight_time = None
self.flightPlan = None
self.status = 'ok'
self.agent_logic = agent_logic
self.tolerance = self.environment.tolerance
self.t_removed_from_sim=None
if agent_logic == 'dumb':
self.ownship = False
else:
self.ownship = True
self.flight_status = 'initialized'
self.algo_type = algo_type
self.cumulative_density=0
self.density=0
self.n_steps=0
self.flight_leg=flight_leg
def get_predicted_end_time(self):
if self.flightPlan is not None:
return self.flightPlan.end_time
else:
print('Agent: in order to get the predicted end time a flight plan must exist')
return self.start_time
def compute_next_move(self, current_time, dt, debug=False, density=0):
""" Store the next position in self.new_position. The position is updated when move is called """
if self.agent_logic == 'dumb':
self.new_position = self.compute_straight_move(self.position, self.goal, self.maxSpeed, dt)
self.new_velocity = (self.new_position - self.position) / dt
if self.agent_logic == 'reactive':
self.cumulative_density += density
self.n_steps += 1
if self.algo_type is None:
self.algo_type = 'MVP'
self.new_velocity = self.collision_avoidance(dt, algo_type=self.algo_type)
self.new_velocity = self.velocity_update(self.new_velocity)
self.new_position += self.new_velocity * dt
if self.agent_logic == 'strategic':
# Follow flight plan (without consideration for kinematic properties)
self.new_position = self.flightPlan.get_planned_position_at(current_time + dt, debug=debug)
self.new_velocity = (self.new_position - self.position) / dt
if debug:
print('New position ' + str(self.new_position))
print('old position ' + str(self.position))
if self.trajectory == []:
self.trajectory.append(np.copy(self.position))
self.trajectory_times.append(current_time)
self.trajectory.append(np.copy(self.new_position))
self.trajectory_times.append(current_time + dt)
self.flight_status = 'ongoing'
def compute_straight_move(self, current_position, goal, speed, dt):
orientation = math.atan2(goal[1] - current_position[1], goal[0] - current_position[0])
d = np.linalg.norm(goal - current_position)
max_step_length = min(speed * dt, d) # slow down to arrive at the goal on the next time step
return current_position + np.array([math.cos(orientation), math.sin(orientation)]) * max_step_length
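    # e.g. (illustrative) self.compute_straight_move(np.array([0.0, 0.0]), np.array([10.0, 0.0]), 5.0, 1.0)
    # returns array([5.0, 0.0]): one step of length speed*dt towards the goal, capped at the remaining distance.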
def move(self):
self.position = np.copy(self.new_position)
self.velocity = np.copy(self.new_velocity)
return self.position
def velocity_update(self, new_velocity):
# Introduce kinematic constraints
# For now just clamp the velocity and instantly change the orientation
v = np.linalg.norm(new_velocity)
v_clamped = my_utils.clamp(self.minSpeed, self.maxSpeed, v)
if self.agent_dynamics is None:
return new_velocity * v_clamped / v
else:
turn_angle = my_utils.get_angle(self.velocity, new_velocity)
max_angle=30*math.pi/180
if abs(turn_angle)>max_angle:
vel = self.velocity * v_clamped / np.linalg.norm(self.velocity)
theta=math.copysign(max_angle,turn_angle)
return vel @ np.asarray([[math.cos(theta), math.sin(theta)], [-math.sin(theta), math.cos(theta)]])
else:
return new_velocity * v_clamped / v
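    # e.g. (illustrative) with minSpeed=0 and maxSpeed=10, a requested velocity of [30, 0] is scaled down
    # to [10, 0]; when agent_dynamics is set, the turn angle is additionally limited to 30 degrees per step.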
def preflight(self, dt, algo_type='Straight', density=0):
# Given, the start/goals and published flight plans of other agents find a free path and publish it
self.density = density
if self.centralized_manager is None:
print('agent.py preflight error, a centralized manager must exist')
if algo_type == 'Straight':
timer_start = python_time.time()
plan = []
plan.append(self.start)
pos = np.copy(self.start)
d = np.linalg.norm(self.goal - pos)
# Larger time steps require larger tolerance
# TODO tolerances are a bit of a mess
while d > self.maxSpeed * dt:
pos = self.compute_straight_move(pos, self.goal, self.maxSpeed, dt)
d = np.linalg.norm(self.goal - pos)
plan.append(pos)
if d != 0:
plan.append(self.goal)
self.flightPlan = Flightplan(self.start_time, dt, plan)
timer_end = python_time.time()
self.preflight_time = timer_end - timer_start
return self.flightPlan
if algo_type == 'LocalVO':
timer_start = python_time.time()
local_planner = pathPlanning.Local_VO(self.start, self.goal, self.start_time, self.maxSpeed, self.centralized_manager, self.tolerance)
success, plan, times = local_planner.search()
if not success:
self.flight_status = 'cancelled'
return None
self.start_time = times[0]
## Debug
if len(times) < 2:
print('the plan is too short')
print('agent start '+ str(self.start))
print('agent goal '+str(self.goal))
print('agent plan pos '+str(plan))
print('agent plan times ' + str(times))
self.flightPlan = Flightplan(times[0], times[1] - times[0], plan, times=np.array(times))
timer_end = python_time.time()
self.preflight_time = timer_end - timer_start
return self.flightPlan
if algo_type == 'Decoupled':
timer_start = python_time.time()
decoupled_planner = pathPlanning.DecoupledApproach(self.start, self.goal, self.start_time, self.maxSpeed,
self.centralized_manager, self.tolerance)
success, plan, times = decoupled_planner.search()
if not success:
self.flight_status = 'cancelled'
return None
self.start_time = times[0]
self.flightPlan = Flightplan(times[0], times[1] - times[0], plan, times=np.array(times))
timer_end = python_time.time()
self.preflight_time = timer_end - timer_start
return self.flightPlan
if algo_type == 'SIPP':
timer_start = python_time.time()
sipp_planner = pathPlanning.SIPP(self.start, self.goal, self.start_time, self.maxSpeed,
self.centralized_manager, self.tolerance)
success, plan, times = sipp_planner.search()
if not success:
self.flight_status = 'cancelled'
return None
self.start_time = times[0]
self.flightPlan = Flightplan(times[0], times[1] - times[0], plan, times=np.array(times))
timer_end = python_time.time()
self.preflight_time = timer_end - timer_start
return self.flightPlan
if algo_type == 'A_star_8':
timer_start = python_time.time()
astar_planner = pathPlanning.AStar_8grid(self.start, self.goal, self.start_time, self.maxSpeed,
self.centralized_manager)
success, plan, times = astar_planner.search()
if not success:
self.flight_status = 'cancelled'
timer_end = python_time.time()
self.preflight_time = timer_end - timer_start
return None
self.start_time = times[0]
self.flightPlan = Flightplan(times[0], times[1] - times[0], plan, times=np.array(times))
timer_end = python_time.time()
self.preflight_time = timer_end - timer_start
return self.flightPlan
else:
print('The algo type ' + algo_type + ' is not implemented')
def can_safely_take_off(self, t):
if self.algo_type == 'straight':
return True
neighbors = self.environment.get_neighbors(self.position, self.radius)
for vehicle in neighbors:
if t >= vehicle.start_time and vehicle.id != self.id:
# if np.linalg.norm(self.position - vehicle.position) <= self.radius:
self.flight_status = 'waiting'
return False
self.start_time = t
return True
def collision_avoidance(self, dt, algo_type='MVP'):
# Given current position, next flight plan goal and surrounding vehicles decide where to go
# Based on Hoekstra Bluesky simulator
# The returned velocity might not be feasible
if algo_type == 'MVP_Bluesky':
timer_start = python_time.time()
neighbors = self.get_neighbors()
velocity_change = np.asarray([0.0, 0.0])
direction = self.goal - self.position
d = np.linalg.norm(direction)
desired_velocity = min(self.maxSpeed, d / dt) * direction / d
safety_factor = 1.10 # 10% safety factor (as in The effects of Swarming on a Voltage Potential-Based Conflict Resolution Algorithm, <NAME>)
# if d<=self.radius:
# dV=0
# else:
for neighbor in neighbors:
# Find Time of Closest Approach
delta_pos = self.position - neighbor.position
                dist = np.linalg.norm(delta_pos)
                delta_vel = desired_velocity - neighbor.velocity
                if np.linalg.norm(delta_vel) == 0:
                    t_cpa = 0
                else:
                    t_cpa = -np.dot(delta_pos, delta_vel) / np.dot(delta_vel, delta_vel)
dcpa = delta_pos+delta_vel*t_cpa
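                # dcpa is the predicted relative position (miss-distance vector) at the
                # closest point of approach; a conflict exists when its norm is smaller
                # than the protected radius.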
dabsH = np.linalg.norm(dcpa)
# If there is a conflict
if dabsH < self.radius:
# If head-on conflict
if dabsH<=10:
dabsH=10
dcpa[0] = delta_pos[1] / dist * dabsH
dcpa[1] = -delta_pos[0] / dist * dabsH
if self.radius*safety_factor < dist:
erratum = np.cos(np.arcsin((self.radius*safety_factor) / dist) - np.arcsin(dabsH / dist))
dV =(((self.radius*safety_factor) / erratum - dabsH) * dcpa)/(abs(t_cpa)*dabsH)
else:
# If already moving away from conflict (tcpa is negative) then just keep going
if t_cpa<=0:
dV = 0
else:
dV =(self.radius*safety_factor - dabsH)*dcpa/(abs(t_cpa)*dabsH)
velocity_change += dV
timer_end = python_time.time()
self.collision_avoidance_time.append(timer_end - timer_start)
return desired_velocity + velocity_change
elif algo_type == 'VO':
timer_start = python_time.time()
intruders = self.get_neighbors()
d = np.linalg.norm(self.goal - self.position)
speed = min(d / dt, self.maxSpeed)
if d == 0:
print('VO, this should not happen')
print('distance to goal is 0')
desired_velocity = (self.goal - self.position) * speed / d
model = setupMIQCP(intruders, desired_velocity, self)
model.optimize()
if model.status != GRB.Status.OPTIMAL:
print('Error gurobi failed to find a solution')
print(model.status)
vars = model.getVars()
if intruders != []:
# plotter([-1000,1000],[-1000,1000],100,[get_VO(intruders[0],self)],chosen_v=np.array([vars[0].x,vars[1].x]))
pass
timer_end = python_time.time()
self.collision_avoidance_time.append(timer_end - timer_start)
return np.array([vars[0].x, vars[1].x])
elif algo_type == 'ORCA':
timer_start = python_time.time()
reactive_solver = orca.ORCA()
vel=reactive_solver.compute_new_velocity(self, dt)
timer_end = python_time.time()
self.collision_avoidance_time.append(timer_end - timer_start)
return vel
elif algo_type == 'straight':
timer_start = python_time.time()
d = np.linalg.norm(self.goal - self.position)
speed = min(d / dt, self.maxSpeed)
desired_velocity = (self.goal - self.position) * speed / d
timer_end = python_time.time()
self.collision_avoidance_time.append(timer_end - timer_start)
return desired_velocity
else:
print(algo_type+' not implemented ')
def get_neighbors(self):
neighbors = self.environment.get_neighbors(self.position, self.sensing_radius)
if neighbors == []:
return []
else:
return neighbors[neighbors != self]
def get_nearest_neighbors(self, k, max_radius):
# Will return itself so query one more neighbor
neighbors = self.environment.get_nearest_neighbors(self.position, k+1, max_radius)
if neighbors == []:
return []
else:
return neighbors[neighbors != self]
def finish_flight(self, t,goal_pos=None, t_removed_from_sim=None):
self.flight_status = 'finished'
self.arrival_time = t
self.t_removed_from_sim=t_removed_from_sim
if goal_pos is not None:
self.trajectory.append(np.copy(goal_pos))
self.trajectory_times.append(t)
def log_agent(self):
agent_log = {'flight_status': self.flight_status,
'agent_type': self.agent_logic,
'desired_time_of_departure': self.desired_start_time,
'agent_id':self.id}
if self.flight_status== 'finished' or self.flight_status == 'ongoing':
agent_log['actual_time_of_departure'] = self.start_time
if self.flight_status == 'finished':
ideal_length = float(np.linalg.norm(self.goal - self.start))
actual_length = 0
if self.trajectory == []:
print('agent, empty trajectory ') # happens if start and goal are really close
print(self.start)
print(self.goal)
pos_0 = self.trajectory[0]
for pos in self.trajectory:
d = np.linalg.norm(pos - pos_0)
actual_length += d
pos_0 = np.copy(pos)
direction = self.goal - self.start
heading = math.atan2(direction[1], direction[0])
if self.agent_logic == 'reactive':
self.density = self.cumulative_density/self.n_steps - 1
agent_log['flight_status']= self.flight_status
agent_log['agent_type']= self.agent_logic
agent_log['length_ideal']= ideal_length
agent_log['actual_length']= actual_length
agent_log['ideal_time_of_arrival']= self.desired_start_time+ideal_length / self.maxSpeed
agent_log['actual_time_of_arrival']= self.arrival_time
if self.t_removed_from_sim is not None:
agent_log['time_removed_from_sim']=self.t_removed_from_sim
agent_log['heading']= heading
agent_log['density']= self.density
if self.agent_logic == 'strategic':
agent_log['time_to_preflight'] = self.preflight_time
elif self.agent_logic == 'reactive':
agent_log['average_time_to_plan_avoidance'] = sum(self.collision_avoidance_time) / len(self.collision_avoidance_time)
agent_log['total_planning_time'] = sum(self.collision_avoidance_time)
return agent_log
def get_VO(intruder_agent, ownship_agent):
if intruder_agent == ownship_agent:
print('get_VO this should not happen intruder and ownship are the same')
rel_pos = intruder_agent.position - ownship_agent.position
d = np.linalg.norm(rel_pos)
if d == 0:
print('the distance between the two agents is 0')
if ownship_agent.radius > d:
print('there is an intruder in the protected radius')
print(ownship_agent.position)
print(intruder_agent.position)
alpha = math.asin(ownship_agent.radius / d) # VO cone half-angle (>=0)
theta = math.atan2(rel_pos[1], rel_pos[0])
vector1 = [math.cos(theta + alpha), math.sin(theta + alpha)]
vector2 = [math.cos(theta - alpha), math.sin(theta - alpha)]
# must be greater
normal_1 = np.array([vector1[1], -vector1[0]]) # Rotated +90 degrees
constraint1 = lambda x, y: np.dot((np.array([x, y]) - intruder_agent.velocity) + 0.1 * normal_1, normal_1)
# must be smaller
normal_2 = np.array([-vector2[1], vector2[0]]) # Rotated -90 degrees
constraint2 = lambda x, y: np.dot((np.array([x, y]) - intruder_agent.velocity) + 0.1 * normal_2, normal_2)
return constraint1, constraint2
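# get_VO returns two affine constraints, one per edge of the velocity-obstacle cone
# (half-angle alpha around the bearing theta to the intruder, shifted by the intruder's
# velocity and padded with a small 0.1 buffer). setupMIQCP below requires at least one
# of the two constraints to hold so that the chosen velocity stays outside the cone.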
def setupMIQCP(intruders, desired_vel, ownship_agent):
""" Intruders should be an array of agents """
model = grb.Model('VO')
max_vel = ownship_agent.maxSpeed
model.addVar(lb=-max_vel, ub=max_vel, name='x')
model.addVar(lb=-max_vel, ub=max_vel, name='y')
model.addVars(2 * len(intruders), vtype=GRB.BINARY)
model.update()
X = model.getVars()
n_intruder = 0
for intruder in intruders:
constraints_or = get_VO(intruder, ownship_agent)
n_constraint = 0
for constraint in constraints_or:
c = constraint(0, 0)
a = constraint(1, 0) - c
b = constraint(0, 1) - c
# K must be arbitrarily large so that when the binary constraint is 1 the constraint is always respected
K = abs(a * max_vel) + abs(b * max_vel) + c
model.addConstr(a * X[0] + b * X[1] - K * X[2 + 2 * n_intruder + n_constraint] <= -c)
n_constraint += 1
model.addConstr(X[2 + 2 * n_intruder] + X[2 + 2 * n_intruder + 1] <= 1)
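        # The two binaries of this intruder form a disjunction: at most one of the two
        # edge constraints may be relaxed, so at least one is enforced and the chosen
        # velocity stays outside this intruder's velocity obstacle.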
n_intruder += 1
model.addConstr(X[0] * X[0] + X[1] * X[1] <= max_vel ** 2)
model.setObjective(
(X[0] - desired_vel[0]) * (X[0] - desired_vel[0]) + (X[1] - desired_vel[1]) * (X[1] - desired_vel[1]),
GRB.MINIMIZE)
model.setParam("OutputFlag", 0)
model.setParam("FeasibilityTol", 1e-9)
model.update()
return model
|
[
"math.asin",
"math.atan2",
"math.copysign",
"numpy.linalg.norm",
"uam_simulator.pathPlanning.AStar_8grid",
"numpy.copy",
"numpy.arcsin",
"math.cos",
"math.ceil",
"uam_simulator.orca.ORCA",
"numpy.asarray",
"gurobipy.Model",
"math.sin",
"numpy.dot",
"math.floor",
"uam_simulator.my_utils.clamp",
"numpy.searchsorted",
"time.time",
"uam_simulator.pathPlanning.Local_VO",
"uam_simulator.pathPlanning.SIPP",
"numpy.array",
"uam_simulator.my_utils.get_angle",
"uam_simulator.pathPlanning.DecoupledApproach"
] |
[((25063, 25086), 'numpy.linalg.norm', 'np.linalg.norm', (['rel_pos'], {}), '(rel_pos)\n', (25077, 25086), True, 'import numpy as np\n'), ((25344, 25379), 'math.asin', 'math.asin', (['(ownship_agent.radius / d)'], {}), '(ownship_agent.radius / d)\n', (25353, 25379), False, 'import math\n'), ((25420, 25454), 'math.atan2', 'math.atan2', (['rel_pos[1]', 'rel_pos[0]'], {}), '(rel_pos[1], rel_pos[0])\n', (25430, 25454), False, 'import math\n'), ((25622, 25657), 'numpy.array', 'np.array', (['[vector1[1], -vector1[0]]'], {}), '([vector1[1], -vector1[0]])\n', (25630, 25657), True, 'import numpy as np\n'), ((25829, 25864), 'numpy.array', 'np.array', (['[-vector2[1], vector2[0]]'], {}), '([-vector2[1], vector2[0]])\n', (25837, 25864), True, 'import numpy as np\n'), ((26155, 26170), 'gurobipy.Model', 'grb.Model', (['"""VO"""'], {}), "('VO')\n", (26164, 26170), True, 'import gurobipy as grb\n'), ((7713, 7732), 'numpy.copy', 'np.copy', (['self.start'], {}), '(self.start)\n', (7720, 7732), True, 'import numpy as np\n'), ((7784, 7803), 'numpy.copy', 'np.copy', (['self.start'], {}), '(self.start)\n', (7791, 7803), True, 'import numpy as np\n'), ((10998, 11070), 'math.atan2', 'math.atan2', (['(goal[1] - current_position[1])', '(goal[0] - current_position[0])'], {}), '(goal[1] - current_position[1], goal[0] - current_position[0])\n', (11008, 11070), False, 'import math\n'), ((11083, 11122), 'numpy.linalg.norm', 'np.linalg.norm', (['(goal - current_position)'], {}), '(goal - current_position)\n', (11097, 11122), True, 'import numpy as np\n'), ((11379, 11405), 'numpy.copy', 'np.copy', (['self.new_position'], {}), '(self.new_position)\n', (11386, 11405), True, 'import numpy as np\n'), ((11430, 11456), 'numpy.copy', 'np.copy', (['self.new_velocity'], {}), '(self.new_velocity)\n', (11437, 11456), True, 'import numpy as np\n'), ((11665, 11693), 'numpy.linalg.norm', 'np.linalg.norm', (['new_velocity'], {}), '(new_velocity)\n', (11679, 11693), True, 'import numpy as np\n'), ((11714, 11761), 'uam_simulator.my_utils.clamp', 'my_utils.clamp', (['self.minSpeed', 'self.maxSpeed', 'v'], {}), '(self.minSpeed, self.maxSpeed, v)\n', (11728, 11761), False, 'from uam_simulator import my_utils\n'), ((25470, 25493), 'math.cos', 'math.cos', (['(theta + alpha)'], {}), '(theta + alpha)\n', (25478, 25493), False, 'import math\n'), ((25495, 25518), 'math.sin', 'math.sin', (['(theta + alpha)'], {}), '(theta + alpha)\n', (25503, 25518), False, 'import math\n'), ((25535, 25558), 'math.cos', 'math.cos', (['(theta - alpha)'], {}), '(theta - alpha)\n', (25543, 25558), False, 'import math\n'), ((25560, 25583), 'math.sin', 'math.sin', (['(theta - alpha)'], {}), '(theta - alpha)\n', (25568, 25583), False, 'import math\n'), ((493, 508), 'numpy.array', 'np.array', (['times'], {}), '(times)\n', (501, 508), True, 'import numpy as np\n'), ((2067, 2100), 'numpy.searchsorted', 'np.searchsorted', (['self.times', 'time'], {}), '(self.times, time)\n', (2082, 2100), True, 'import numpy as np\n'), ((4101, 4174), 'numpy.searchsorted', 'np.searchsorted', (['self.times', '[trajectory_start_time, trajectory_end_time]'], {}), '(self.times, [trajectory_start_time, trajectory_end_time])\n', (4116, 4174), True, 'import numpy as np\n'), ((5371, 5399), 'math.ceil', 'math.ceil', (['start_index_float'], {}), '(start_index_float)\n', (5380, 5399), False, 'import math\n'), ((8111, 8149), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.goal - self.start)'], {}), '(self.goal - self.start)\n', (8125, 8149), True, 'import numpy as np\n'), ((8361, 8399), 
'numpy.linalg.norm', 'np.linalg.norm', (['(self.goal - self.start)'], {}), '(self.goal - self.start)\n', (8375, 8399), True, 'import numpy as np\n'), ((10780, 10806), 'numpy.copy', 'np.copy', (['self.new_position'], {}), '(self.new_position)\n', (10787, 10806), True, 'import numpy as np\n'), ((11889, 11936), 'uam_simulator.my_utils.get_angle', 'my_utils.get_angle', (['self.velocity', 'new_velocity'], {}), '(self.velocity, new_velocity)\n', (11907, 11936), False, 'from uam_simulator import my_utils\n'), ((12728, 12746), 'time.time', 'python_time.time', ([], {}), '()\n', (12744, 12746), True, 'import time as python_time\n'), ((12823, 12842), 'numpy.copy', 'np.copy', (['self.start'], {}), '(self.start)\n', (12830, 12842), True, 'import numpy as np\n'), ((12859, 12890), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.goal - pos)'], {}), '(self.goal - pos)\n', (12873, 12890), True, 'import numpy as np\n'), ((13363, 13381), 'time.time', 'python_time.time', ([], {}), '()\n', (13379, 13381), True, 'import time as python_time\n'), ((13536, 13554), 'time.time', 'python_time.time', ([], {}), '()\n', (13552, 13554), True, 'import time as python_time\n'), ((13583, 13705), 'uam_simulator.pathPlanning.Local_VO', 'pathPlanning.Local_VO', (['self.start', 'self.goal', 'self.start_time', 'self.maxSpeed', 'self.centralized_manager', 'self.tolerance'], {}), '(self.start, self.goal, self.start_time, self.maxSpeed,\n self.centralized_manager, self.tolerance)\n', (13604, 13705), False, 'from uam_simulator import pathPlanning\n'), ((14342, 14360), 'time.time', 'python_time.time', ([], {}), '()\n', (14358, 14360), True, 'import time as python_time\n'), ((14517, 14535), 'time.time', 'python_time.time', ([], {}), '()\n', (14533, 14535), True, 'import time as python_time\n'), ((14568, 14700), 'uam_simulator.pathPlanning.DecoupledApproach', 'pathPlanning.DecoupledApproach', (['self.start', 'self.goal', 'self.start_time', 'self.maxSpeed', 'self.centralized_manager', 'self.tolerance'], {}), '(self.start, self.goal, self.start_time, self\n .maxSpeed, self.centralized_manager, self.tolerance)\n', (14598, 14700), False, 'from uam_simulator import pathPlanning\n'), ((15090, 15108), 'time.time', 'python_time.time', ([], {}), '()\n', (15106, 15108), True, 'import time as python_time\n'), ((15260, 15278), 'time.time', 'python_time.time', ([], {}), '()\n', (15276, 15278), True, 'import time as python_time\n'), ((15306, 15424), 'uam_simulator.pathPlanning.SIPP', 'pathPlanning.SIPP', (['self.start', 'self.goal', 'self.start_time', 'self.maxSpeed', 'self.centralized_manager', 'self.tolerance'], {}), '(self.start, self.goal, self.start_time, self.maxSpeed,\n self.centralized_manager, self.tolerance)\n', (15323, 15424), False, 'from uam_simulator import pathPlanning\n'), ((15792, 15810), 'time.time', 'python_time.time', ([], {}), '()\n', (15808, 15810), True, 'import time as python_time\n'), ((15966, 15984), 'time.time', 'python_time.time', ([], {}), '()\n', (15982, 15984), True, 'import time as python_time\n'), ((16013, 16123), 'uam_simulator.pathPlanning.AStar_8grid', 'pathPlanning.AStar_8grid', (['self.start', 'self.goal', 'self.start_time', 'self.maxSpeed', 'self.centralized_manager'], {}), '(self.start, self.goal, self.start_time, self.\n maxSpeed, self.centralized_manager)\n', (16037, 16123), False, 'from uam_simulator import pathPlanning\n'), ((16608, 16626), 'time.time', 'python_time.time', ([], {}), '()\n', (16624, 16626), True, 'import time as python_time\n'), ((17621, 17639), 'time.time', 'python_time.time', ([], {}), 
'()\n', (17637, 17639), True, 'import time as python_time\n'), ((17715, 17737), 'numpy.asarray', 'np.asarray', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (17725, 17737), True, 'import numpy as np\n'), ((17804, 17829), 'numpy.linalg.norm', 'np.linalg.norm', (['direction'], {}), '(direction)\n', (17818, 17829), True, 'import numpy as np\n'), ((19653, 19671), 'time.time', 'python_time.time', ([], {}), '()\n', (19669, 19671), True, 'import time as python_time\n'), ((1330, 1351), 'math.floor', 'math.floor', (['idx_float'], {}), '(idx_float)\n', (1340, 1351), False, 'import math\n'), ((1387, 1407), 'math.ceil', 'math.ceil', (['idx_float'], {}), '(idx_float)\n', (1396, 1407), False, 'import math\n'), ((2016, 2043), 'numpy.copy', 'np.copy', (['self.positions[-1]'], {}), '(self.positions[-1])\n', (2023, 2043), True, 'import numpy as np\n'), ((5424, 5451), 'math.floor', 'math.floor', (['end_index_float'], {}), '(end_index_float)\n', (5434, 5451), False, 'import math\n'), ((7196, 7234), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.start - self.goal)'], {}), '(self.start - self.goal)\n', (7210, 7234), True, 'import numpy as np\n'), ((8253, 8291), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.goal - self.start)'], {}), '(self.goal - self.start)\n', (8267, 8291), True, 'import numpy as np\n'), ((10670, 10692), 'numpy.copy', 'np.copy', (['self.position'], {}), '(self.position)\n', (10677, 10692), True, 'import numpy as np\n'), ((12118, 12154), 'math.copysign', 'math.copysign', (['max_angle', 'turn_angle'], {}), '(max_angle, turn_angle)\n', (12131, 12154), False, 'import math\n'), ((13144, 13175), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.goal - pos)'], {}), '(self.goal - pos)\n', (13158, 13175), True, 'import numpy as np\n'), ((16335, 16353), 'time.time', 'python_time.time', ([], {}), '()\n', (16351, 16353), True, 'import time as python_time\n'), ((18303, 18328), 'numpy.linalg.norm', 'np.linalg.norm', (['delta_pos'], {}), '(delta_pos)\n', (18317, 18328), True, 'import numpy as np\n'), ((18653, 18673), 'numpy.linalg.norm', 'np.linalg.norm', (['dcpa'], {}), '(dcpa)\n', (18667, 18673), True, 'import numpy as np\n'), ((19858, 19876), 'time.time', 'python_time.time', ([], {}), '()\n', (19874, 19876), True, 'import time as python_time\n'), ((19938, 19979), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.goal - self.position)'], {}), '(self.goal - self.position)\n', (19952, 19979), True, 'import numpy as np\n'), ((20704, 20722), 'time.time', 'python_time.time', ([], {}), '()\n', (20720, 20722), True, 'import time as python_time\n'), ((20816, 20848), 'numpy.array', 'np.array', (['[vars[0].x, vars[1].x]'], {}), '([vars[0].x, vars[1].x])\n', (20824, 20848), True, 'import numpy as np\n'), ((22450, 22467), 'numpy.copy', 'np.copy', (['goal_pos'], {}), '(goal_pos)\n', (22457, 22467), True, 'import numpy as np\n'), ((23586, 23624), 'math.atan2', 'math.atan2', (['direction[1]', 'direction[0]'], {}), '(direction[1], direction[0])\n', (23596, 23624), False, 'import math\n'), ((5620, 5634), 'numpy.copy', 'np.copy', (['pos_0'], {}), '(pos_0)\n', (5627, 5634), True, 'import numpy as np\n'), ((7495, 7533), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.start - self.goal)'], {}), '(self.start - self.goal)\n', (7509, 7533), True, 'import numpy as np\n'), ((12066, 12095), 'numpy.linalg.norm', 'np.linalg.norm', (['self.velocity'], {}), '(self.velocity)\n', (12080, 12095), True, 'import numpy as np\n'), ((14301, 14316), 'numpy.array', 'np.array', (['times'], {}), '(times)\n', (14309, 14316), True, 'import numpy as np\n'), 
((15049, 15064), 'numpy.array', 'np.array', (['times'], {}), '(times)\n', (15057, 15064), True, 'import numpy as np\n'), ((15751, 15766), 'numpy.array', 'np.array', (['times'], {}), '(times)\n', (15759, 15766), True, 'import numpy as np\n'), ((16567, 16582), 'numpy.array', 'np.array', (['times'], {}), '(times)\n', (16575, 16582), True, 'import numpy as np\n'), ((18413, 18438), 'numpy.linalg.norm', 'np.linalg.norm', (['delta_vel'], {}), '(delta_vel)\n', (18427, 18438), True, 'import numpy as np\n'), ((20909, 20927), 'time.time', 'python_time.time', ([], {}), '()\n', (20925, 20927), True, 'import time as python_time\n'), ((20958, 20969), 'uam_simulator.orca.ORCA', 'orca.ORCA', ([], {}), '()\n', (20967, 20969), False, 'from uam_simulator import orca\n'), ((21057, 21075), 'time.time', 'python_time.time', ([], {}), '()\n', (21073, 21075), True, 'import time as python_time\n'), ((22999, 23037), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.goal - self.start)'], {}), '(self.goal - self.start)\n', (23013, 23037), True, 'import numpy as np\n'), ((23401, 23428), 'numpy.linalg.norm', 'np.linalg.norm', (['(pos - pos_0)'], {}), '(pos - pos_0)\n', (23415, 23428), True, 'import numpy as np\n'), ((23496, 23508), 'numpy.copy', 'np.copy', (['pos'], {}), '(pos)\n', (23503, 23508), True, 'import numpy as np\n'), ((25720, 25736), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (25728, 25736), True, 'import numpy as np\n'), ((25927, 25943), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (25935, 25943), True, 'import numpy as np\n'), ((1627, 1643), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (1635, 1643), True, 'import numpy as np\n'), ((2323, 2339), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (2331, 2339), True, 'import numpy as np\n'), ((11269, 11290), 'math.cos', 'math.cos', (['orientation'], {}), '(orientation)\n', (11277, 11290), False, 'import math\n'), ((11292, 11313), 'math.sin', 'math.sin', (['orientation'], {}), '(orientation)\n', (11300, 11313), False, 'import math\n'), ((18551, 18579), 'numpy.dot', 'np.dot', (['delta_vel', 'delta_vel'], {}), '(delta_vel, delta_vel)\n', (18557, 18579), True, 'import numpy as np\n'), ((21237, 21255), 'time.time', 'python_time.time', ([], {}), '()\n', (21253, 21255), True, 'import time as python_time\n'), ((21272, 21313), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.goal - self.position)'], {}), '(self.goal - self.position)\n', (21286, 21313), True, 'import numpy as np\n'), ((21456, 21474), 'time.time', 'python_time.time', ([], {}), '()\n', (21472, 21474), True, 'import time as python_time\n'), ((18520, 18548), 'numpy.dot', 'np.dot', (['delta_pos', 'delta_vel'], {}), '(delta_pos, delta_vel)\n', (18526, 18548), True, 'import numpy as np\n'), ((12196, 12211), 'math.cos', 'math.cos', (['theta'], {}), '(theta)\n', (12204, 12211), False, 'import math\n'), ((12213, 12228), 'math.sin', 'math.sin', (['theta'], {}), '(theta)\n', (12221, 12228), False, 'import math\n'), ((12250, 12265), 'math.cos', 'math.cos', (['theta'], {}), '(theta)\n', (12258, 12265), False, 'import math\n'), ((19087, 19132), 'numpy.arcsin', 'np.arcsin', (['(self.radius * safety_factor / dist)'], {}), '(self.radius * safety_factor / dist)\n', (19096, 19132), True, 'import numpy as np\n'), ((19135, 19158), 'numpy.arcsin', 'np.arcsin', (['(dabsH / dist)'], {}), '(dabsH / dist)\n', (19144, 19158), True, 'import numpy as np\n'), ((12233, 12248), 'math.sin', 'math.sin', (['theta'], {}), '(theta)\n', (12241, 12248), False, 'import math\n')]
|
# %%
import os
import matplotlib.pyplot as plt
# %% Settings
datasets = ['hariko/scripts',]
# %% "「" を含む行数をカウントする関数
def count_kakko(f_path):
    '''Get the number of lines and the number of lines containing "「"
    parameters
    ----------
    f_path : str
        path of the file to inspect
    returns
    -------
    lines : int
        number of lines in the file
    lines_with_kakko
        number of lines containing "「"
    '''
lines = 0
lines_with_kakko = 0
with open(f_path, encoding='utf-8') as f:
while True:
l = f.readline()
if not l:
break
if '「' in l:
lines_with_kakko += 1
lines += 1
return (lines, lines_with_kakko)
# %% Inspect every file
params = []
for set_dir in datasets:
files = os.listdir(path=set_dir)
for f in files:
f_path = os.path.abspath(os.path.join(set_dir, f))
if os.path.isfile(f_path):
params.append(count_kakko(f_path))
# %% Visualize
(x, y) = zip(*params)
plt.scatter(x, y)
# %% Show y as a ratio instead of a line count
y = [y/x for (x, y) in params]
plt.scatter(x, y, alpha=0.3)
|
[
"matplotlib.pyplot.scatter",
"os.path.isfile",
"os.path.join",
"os.listdir"
] |
[((942, 959), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {}), '(x, y)\n', (953, 959), True, 'import matplotlib.pyplot as plt\n'), ((1012, 1040), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'alpha': '(0.3)'}), '(x, y, alpha=0.3)\n', (1023, 1040), True, 'import matplotlib.pyplot as plt\n'), ((722, 746), 'os.listdir', 'os.listdir', ([], {'path': 'set_dir'}), '(path=set_dir)\n', (732, 746), False, 'import os\n'), ((837, 859), 'os.path.isfile', 'os.path.isfile', (['f_path'], {}), '(f_path)\n', (851, 859), False, 'import os\n'), ((800, 824), 'os.path.join', 'os.path.join', (['set_dir', 'f'], {}), '(set_dir, f)\n', (812, 824), False, 'import os\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Toolset implementation for tpDcc-tools-unittests
"""
from __future__ import print_function, division, absolute_import
from tpDcc.libs.qt.widgets import toolset
class UnitTestsToolsetWidget(toolset.ToolsetWidget, object):
def __init__(self, *args, **kwargs):
self._unit_test_paths = kwargs.get('unit_test_paths', list())
self._unit_test_paths.extend([r'D:\tpDcc\tpDcc-libs-nameit\tests'])
super(UnitTestsToolsetWidget, self).__init__(*args, **kwargs)
def contents(self):
from tpDcc.tools.unittests.core import model, view, controller
unit_test_model = model.UnitTestModel()
unit_test_controller = controller.UnitTestController(client=self._client, model=unit_test_model)
unit_test_view = view.UnitTestView(
unit_test_paths=self._unit_test_paths, model=unit_test_model, controller=unit_test_controller, parent=self)
return [unit_test_view]
|
[
"tpDcc.tools.unittests.core.model.UnitTestModel",
"tpDcc.tools.unittests.core.view.UnitTestView",
"tpDcc.tools.unittests.core.controller.UnitTestController"
] |
[((659, 680), 'tpDcc.tools.unittests.core.model.UnitTestModel', 'model.UnitTestModel', ([], {}), '()\n', (678, 680), False, 'from tpDcc.tools.unittests.core import model, view, controller\n'), ((712, 785), 'tpDcc.tools.unittests.core.controller.UnitTestController', 'controller.UnitTestController', ([], {'client': 'self._client', 'model': 'unit_test_model'}), '(client=self._client, model=unit_test_model)\n', (741, 785), False, 'from tpDcc.tools.unittests.core import model, view, controller\n'), ((811, 941), 'tpDcc.tools.unittests.core.view.UnitTestView', 'view.UnitTestView', ([], {'unit_test_paths': 'self._unit_test_paths', 'model': 'unit_test_model', 'controller': 'unit_test_controller', 'parent': 'self'}), '(unit_test_paths=self._unit_test_paths, model=\n unit_test_model, controller=unit_test_controller, parent=self)\n', (828, 941), False, 'from tpDcc.tools.unittests.core import model, view, controller\n')]
|
import random
import time
import numpy as np
import copy
from itertools import compress
random.seed(123)
# Remove columns from the adjacency matrix.
# TODO: needs additional scaling?
# Be careful not to modify the initial complete support matrix
def get_sub_sampled_support(complete_support, node_to_keep):
index_array = complete_support[0][:] # make a copy to avoid modifying complete support
values = np.zeros(complete_support[1].shape)
index_array_sorted = index_array[:, 1].argsort()
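    # Walk through the nonzero entries sorted by column index and copy a value only
    # when its column is in node_to_keep; every other entry stays zero, which
    # effectively removes those columns from the sparse support.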
j = 0
node_to_keep.sort()
for index_to_keep in node_to_keep:
while (j < len(index_array_sorted) and index_to_keep >= index_array[index_array_sorted[j]][1]):
if (index_to_keep == index_array[index_array_sorted[j]][1]):
values[index_array_sorted[j]] = complete_support[1][index_array_sorted[j]]
j += 1
sub_sampled_support = (index_array, values, complete_support[2])
return sub_sampled_support
# Return a train mask for label_percent of the training set.
# If maintain_label_balance, keep the smallest number of labels per class in the training set that respects label_percent, except for 100 %
def get_train_mask(label_percent, y_train, initial_train_mask, maintain_label_balance=False):
train_index = np.argwhere(initial_train_mask).reshape(-1)
train_mask = np.zeros((initial_train_mask.shape), dtype=bool) # list of False
if maintain_label_balance:
ones_index = []
for i in range(y_train.shape[1]): # find the ones for each class
ones_index.append(train_index[np.argwhere(y_train[train_index, i] > 0).reshape(-1)])
if label_percent < 100:
smaller_num = min(
int(len(l) * (label_percent / 100))
for l in ones_index) # find smaller number of ones per class that respect the % constraint
for ones in ones_index:
random_index = random.sample(list(ones), smaller_num)
train_mask[random_index] = True # set the same number of ones for each class, so the set is balanced
else:
for ones in ones_index:
train_mask[ones] = True
else:
random_sampling_set_size = int((label_percent / 100) * train_index.shape[0])
random_list = random.sample(list(train_index), random_sampling_set_size)
train_mask[random_list] = True
label_percent = (100 * np.sum(train_mask) / train_index.shape[0])
return train_mask, label_percent
# Returns a random list of indexes of the nodes to be kept.
def get_random_percent(num_nodes, percent):
if percent > 100:
print("This is not how percentage works.")
exit()
random_sampling_set_size = int((percent * num_nodes) / 100)
return random.sample(range(num_nodes), random_sampling_set_size)
#returns a list of indexes for the mask
def get_list_from_mask(mask):
return list(compress(range(len(mask)), mask))
# Set the features of nodes that shouldn't be in the set to extreme values to make sure they are not used by the GCNN
def modify_features_that_shouldnt_change_anything(features, note_to_keep):
note_doesnt_exist = [x for x in range(features[2][0]) if x not in note_to_keep]
a = np.where(np.isin(features[0][:, 0], note_doesnt_exist))
features[1][a[0]] = 10000000
|
[
"numpy.isin",
"numpy.sum",
"numpy.zeros",
"random.seed",
"numpy.argwhere"
] |
[((89, 105), 'random.seed', 'random.seed', (['(123)'], {}), '(123)\n', (100, 105), False, 'import random\n'), ((403, 438), 'numpy.zeros', 'np.zeros', (['complete_support[1].shape'], {}), '(complete_support[1].shape)\n', (411, 438), True, 'import numpy as np\n'), ((1324, 1370), 'numpy.zeros', 'np.zeros', (['initial_train_mask.shape'], {'dtype': 'bool'}), '(initial_train_mask.shape, dtype=bool)\n', (1332, 1370), True, 'import numpy as np\n'), ((3221, 3266), 'numpy.isin', 'np.isin', (['features[0][:, 0]', 'note_doesnt_exist'], {}), '(features[0][:, 0], note_doesnt_exist)\n', (3228, 3266), True, 'import numpy as np\n'), ((1263, 1294), 'numpy.argwhere', 'np.argwhere', (['initial_train_mask'], {}), '(initial_train_mask)\n', (1274, 1294), True, 'import numpy as np\n'), ((2400, 2418), 'numpy.sum', 'np.sum', (['train_mask'], {}), '(train_mask)\n', (2406, 2418), True, 'import numpy as np\n'), ((1562, 1602), 'numpy.argwhere', 'np.argwhere', (['(y_train[train_index, i] > 0)'], {}), '(y_train[train_index, i] > 0)\n', (1573, 1602), True, 'import numpy as np\n')]
|
# MIT License
#
# Copyright (c) 2021 <NAME> and <NAME> and <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import sentencepiece as spm
import shutil
from typing import Tuple
from openspeech.datasets.librispeech.preprocess.preprocess import collect_transcripts
SENTENCEPIECE_MODEL_NAME = "sp"
def _prepare_tokenizer(train_transcripts, vocab_size):
""" Prepare sentencepice tokenizer """
input_file = 'spm_input.txt'
model_type = 'unigram'
with open(input_file, 'w') as f:
for transcript in train_transcripts:
f.write(f"{transcript.split('|')[-1]}\n")
spm.SentencePieceTrainer.Train(f"--input={input_file} "
f"--model_prefix={SENTENCEPIECE_MODEL_NAME} "
f"--vocab_size={vocab_size} "
f"--model_type={model_type} "
f"--pad_id=0 "
f"--bos_id=1 "
f"--eos_id=2 "
f"--unk_id=3 "
f"--user_defined_symbols=<blank>")
def generate_manifest_files(dataset_path: str, manifest_file_path: str, vocab_path: str, vocab_size: int) -> None:
"""
Generate manifest files.
Format: {audio_path}\t{transcript}\t{numerical_label}
    Args:
        dataset_path (str): path of the dataset root directory
        manifest_file_path (str): path of the manifest file to write
        vocab_path (str): directory where the sentencepiece model and vocab are copied
        vocab_size (int): size of subword vocab
Returns:
None
"""
transcripts_collection = collect_transcripts(dataset_path)
_prepare_tokenizer(transcripts_collection[0], vocab_size)
shutil.copy(f"{SENTENCEPIECE_MODEL_NAME}.model", os.path.join(vocab_path, f"{SENTENCEPIECE_MODEL_NAME}.model"))
shutil.copy(f"{SENTENCEPIECE_MODEL_NAME}.vocab", os.path.join(vocab_path, f"{SENTENCEPIECE_MODEL_NAME}.vocab"))
sp = spm.SentencePieceProcessor()
sp.Load(os.path.join(vocab_path, f"{SENTENCEPIECE_MODEL_NAME}.model"))
with open(manifest_file_path, 'w') as f:
for idx, part in enumerate(['train-960', 'dev-clean', 'dev-other', 'test-clean', 'test-other']):
for transcript in transcripts_collection[idx]:
audio_path, transcript = transcript.split('|')
text = " ".join(sp.EncodeAsPieces(transcript))
label = " ".join([str(item) for item in sp.EncodeAsIds(transcript)])
f.write(f"{audio_path}\t{text}\t{label}\n")
|
[
"openspeech.datasets.librispeech.preprocess.preprocess.collect_transcripts",
"os.path.join",
"sentencepiece.SentencePieceProcessor",
"sentencepiece.SentencePieceTrainer.Train"
] |
[((1623, 1857), 'sentencepiece.SentencePieceTrainer.Train', 'spm.SentencePieceTrainer.Train', (['f"""--input={input_file} --model_prefix={SENTENCEPIECE_MODEL_NAME} --vocab_size={vocab_size} --model_type={model_type} --pad_id=0 --bos_id=1 --eos_id=2 --unk_id=3 --user_defined_symbols=<blank>"""'], {}), "(\n f'--input={input_file} --model_prefix={SENTENCEPIECE_MODEL_NAME} --vocab_size={vocab_size} --model_type={model_type} --pad_id=0 --bos_id=1 --eos_id=2 --unk_id=3 --user_defined_symbols=<blank>'\n )\n", (1653, 1857), True, 'import sentencepiece as spm\n'), ((2495, 2528), 'openspeech.datasets.librispeech.preprocess.preprocess.collect_transcripts', 'collect_transcripts', (['dataset_path'], {}), '(dataset_path)\n', (2514, 2528), False, 'from openspeech.datasets.librispeech.preprocess.preprocess import collect_transcripts\n'), ((2834, 2862), 'sentencepiece.SentencePieceProcessor', 'spm.SentencePieceProcessor', ([], {}), '()\n', (2860, 2862), True, 'import sentencepiece as spm\n'), ((2645, 2706), 'os.path.join', 'os.path.join', (['vocab_path', 'f"""{SENTENCEPIECE_MODEL_NAME}.model"""'], {}), "(vocab_path, f'{SENTENCEPIECE_MODEL_NAME}.model')\n", (2657, 2706), False, 'import os\n'), ((2761, 2822), 'os.path.join', 'os.path.join', (['vocab_path', 'f"""{SENTENCEPIECE_MODEL_NAME}.vocab"""'], {}), "(vocab_path, f'{SENTENCEPIECE_MODEL_NAME}.vocab')\n", (2773, 2822), False, 'import os\n'), ((2875, 2936), 'os.path.join', 'os.path.join', (['vocab_path', 'f"""{SENTENCEPIECE_MODEL_NAME}.model"""'], {}), "(vocab_path, f'{SENTENCEPIECE_MODEL_NAME}.model')\n", (2887, 2936), False, 'import os\n')]
|
import sys
from math import pi
from openmdao.api import Problem, Group, Component, IndepVarComp, ExecComp, \
ScipyOptimizer, SubProblem, CaseDriver
class MultiMinGroup(Group):
"""
In the range -pi <= x <= pi
function has 2 local minima, one is global
global min is: f(x) = -1.31415926 at x = pi
local min at: f(x) = -0.69084489952 at x = -3.041593
"""
def __init__(self):
super(MultiMinGroup, self).__init__()
self.add('indep', IndepVarComp('x', 0.0))
self.add("comp", ExecComp("fx = cos(x)-x/10."))
self.connect("indep.x", "comp.x")
def main(num_par_doe):
# First, define a Problem to be able to optimize our function.
sub = Problem(root=MultiMinGroup())
# set up our SLSQP optimizer
sub.driver = subdriver = ScipyOptimizer()
subdriver.options['optimizer'] = 'SLSQP'
subdriver.options['disp'] = False # disable optimizer output
# In this case, our design variable is indep.x, which happens
# to be connected to the x parameter on our 'comp' component.
subdriver.add_desvar("indep.x", lower=-pi, upper=pi)
# We are minimizing comp.fx, so that's our objective.
subdriver.add_objective("comp.fx")
# Now, create our top level problem
prob = Problem(root=Group())
prob.root.add("top_indep", IndepVarComp('x', 0.0))
# add our subproblem. Note that 'indep.x' is actually an unknown
# inside of the subproblem, but outside of the subproblem we're treating
# it as a parameter.
prob.root.add("subprob", SubProblem(sub, params=['indep.x'],
unknowns=['comp.fx']))
prob.root.connect("top_indep.x", "subprob.indep.x")
# use a CaseDriver as our top level driver so we can run multiple
# separate optimizations concurrently. We'll run 'num_par_doe'
# concurrent cases. In this case we need no more than 2 because
# we're only running 2 total cases.
prob.driver = CaseDriver(num_par_doe=num_par_doe)
prob.driver.add_desvar('top_indep.x')
prob.driver.add_response(['subprob.indep.x', 'subprob.comp.fx'])
# these are the two cases we're going to run. The top_indep.x values of
# -1 and 1 will end up at the local and global minima when we run the
# concurrent subproblem optimizers.
prob.driver.cases = [
[('top_indep.x', -1.0)],
[('top_indep.x', 1.0)]
]
prob.setup(check=False)
# run the concurrent optimizations
prob.run()
# collect responses for all of our input cases
optvals = [dict(resp) for resp, success, msg in prob.driver.get_responses()]
# find the minimum value of subprob.comp.fx in our responses
global_opt = sorted(optvals, key=lambda x: x['subprob.comp.fx'])[0]
return global_opt
if __name__ == '__main__':
global_opt = main(2)
print("\nGlobal optimum:\n subprob.comp.fx = %s at subprob.indep.x = %s" %
(global_opt['subprob.comp.fx'], global_opt['subprob.indep.x']))
|
[
"openmdao.api.IndepVarComp",
"openmdao.api.ExecComp",
"openmdao.api.ScipyOptimizer",
"openmdao.api.CaseDriver",
"openmdao.api.Group",
"openmdao.api.SubProblem"
] |
[((823, 839), 'openmdao.api.ScipyOptimizer', 'ScipyOptimizer', ([], {}), '()\n', (837, 839), False, 'from openmdao.api import Problem, Group, Component, IndepVarComp, ExecComp, ScipyOptimizer, SubProblem, CaseDriver\n'), ((1994, 2029), 'openmdao.api.CaseDriver', 'CaseDriver', ([], {'num_par_doe': 'num_par_doe'}), '(num_par_doe=num_par_doe)\n', (2004, 2029), False, 'from openmdao.api import Problem, Group, Component, IndepVarComp, ExecComp, ScipyOptimizer, SubProblem, CaseDriver\n'), ((1346, 1368), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""x"""', '(0.0)'], {}), "('x', 0.0)\n", (1358, 1368), False, 'from openmdao.api import Problem, Group, Component, IndepVarComp, ExecComp, ScipyOptimizer, SubProblem, CaseDriver\n'), ((1572, 1629), 'openmdao.api.SubProblem', 'SubProblem', (['sub'], {'params': "['indep.x']", 'unknowns': "['comp.fx']"}), "(sub, params=['indep.x'], unknowns=['comp.fx'])\n", (1582, 1629), False, 'from openmdao.api import Problem, Group, Component, IndepVarComp, ExecComp, ScipyOptimizer, SubProblem, CaseDriver\n'), ((506, 528), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""x"""', '(0.0)'], {}), "('x', 0.0)\n", (518, 528), False, 'from openmdao.api import Problem, Group, Component, IndepVarComp, ExecComp, ScipyOptimizer, SubProblem, CaseDriver\n'), ((555, 584), 'openmdao.api.ExecComp', 'ExecComp', (['"""fx = cos(x)-x/10."""'], {}), "('fx = cos(x)-x/10.')\n", (563, 584), False, 'from openmdao.api import Problem, Group, Component, IndepVarComp, ExecComp, ScipyOptimizer, SubProblem, CaseDriver\n'), ((1305, 1312), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (1310, 1312), False, 'from openmdao.api import Problem, Group, Component, IndepVarComp, ExecComp, ScipyOptimizer, SubProblem, CaseDriver\n')]
|
# utilitary functions to create the expert and volunteers oracles from the taskruns dataset
import pandas as pd
from modules.utils import aux_functions
from modules.utils import firefox_dataset_p2 as fd
class Br_Feat_Oracle_Creator:
def __init__(self, bugreports, features):
self.bugreports = bugreports
self.features = features
def __shift_taskruns_answers(self, taskruns):
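        # Rotate the answers by one position (the last answer becomes the first) and
        # store the result in a 'new_answers' column aligned with the taskrun rows.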
new_answers = list(taskruns.answers.values)
new_answers = [new_answers[-1]] + new_answers
del new_answers[-1]
taskruns['new_answers'] = new_answers
return taskruns
def __create_exp_feat_br_matrix(self, expert_taskruns):
taskruns_expert = self.__shift_taskruns_answers(expert_taskruns)
taskruns_expert.sort_values(by='bug_id', inplace=True)
taskruns_expert = taskruns_expert[(taskruns_expert.bug_id != 1181835) & (taskruns_expert.bug_id != 1315514)] # drop taskrun lost during empirical study
feat_br_matrix = pd.DataFrame(columns=self.features.feat_name.values,
index=self.bugreports.Bug_Number)
feat_br_matrix.index.names = ['bug_number']
for idx,row in taskruns_expert.iterrows():
ans = row.new_answers.split(" ")
for i in range(len(ans)-2): # -2 ==> dropped features from branch 65
feat_name = feat_br_matrix.columns[i]
feat_br_matrix.at[row.bug_id, feat_name] = int(ans[i])
return feat_br_matrix
def create_br_feat_expert_matrix(self, expert_taskruns):
feat_br_matrix = self.__create_exp_feat_br_matrix(expert_taskruns)
fd.Feat_BR_Oracles.write_feat_br_expert_df(feat_br_matrix)
def create_br_feat_expert_2_matrix(self, expert_taskruns):
feat_br_matrix = self.__create_exp_feat_br_matrix(expert_taskruns)
fd.Feat_BR_Oracles.write_feat_br_expert_2_df(feat_br_matrix)
def create_br_feat_volunteers_matrix(self, taskruns_volunteers_1, taskruns_volunteers_2):
ignored_taskruns = [154, 155, 156, 157, 169, 170, 171, 172, 183, 184, 196,
197, 198, 199, 200, 201, 202, 203, 204, 206, 241, 242,
253, 264, 265, 266, 267, 268, 269, 270]
taskruns_volunteers_1 = self.__shift_taskruns_answers(taskruns_volunteers_1)
taskruns_volunteers_2 = self.__shift_taskruns_answers(taskruns_volunteers_2)
taskruns = pd.concat([taskruns_volunteers_1, taskruns_volunteers_2])
taskruns.sort_values(by='bug_id', inplace=True)
taskruns = taskruns[(taskruns.bug_id != 1181835) & (taskruns.bug_id != 1315514)] # drop taskrun lost during empirical study
not_ignored_taskruns = [t_id for t_id in taskruns.id.values if t_id not in ignored_taskruns]
taskruns = taskruns[taskruns.id.isin(not_ignored_taskruns)]
feat_br_matrix = pd.DataFrame(columns=self.features.feat_name.values,
index=self.bugreports.Bug_Number)
feat_br_matrix.index.names = ['bug_number']
for idx,row in taskruns.iterrows():
ans = row.new_answers.split(" ")
for i in range(len(ans)-2): # -2 ==> dropped features from branch 65
feat_name = feat_br_matrix.columns[i]
feat_br_matrix.at[row.bug_id, feat_name] = int(ans[i])
fd.Feat_BR_Oracles.write_feat_br_volunteers_df(feat_br_matrix)
|
[
"pandas.DataFrame",
"modules.utils.firefox_dataset_p2.Feat_BR_Oracles.write_feat_br_expert_2_df",
"modules.utils.firefox_dataset_p2.Feat_BR_Oracles.write_feat_br_volunteers_df",
"modules.utils.firefox_dataset_p2.Feat_BR_Oracles.write_feat_br_expert_df",
"pandas.concat"
] |
[((1011, 1102), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'self.features.feat_name.values', 'index': 'self.bugreports.Bug_Number'}), '(columns=self.features.feat_name.values, index=self.bugreports.\n Bug_Number)\n', (1023, 1102), True, 'import pandas as pd\n'), ((1678, 1736), 'modules.utils.firefox_dataset_p2.Feat_BR_Oracles.write_feat_br_expert_df', 'fd.Feat_BR_Oracles.write_feat_br_expert_df', (['feat_br_matrix'], {}), '(feat_br_matrix)\n', (1720, 1736), True, 'from modules.utils import firefox_dataset_p2 as fd\n'), ((1893, 1953), 'modules.utils.firefox_dataset_p2.Feat_BR_Oracles.write_feat_br_expert_2_df', 'fd.Feat_BR_Oracles.write_feat_br_expert_2_df', (['feat_br_matrix'], {}), '(feat_br_matrix)\n', (1937, 1953), True, 'from modules.utils import firefox_dataset_p2 as fd\n'), ((2528, 2585), 'pandas.concat', 'pd.concat', (['[taskruns_volunteers_1, taskruns_volunteers_2]'], {}), '([taskruns_volunteers_1, taskruns_volunteers_2])\n', (2537, 2585), True, 'import pandas as pd\n'), ((2995, 3086), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'self.features.feat_name.values', 'index': 'self.bugreports.Bug_Number'}), '(columns=self.features.feat_name.values, index=self.bugreports.\n Bug_Number)\n', (3007, 3086), True, 'import pandas as pd\n'), ((3470, 3532), 'modules.utils.firefox_dataset_p2.Feat_BR_Oracles.write_feat_br_volunteers_df', 'fd.Feat_BR_Oracles.write_feat_br_volunteers_df', (['feat_br_matrix'], {}), '(feat_br_matrix)\n', (3516, 3532), True, 'from modules.utils import firefox_dataset_p2 as fd\n')]
|
'''
precision_and_recall.py
Run MATCH with PeTaL data.
Last modified on 10 August 2021.
DESCRIPTION
precision_and_recall.py produces three plots from results in MATCH/PeTaL.
These three plots appear in plots/YYYYMMDD_precision_recall and are
as follows:
- HHMMSS_labels_MATCH_PeTaL.png, which varies threshold and plots number
of labels predicted. Higher threshold means fewer labels get past the threshold.
- HHMMSS_prc_MATCH_PeTaL.png, which plots a precision-recall curve by varying
the threshold. As threshold decreases from 1 to 0, precision goes down but recall
goes up (because more labels get past the threshold).
- HHMMSS_prf1_MATCH_PeTaL.png, which plots how precision, recall, and F1 score
vary as threshold varies from 0 to 1.
OPTIONS
-m, --match PATH/TO/MATCH
Path of MATCH folder.
-p, --plots PATH/TO/plots
Path of plots folder.
-d, --dataset PeTaL
Name of dataset, e.g., "PeTaL".
-v, --verbose
Enable verbosity.
USAGE
python3 precision_and_recall.py -m ../src/MATCH -p ../plots --verbose
Authors: <NAME> (<EMAIL>, <EMAIL>)
'''
import click
import os
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from datetime import datetime
import logging
from collections import namedtuple
from tqdm import tqdm
Stats = namedtuple("Stats", "threshold topk precision recall f1")
@click.command()
@click.option('--match', '-m', 'match_path', type=click.Path(exists=True), help='Path of MATCH folder.')
@click.option('--plots', '-p', 'plots_path', type=click.Path(exists=True), help='Path of plots folder.')
@click.option('--dataset', '-d', 'dataset', default='PeTaL', help='Name of dataset, e.g., "PeTaL".')
@click.option('--verbose', '-v', type=click.BOOL, is_flag=True, default=False, required=False, help='Verbose output.')
def main(match_path, plots_path, dataset, verbose):
"""Plots precision and recall and other statistics on graphs.
Args:
match_path (str): Path of MATCH folder.
plots_path (str): Path of plots folder.
verbose (bool): Verbose output.
"""
logging.basicConfig(
level=logging.INFO,
format="[%(asctime)s:%(name)s] %(message)s"
)
PRlogger = logging.getLogger("P&R")
DATASET = dataset
MODEL = 'MATCH'
res_labels = np.load(f"{match_path}/{DATASET}/results/{MODEL}-{DATASET}-labels.npy", allow_pickle=True)
res_scores = np.load(f"{match_path}/{DATASET}/results/{MODEL}-{DATASET}-scores.npy", allow_pickle=True)
test_labels = np.load(f"{match_path}/{DATASET}/test_labels.npy", allow_pickle=True)
train_labels = np.load(f"{match_path}/{DATASET}/train_labels.npy", allow_pickle=True)
if verbose:
PRlogger.info(f"Computing statistics by varying threshold for {MODEL} on {DATASET}.")
thresholds = list(x / 10000 for x in range(1, 10)) + \
list(x / 1000 for x in range(1, 10)) + \
list(x / 100 for x in range(1, 10)) + \
list(x / 20 for x in range(2, 19)) + \
list((90 + x) / 100 for x in range(1, 10)) + \
list((990 + x) / 1000 for x in range(1, 10)) + \
list((9990 + x) / 10000 for x in range(1, 10))
ps = []
rs = []
ts = []
f1s = []
topks = []
for threshold in tqdm(thresholds):
stats = compute_stats(threshold, res_labels, res_scores, test_labels)
ps.append(stats.precision)
rs.append(stats.recall)
ts.append(threshold)
f1s.append(stats.f1)
topks.append(stats.topk)
'''
Make the following plots to assess the performance of the model.
Precision-recall curve
Precision, recall, and F1 score by varying threshold
Numbers of labels predicted by varying threshold
'''
ALL_PLOTS_PATH = plots_path
if not os.path.exists(ALL_PLOTS_PATH):
os.mkdir(ALL_PLOTS_PATH)
else:
if verbose:
PRlogger.info(f"You already have a plots directory at {ALL_PLOTS_PATH}.")
now = datetime.now()
date_str = now.strftime("%Y%m%d")
time_str = now.strftime("%H%M%S")
comment = f"precision_recall" # "_on_{DATASET}"
PLOTS_PATH = os.path.join(ALL_PLOTS_PATH, f"{date_str}_{comment}")
if not os.path.exists(PLOTS_PATH):
os.mkdir(PLOTS_PATH)
if verbose:
PRlogger.info(f"New plots directory at {PLOTS_PATH}")
else:
if verbose:
PRlogger.info(f"You already have a plots directory at {PLOTS_PATH}")
########################################
# PRECISION-RECALL CURVE
########################################
plt.grid()
plt.title(f'Precision-Recall Curve for {MODEL} on {DATASET}, varying threshold')
plt.plot(ps, rs, linestyle='-')
plt.xlabel('Recall')
plt.xlim(0, 1)
plt.ylabel('Precision')
plt.ylim(0, 1)
PLOT_PATH = os.path.join(PLOTS_PATH, f'{time_str}_prc_{MODEL}_{DATASET}.png')
plt.savefig(fname=PLOT_PATH, facecolor='w', transparent=False)
PRlogger.info(f"Your plot is saved as {PLOT_PATH}")
plt.clf()
########################################
# PRECISION, RECALL, AND F1 SCORE BY THRESHOLD
########################################
plt.grid()
plt.title(f'Precision, Recall, and F1 Score by Threshold for {MODEL} on {DATASET}')
plt.plot(ts, ps, linestyle='-', label='Precision')
plt.plot(ts, rs, linestyle='-', label='Recall')
plt.plot(ts, f1s, linestyle='-', label='F1 score')
plt.xlabel('Threshold')
plt.xlim(0, 1)
plt.ylabel('Metrics')
plt.ylim(0, 1)
plt.legend()
PLOT_PATH = os.path.join(PLOTS_PATH, f'{time_str}_prf1_{MODEL}_{DATASET}.png')
plt.savefig(fname=PLOT_PATH, facecolor='w', transparent=False)
PRlogger.info(f"Your plot is saved as {PLOT_PATH}")
plt.clf()
########################################
# NUMBER OF LABELS PREDICTED BY THRESHOLD
########################################
plt.grid()
plt.title(f'Number of Labels Predicted by Threshold for {MODEL} on {DATASET}')
plt.plot(ts, topks, linestyle='-', label='Number of Labels')
plt.xlabel('Threshold')
plt.xlim(0, 1)
plt.ylabel('Labels')
plt.legend()
PLOT_PATH = os.path.join(PLOTS_PATH, f'{time_str}_labels_{MODEL}_{DATASET}.png')
plt.savefig(fname=PLOT_PATH, facecolor='w', transparent=False)
PRlogger.info(f"Your plot is saved as {PLOT_PATH}")
plt.clf()
def compute_stats(threshold, res_labels, res_scores, test_labels):
"""
compute_stats(threshold)
Parameters:
threshold: float, 0.0 < threshold < 1.0
res_labels: numpy array of predicted labels
res_scores: numpy array of predicted label scores
test_labels: numpy array of target labels
Returns:
Stats object containing
threshold
topk: average number of labels above threshold
precision: average precision across examples
recall: average recall across examples
f1: average F1 score across examples
Note:
precision, recall, and F1 scores are macro (averaged across examples, not labels)
"""
precisions = []
recalls = []
topks = []
f1s = []
for res_label, res_score, test_label in zip(res_labels, res_scores, test_labels):
        topk = np.argmax(res_score < threshold)  # index of the first score below the threshold, i.e. the number of labels scoring above it (assumes res_score is sorted in descending order)
precision = 1.0 if topk == 0 else np.mean([1 if x in test_label else 0 for x in res_label[:topk]])
recall = np.mean([1 if x in res_label[:topk] else 0 for x in test_label])
f1 = 0 if (precision + recall) == 0 else (2 * precision * recall) / (precision + recall)
topks.append(topk)
precisions.append(precision)
recalls.append(recall)
f1s.append(f1)
# print(res_label[:topk], precision, recall)
return Stats(threshold, np.mean(topks), np.mean(precisions), np.mean(recalls), np.mean(f1s))
if __name__ == '__main__':
main()
|
[
"matplotlib.pyplot.title",
"os.mkdir",
"numpy.load",
"matplotlib.pyplot.clf",
"numpy.argmax",
"click.option",
"logging.getLogger",
"numpy.mean",
"click.Path",
"os.path.join",
"os.path.exists",
"click.command",
"datetime.datetime.now",
"tqdm.tqdm",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlim",
"logging.basicConfig",
"matplotlib.pyplot.plot",
"collections.namedtuple",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((1459, 1516), 'collections.namedtuple', 'namedtuple', (['"""Stats"""', '"""threshold topk precision recall f1"""'], {}), "('Stats', 'threshold topk precision recall f1')\n", (1469, 1516), False, 'from collections import namedtuple\n'), ((1519, 1534), 'click.command', 'click.command', ([], {}), '()\n', (1532, 1534), False, 'import click\n'), ((1746, 1850), 'click.option', 'click.option', (['"""--dataset"""', '"""-d"""', '"""dataset"""'], {'default': '"""PeTaL"""', 'help': '"""Name of dataset, e.g., "PeTaL"."""'}), '(\'--dataset\', \'-d\', \'dataset\', default=\'PeTaL\', help=\n \'Name of dataset, e.g., "PeTaL".\')\n', (1758, 1850), False, 'import click\n'), ((1847, 1969), 'click.option', 'click.option', (['"""--verbose"""', '"""-v"""'], {'type': 'click.BOOL', 'is_flag': '(True)', 'default': '(False)', 'required': '(False)', 'help': '"""Verbose output."""'}), "('--verbose', '-v', type=click.BOOL, is_flag=True, default=\n False, required=False, help='Verbose output.')\n", (1859, 1969), False, 'import click\n'), ((2244, 2333), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""[%(asctime)s:%(name)s] %(message)s"""'}), "(level=logging.INFO, format=\n '[%(asctime)s:%(name)s] %(message)s')\n", (2263, 2333), False, 'import logging\n'), ((2366, 2390), 'logging.getLogger', 'logging.getLogger', (['"""P&R"""'], {}), "('P&R')\n", (2383, 2390), False, 'import logging\n'), ((2453, 2547), 'numpy.load', 'np.load', (['f"""{match_path}/{DATASET}/results/{MODEL}-{DATASET}-labels.npy"""'], {'allow_pickle': '(True)'}), "(f'{match_path}/{DATASET}/results/{MODEL}-{DATASET}-labels.npy',\n allow_pickle=True)\n", (2460, 2547), True, 'import numpy as np\n'), ((2561, 2655), 'numpy.load', 'np.load', (['f"""{match_path}/{DATASET}/results/{MODEL}-{DATASET}-scores.npy"""'], {'allow_pickle': '(True)'}), "(f'{match_path}/{DATASET}/results/{MODEL}-{DATASET}-scores.npy',\n allow_pickle=True)\n", (2568, 2655), True, 'import numpy as np\n'), ((2670, 2739), 'numpy.load', 'np.load', (['f"""{match_path}/{DATASET}/test_labels.npy"""'], {'allow_pickle': '(True)'}), "(f'{match_path}/{DATASET}/test_labels.npy', allow_pickle=True)\n", (2677, 2739), True, 'import numpy as np\n'), ((2759, 2829), 'numpy.load', 'np.load', (['f"""{match_path}/{DATASET}/train_labels.npy"""'], {'allow_pickle': '(True)'}), "(f'{match_path}/{DATASET}/train_labels.npy', allow_pickle=True)\n", (2766, 2829), True, 'import numpy as np\n'), ((3398, 3414), 'tqdm.tqdm', 'tqdm', (['thresholds'], {}), '(thresholds)\n', (3402, 3414), False, 'from tqdm import tqdm\n'), ((4132, 4146), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4144, 4146), False, 'from datetime import datetime\n'), ((4292, 4345), 'os.path.join', 'os.path.join', (['ALL_PLOTS_PATH', 'f"""{date_str}_{comment}"""'], {}), "(ALL_PLOTS_PATH, f'{date_str}_{comment}')\n", (4304, 4345), False, 'import os\n'), ((4741, 4751), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (4749, 4751), True, 'from matplotlib import pyplot as plt\n'), ((4756, 4841), 'matplotlib.pyplot.title', 'plt.title', (['f"""Precision-Recall Curve for {MODEL} on {DATASET}, varying threshold"""'], {}), "(f'Precision-Recall Curve for {MODEL} on {DATASET}, varying threshold'\n )\n", (4765, 4841), True, 'from matplotlib import pyplot as plt\n'), ((4841, 4872), 'matplotlib.pyplot.plot', 'plt.plot', (['ps', 'rs'], {'linestyle': '"""-"""'}), "(ps, rs, linestyle='-')\n", (4849, 4872), True, 'from matplotlib import pyplot as plt\n'), ((4877, 4897), 'matplotlib.pyplot.xlabel', 'plt.xlabel', 
(['"""Recall"""'], {}), "('Recall')\n", (4887, 4897), True, 'from matplotlib import pyplot as plt\n'), ((4902, 4916), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1)'], {}), '(0, 1)\n', (4910, 4916), True, 'from matplotlib import pyplot as plt\n'), ((4921, 4944), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Precision"""'], {}), "('Precision')\n", (4931, 4944), True, 'from matplotlib import pyplot as plt\n'), ((4949, 4963), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (4957, 4963), True, 'from matplotlib import pyplot as plt\n'), ((4981, 5046), 'os.path.join', 'os.path.join', (['PLOTS_PATH', 'f"""{time_str}_prc_{MODEL}_{DATASET}.png"""'], {}), "(PLOTS_PATH, f'{time_str}_prc_{MODEL}_{DATASET}.png')\n", (4993, 5046), False, 'import os\n'), ((5051, 5113), 'matplotlib.pyplot.savefig', 'plt.savefig', ([], {'fname': 'PLOT_PATH', 'facecolor': '"""w"""', 'transparent': '(False)'}), "(fname=PLOT_PATH, facecolor='w', transparent=False)\n", (5062, 5113), True, 'from matplotlib import pyplot as plt\n'), ((5174, 5183), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5181, 5183), True, 'from matplotlib import pyplot as plt\n'), ((5331, 5341), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (5339, 5341), True, 'from matplotlib import pyplot as plt\n'), ((5346, 5434), 'matplotlib.pyplot.title', 'plt.title', (['f"""Precision, Recall, and F1 Score by Threshold for {MODEL} on {DATASET}"""'], {}), "(\n f'Precision, Recall, and F1 Score by Threshold for {MODEL} on {DATASET}')\n", (5355, 5434), True, 'from matplotlib import pyplot as plt\n'), ((5434, 5484), 'matplotlib.pyplot.plot', 'plt.plot', (['ts', 'ps'], {'linestyle': '"""-"""', 'label': '"""Precision"""'}), "(ts, ps, linestyle='-', label='Precision')\n", (5442, 5484), True, 'from matplotlib import pyplot as plt\n'), ((5489, 5536), 'matplotlib.pyplot.plot', 'plt.plot', (['ts', 'rs'], {'linestyle': '"""-"""', 'label': '"""Recall"""'}), "(ts, rs, linestyle='-', label='Recall')\n", (5497, 5536), True, 'from matplotlib import pyplot as plt\n'), ((5541, 5591), 'matplotlib.pyplot.plot', 'plt.plot', (['ts', 'f1s'], {'linestyle': '"""-"""', 'label': '"""F1 score"""'}), "(ts, f1s, linestyle='-', label='F1 score')\n", (5549, 5591), True, 'from matplotlib import pyplot as plt\n'), ((5596, 5619), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Threshold"""'], {}), "('Threshold')\n", (5606, 5619), True, 'from matplotlib import pyplot as plt\n'), ((5624, 5638), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1)'], {}), '(0, 1)\n', (5632, 5638), True, 'from matplotlib import pyplot as plt\n'), ((5643, 5664), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Metrics"""'], {}), "('Metrics')\n", (5653, 5664), True, 'from matplotlib import pyplot as plt\n'), ((5669, 5683), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (5677, 5683), True, 'from matplotlib import pyplot as plt\n'), ((5688, 5700), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5698, 5700), True, 'from matplotlib import pyplot as plt\n'), ((5718, 5784), 'os.path.join', 'os.path.join', (['PLOTS_PATH', 'f"""{time_str}_prf1_{MODEL}_{DATASET}.png"""'], {}), "(PLOTS_PATH, f'{time_str}_prf1_{MODEL}_{DATASET}.png')\n", (5730, 5784), False, 'import os\n'), ((5789, 5851), 'matplotlib.pyplot.savefig', 'plt.savefig', ([], {'fname': 'PLOT_PATH', 'facecolor': '"""w"""', 'transparent': '(False)'}), "(fname=PLOT_PATH, facecolor='w', transparent=False)\n", (5800, 5851), True, 'from matplotlib import pyplot as plt\n'), ((5912, 5921), 
'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5919, 5921), True, 'from matplotlib import pyplot as plt\n'), ((6064, 6074), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (6072, 6074), True, 'from matplotlib import pyplot as plt\n'), ((6079, 6157), 'matplotlib.pyplot.title', 'plt.title', (['f"""Number of Labels Predicted by Threshold for {MODEL} on {DATASET}"""'], {}), "(f'Number of Labels Predicted by Threshold for {MODEL} on {DATASET}')\n", (6088, 6157), True, 'from matplotlib import pyplot as plt\n'), ((6162, 6222), 'matplotlib.pyplot.plot', 'plt.plot', (['ts', 'topks'], {'linestyle': '"""-"""', 'label': '"""Number of Labels"""'}), "(ts, topks, linestyle='-', label='Number of Labels')\n", (6170, 6222), True, 'from matplotlib import pyplot as plt\n'), ((6227, 6250), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Threshold"""'], {}), "('Threshold')\n", (6237, 6250), True, 'from matplotlib import pyplot as plt\n'), ((6255, 6269), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1)'], {}), '(0, 1)\n', (6263, 6269), True, 'from matplotlib import pyplot as plt\n'), ((6274, 6294), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Labels"""'], {}), "('Labels')\n", (6284, 6294), True, 'from matplotlib import pyplot as plt\n'), ((6299, 6311), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6309, 6311), True, 'from matplotlib import pyplot as plt\n'), ((6329, 6397), 'os.path.join', 'os.path.join', (['PLOTS_PATH', 'f"""{time_str}_labels_{MODEL}_{DATASET}.png"""'], {}), "(PLOTS_PATH, f'{time_str}_labels_{MODEL}_{DATASET}.png')\n", (6341, 6397), False, 'import os\n'), ((6402, 6464), 'matplotlib.pyplot.savefig', 'plt.savefig', ([], {'fname': 'PLOT_PATH', 'facecolor': '"""w"""', 'transparent': '(False)'}), "(fname=PLOT_PATH, facecolor='w', transparent=False)\n", (6413, 6464), True, 'from matplotlib import pyplot as plt\n'), ((6525, 6534), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (6532, 6534), True, 'from matplotlib import pyplot as plt\n'), ((3940, 3970), 'os.path.exists', 'os.path.exists', (['ALL_PLOTS_PATH'], {}), '(ALL_PLOTS_PATH)\n', (3954, 3970), False, 'import os\n'), ((3980, 4004), 'os.mkdir', 'os.mkdir', (['ALL_PLOTS_PATH'], {}), '(ALL_PLOTS_PATH)\n', (3988, 4004), False, 'import os\n'), ((4358, 4384), 'os.path.exists', 'os.path.exists', (['PLOTS_PATH'], {}), '(PLOTS_PATH)\n', (4372, 4384), False, 'import os\n'), ((4394, 4414), 'os.mkdir', 'os.mkdir', (['PLOTS_PATH'], {}), '(PLOTS_PATH)\n', (4402, 4414), False, 'import os\n'), ((1585, 1608), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (1595, 1608), False, 'import click\n'), ((1690, 1713), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (1700, 1713), False, 'import click\n'), ((7500, 7532), 'numpy.argmax', 'np.argmax', (['(res_score < threshold)'], {}), '(res_score < threshold)\n', (7509, 7532), True, 'import numpy as np\n'), ((7721, 7787), 'numpy.mean', 'np.mean', (['[(1 if x in res_label[:topk] else 0) for x in test_label]'], {}), '([(1 if x in res_label[:topk] else 0) for x in test_label])\n', (7728, 7787), True, 'import numpy as np\n'), ((8082, 8096), 'numpy.mean', 'np.mean', (['topks'], {}), '(topks)\n', (8089, 8096), True, 'import numpy as np\n'), ((8098, 8117), 'numpy.mean', 'np.mean', (['precisions'], {}), '(precisions)\n', (8105, 8117), True, 'import numpy as np\n'), ((8119, 8135), 'numpy.mean', 'np.mean', (['recalls'], {}), '(recalls)\n', (8126, 8135), True, 'import numpy as np\n'), ((8137, 8149), 'numpy.mean', 'np.mean', (['f1s'], {}), 
'(f1s)\n', (8144, 8149), True, 'import numpy as np\n'), ((7639, 7705), 'numpy.mean', 'np.mean', (['[(1 if x in test_label else 0) for x in res_label[:topk]]'], {}), '([(1 if x in test_label else 0) for x in res_label[:topk]])\n', (7646, 7705), True, 'import numpy as np\n')]
|
#################### Create new columns by combining/transforming string columns of a DataFrame ####################
# Function used: apply
import pandas as pd
import numpy as np
from pandas import DataFrame, Series
# df = pd.DataFrame({'id' : [1,2,10,20,100,200],
# "name":['aaa','bbb','ccc','ddd','eee','fff']})
# print(df)
#
# #Create a new column by transforming an existing column
# #Build a new id column from the original id, unified to a width of 5 digits (missing digits are padded with leading zeros)
# df['id_2']=df['id'].apply(lambda x:"{:0>5d}".format(x))
# print(df)
# # id name id_2
# # 0 1 aaa 00001
# # 1 2 bbb 00002
# # 2 10 ccc 00010
# # 3 20 ddd 00020
# # 4 100 eee 00100
# # 5 200 fff 00200
#
# # #format(): formats the argument passed in () according to the format spec that precedes it.
# #
# # x=3.141592
# # print("{:.2f}".format(x))
# # # 3.14
# #
# # print("{:+.2f}".format(x))
# # # +3.14
# #
# # x=-3.141592
# # print("{:+.2f}".format(x))
# # # -3.14
# #
# # x=2.718
# # print("{:.0f}".format(x)) # 정수를 출력하라(소수 점 첫째자리에서 반올림)
# # # 3
# #
# # x=3.147592
# # print("{:.2f}".format(x)) # .2f(소수 점 셋째자리에서 반올림)
# # # 3.15
# #
# # x=5
# # print("{:0>2d}".format(x)) # 0>2D(D: 너비가 2, 0으로 채워라 )
# # # 05
# #
# # x=7777777777
# # print("{:0>5d}".format(x)) # 0>2D(D: 너비가 5, 0으로 채워라 ,너비 이상은 무시=>원 형태 유지)
# # # 7777777777
# # print("{:,}".format(x))
# # # 7,777,777,777
# #
# # x=0.25
# # print("{:.2%}".format(x))
# # # 25.00%
# #
#
# #name + id_2: recombine => join two columns (apply now operates on two columns)
# df['id_name'] = df[['id_2','name']].apply(lambda x: '_'.join(x)) # the axis must be specified, otherwise the result is NaN
# print(df)
# # id name id_2 id_name
# # 0 1 aaa 00001 NaN
# # 1 2 bbb 00002 NaN
# # 2 10 ccc 00010 NaN
# # 3 20 ddd 00020 NaN
# # 4 100 eee 00100 NaN
# # 5 200 fff 00200 NaN
#
# df['id_name'] = df[['id_2','name']].apply(lambda x: '_'.join(x),axis=1) # axis set to 1
# print(df)
# # id name id_2 id_name
# # 0 1 aaa 00001 00001_aaa
# # 1 2 bbb 00002 00002_bbb
# # 2 10 ccc 00010 00010_ccc
# # 3 20 ddd 00020 00020_ddd
# # 4 100 eee 00100 00100_eee
# # 5 200 fff 00200 00200_fff
#
# df['id_name'] = df[['id_2','name']].apply(lambda x: '_'.join(x),axis=1)
# print(df)
# # id name id_2 id_name
# # 0 1 aaa 00001 00001_aaa
# # 1 2 bbb 00002 00002_bbb
# # 2 10 ccc 00010 00010_ccc
# # 3 20 ddd 00020 00020_ddd
# # 4 100 eee 00100 00100_eee
# # 5 200 fff 00200 00200_fff
#
#
# #Add a new column that shows id with decimal places
# df['id_3']=df['id'].apply(lambda x: "{:.2f}".format(x))
# print(df)
# # id name id_2 id_name id_3
# # 0 1 aaa 00001 00001_aaa 1.00
# # 1 2 bbb 00002 00002_bbb 2.00
# # 2 10 ccc 00010 00010_ccc 10.00
# # 3 20 ddd 00020 00020_ddd 20.00
# # 4 100 eee 00100 00100_eee 100.00
# # 5 200 fff 00200 00200_fff 200.00
#
# df['name_3']=df['name'].apply(lambda x:x.upper()) #upper() : converts to uppercase
# print(df)
# # id name id_2 id_name id_3 name_3
# # 0 1 aaa 00001 00001_aaa 1.00 AAA
# # 1 2 bbb 00002 00002_bbb 2.00 BBB
# # 2 10 ccc 00010 00010_ccc 10.00 CCC
# # 3 20 ddd 00020 00020_ddd 20.00 DDD
# # 4 100 eee 00100 00100_eee 100.00 EEE
# # 5 200 fff 00200 00200_fff 200.00 FFF
#
#
# # Add an id_name_3 column
# # id_name_3 => 1.00:AAA
#
# df['id_name_3'] = df[['id_3','name_3']].apply(lambda x: ':'.join(x),axis=1)
# print(df)
# # id name id_2 id_name id_3 name_3 id_name_3
# # 0 1 aaa 00001 00001_aaa 1.00 AAA 1.00:AAA
# # 1 2 bbb 00002 00002_bbb 2.00 BBB 2.00:BBB
# # 2 10 ccc 00010 00010_ccc 10.00 CCC 10.00:CCC
# # 3 20 ddd 00020 00020_ddd 20.00 DDD 20.00:DDD
# # 4 100 eee 00100 00100_eee 100.00 EEE 100.00:EEE
# # 5 200 fff 00200 00200_fff 200.00 FFF 200.00:FFF
#
###################################################################################################################
#groupby aggregation functions
# 1. Grouping with a dictionary
# Build a mapping dictionary whose keys are the existing labels and whose values are the group names
# data= : pass in the data; columns and index identify the entries.
df = DataFrame(data=np.arange(20).reshape(4,5),columns=['c1','c2','c3','c4','c5'],index=['r1','r2','r3','r4'])
print(df)
# c1 c2 c3 c4 c5
# r1 0 1 2 3 4
# r2 5 6 7 8 9
# r3 10 11 12 13 14
# r4 15 16 17 18 19
# row_g1 = r1+r2 : row-wise grouping creates new rows (elements in the same column are added together: sum())
# row_g2 = r3+r4
mdr = {'r1':'row_g1','r2':'row_g2','r3':'row_g3','r4':'row_g4'}
gbr = df.groupby(mdr)
print(gbr.sum())
# c1 c2 c3 c4 c5
# row_g1 0 1 2 3 4
# row_g2 5 6 7 8 9
# row_g3 10 11 12 13 14
# row_g4 15 16 17 18 19
mdr = {'r1':'row_g1','r2':'row_g1','r3':'row_g2','r4':'row_g2'}
gbr = df.groupby(mdr)
print(gbr.sum())
# c1 c2 c3 c4 c5
# row_g1 5 7 9 11 13
# row_g2 25 27 29 31 33
print(gbr.mean())
# c1 c2 c3 c4 c5
# row_g1 2.5 3.5 4.5 5.5 6.5
# row_g2 12.5 13.5 14.5 15.5 16.5
print(gbr.std())
# c1 c2 c3 c4 c5
# row_g1 3.535534 3.535534 3.535534 3.535534 3.535534
# row_g2 3.535534 3.535534 3.535534 3.535534 3.535534
# col_g1 = c1+c2 : column-wise grouping creates new columns (elements in the same row are added together: sum()); axis=1 is required.
# col_g2 = c3+c4+c5
mdc = {'c1':'col_g1','c2':'col_g1','c3':'col_g2','c4':'col_g2','c5':'col_g2'}
gbc = df.groupby(mdc,axis=1) # axis=1 must be given
print(gbc.sum())
# col_g1 col_g2
# r1 1 9
# r2 11 24
# r3 21 39
# r4 31 54
print(type(mdr))
# <class 'dict'>
print(mdr)
# {'r1': 'row_g1', 'r2': 'row_g1', 'r3': 'row_g2', 'r4': 'row_g2'}
# dic -> Series
# Grouping with a Series
msr = Series(mdr)
print(type(msr))
# <class 'pandas.core.series.Series'>
print(msr)
# r1 row_g1
# r2 row_g1
# r3 row_g2
# r4 row_g2
# dtype: object
print(df.groupby(msr).sum()) # same result as with the dictionary
# c1 c2 c3 c4 c5
# row_g1 5 7 9 11 13
# row_g2 25 27 29 31 33
msc = Series(mdc)
print(df.groupby(msc,axis=1).sum())
# col_g1 col_g2
# r1 1 9
# r2 11 24
# r3 21 39
# r4 31 54
#Grouping with a function
# Group with the function rgf instead of a dict or Series (each index label of df is passed in as x)
def rgf(x) :
if x == 'r1' or x == 'r2':
rg = 'row_g1'
else:
rg = 'row_g2'
return rg
# Just like the dict/Series mappings, the groups follow the function's return values and the aggregated DataFrame is built accordingly
print(df.groupby(rgf).sum())
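#         c1  c2  c3  c4  c5
# row_g1   5   7   9  11  13
# row_g2  25  27  29  31  33
# A quick illustrative sketch (added here, not part of the original): a function can
# also group columns when axis=1 is given; cgf below is a made-up helper that mirrors
# the mdc dictionary above.
def cgf(x):
    if x in ('c1', 'c2'):
        return 'col_g1'
    return 'col_g2'
print(df.groupby(cgf, axis=1).sum())
#     col_g1  col_g2
# r1       1       9
# r2      11      24
# r3      21      39
# r4      31      54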
|
[
"numpy.arange",
"pandas.Series"
] |
[((5890, 5901), 'pandas.Series', 'Series', (['mdr'], {}), '(mdr)\n', (5896, 5901), False, 'from pandas import DataFrame, Series\n'), ((6222, 6233), 'pandas.Series', 'Series', (['mdc'], {}), '(mdc)\n', (6228, 6233), False, 'from pandas import DataFrame, Series\n'), ((4245, 4258), 'numpy.arange', 'np.arange', (['(20)'], {}), '(20)\n', (4254, 4258), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generic evaluation script that evaluates a model using a given dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import click
import yaml
from collections import Iterable, defaultdict
from itertools import cycle
import subprocess
import PIL
import math
import os
from PIL import Image
import cv2
import numpy as np
import tensorflow as tf
from tensorflow.python.training import monitored_session
from datasets.plants import read_label_file
from datasets import dataset_factory
from nets import nets_factory
from preprocessing import preprocessing_factory
from matplotlib.font_manager import FontManager
import matplotlib
if os.environ.get('DISPLAY', '') == '':
print('no display found. Using non-interactive Agg backend')
matplotlib.use('Agg')
import matplotlib.pyplot as plt
slim = tf.contrib.slim
_R_MEAN = 123.68
_G_MEAN = 116.78
_B_MEAN = 103.94
OUTPUT_MODEL_NODE_NAMES_DICT = {
'resnet_v2_50': 'resnet_v2_50/predictions/Reshape_1',
'mobilenet_v1': 'MobilenetV1/Predictions/Reshape_1',
}
def define_tf_flags():
BATCH_SIZE = 100
tf.app.flags.DEFINE_integer(
'batch_size', BATCH_SIZE, 'The number of samples in each batch.')
tf.app.flags.DEFINE_integer(
'max_num_batches', None,
        'Max number of batches to evaluate; by default, use all.')
tf.app.flags.DEFINE_string(
'master', '', 'The address of the TensorFlow master to use.')
tf.app.flags.DEFINE_string(
'checkpoint_path', None,
'The directory where the model was written to or an absolute path to a '
'checkpoint file.')
tf.app.flags.DEFINE_string(
'eval_dir', '/tmp/tfmodel/',
'Directory where the results are saved to.')
tf.app.flags.DEFINE_integer(
'num_preprocessing_threads', 4,
'The number of threads used to create the batches.')
tf.app.flags.DEFINE_string(
'dataset_name', 'plants', 'The name of the dataset to load.')
tf.app.flags.DEFINE_string(
'dataset_split_name', 'validation',
'The name of the train/test split.')
tf.app.flags.DEFINE_string(
'dataset_dir', None,
'The directory where the dataset files are stored.')
tf.app.flags.DEFINE_integer(
'labels_offset', 0,
'An offset for the labels in the dataset. This flag is primarily used to '
'evaluate the VGG and ResNet architectures which do not use a background '
'class for the ImageNet dataset.')
tf.app.flags.DEFINE_string(
'model_name', 'mobilenet_v1',
'The name of the architecture to evaluate.')
tf.app.flags.DEFINE_string(
'preprocessing_name', None,
'The name of the preprocessing to use. If left '
'as `None`, then the model_name flag is used.')
tf.app.flags.DEFINE_float(
'moving_average_decay', None,
'The decay to use for the moving average.'
'If left as None, then moving averages are not used.')
tf.app.flags.DEFINE_integer(
'eval_image_size', None, 'Eval image size')
FLAGS = tf.app.flags.FLAGS
def get_dataset_dir(config):
return get_config_value(config, 'dataset_dir')
def get_config_value(config, key):
return config.get(key) or getattr(FLAGS, key)
def get_checkpoint_dir_path(config):
return get_config_value(config, 'checkpoint_path')
def get_lastest_check_point(config):
checkpoint_path = get_checkpoint_dir_path(config)
if tf.gfile.IsDirectory(checkpoint_path):
checkpoint_path = tf.train.latest_checkpoint(checkpoint_path)
return checkpoint_path
def inspect_tfrecords(tfrecords_filename):
record_iterator = tf.python_io.tf_record_iterator(path=tfrecords_filename)
examples = []
for string_record in record_iterator:
example = tf.train.Example()
example.ParseFromString(string_record)
examples.append(example)
# print(example)
return examples
def get_info(config, checkpoint_path=None,
calculate_confusion_matrix=False):
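    # Build the full evaluation graph: dataset provider, preprocessing, network,
    # streaming metrics, an optional confusion matrix, and the gradient of the
    # cross-entropy loss w.r.t. the input images (used for saliency maps).
    # Returns the relevant tensors and bookkeeping values in a dict.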
dataset_dir = get_dataset_dir(config)
model_name = get_model_name(config)
# tf.logging.set_verbosity(tf.logging.INFO)
tf.Graph().as_default()
tf_global_step = slim.get_or_create_global_step()
######################
# Select the dataset #
######################
dataset = dataset_factory.get_dataset(
FLAGS.dataset_name, FLAGS.dataset_split_name, dataset_dir)
####################
# Select the model #
####################
num_classes = (dataset.num_classes - FLAGS.labels_offset)
network_fn = nets_factory.get_network_fn(
model_name,
num_classes=num_classes,
is_training=False)
##############################################################
# Create a dataset provider that loads data from the dataset #
##############################################################
provider = slim.dataset_data_provider.DatasetDataProvider(
dataset,
        num_epochs=1,  # read each image only once
# num_readers=1,
shuffle=False,
common_queue_capacity=2 * FLAGS.batch_size,
common_queue_min=FLAGS.batch_size)
# common_queue_min=FLAGS.batch_size)
[image, label] = provider.get(['image', 'label'])
label -= FLAGS.labels_offset
raw_images = image
#####################################
# Select the preprocessing function #
#####################################
preprocessing_name = FLAGS.preprocessing_name or model_name
image_preprocessing_fn = preprocessing_factory.get_preprocessing(
preprocessing_name,
is_training=False)
eval_image_size = FLAGS.eval_image_size or network_fn.default_image_size
image = image_preprocessing_fn(image, eval_image_size, eval_image_size)
images, labels = tf.train.batch(
[image, label],
batch_size=FLAGS.batch_size,
num_threads=FLAGS.num_preprocessing_threads,
allow_smaller_final_batch=True,
capacity=5 * FLAGS.batch_size)
####################
# Define the model #
####################
logits, _ = network_fn(images)
if FLAGS.moving_average_decay:
variable_averages = tf.train.ExponentialMovingAverage(
FLAGS.moving_average_decay, tf_global_step)
variables_to_restore = variable_averages.variables_to_restore(
slim.get_model_variables())
variables_to_restore[tf_global_step.op.name] = tf_global_step
else:
variables_to_restore = slim.get_variables_to_restore()
predictions = tf.argmax(logits, 1)
one_hot_predictions = slim.one_hot_encoding(
predictions, dataset.num_classes - FLAGS.labels_offset)
labels = tf.squeeze(labels)
# Define the metrics:
names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
'Accuracy': slim.metrics.streaming_accuracy(predictions, labels),
'Recall_5': slim.metrics.streaming_recall_at_k(
logits, labels, 5),
})
if calculate_confusion_matrix:
confusion_matrix = tf.confusion_matrix(labels=labels,
num_classes=num_classes,
predictions=predictions)
else:
confusion_matrix = None
# Print the summaries to screen.
for name, value in names_to_values.items():
summary_name = 'eval/%s' % name
op = tf.summary.scalar(summary_name, value, collections=[])
op = tf.Print(op, [value], summary_name)
tf.add_to_collection(tf.GraphKeys.SUMMARIES, op)
# TODO(sguada) use num_epochs=1
if FLAGS.max_num_batches:
num_batches = FLAGS.max_num_batches
else:
# This ensures that we make a single pass over all of the data.
num_batches = math.ceil(dataset.num_samples / float(FLAGS.batch_size))
checkpoint_path = checkpoint_path or get_lastest_check_point(config)
tf.logging.info('Evaluating %s' % checkpoint_path)
labels_to_names = read_label_file(dataset_dir)
probabilities = tf.nn.softmax(logits)
softmax_cross_entropy_loss = tf.losses.softmax_cross_entropy(
one_hot_predictions, logits, label_smoothing=0.0, weights=1.0)
grad_imgs = tf.gradients(softmax_cross_entropy_loss,
images)[0]
return {
'labels_to_names': labels_to_names,
'checkpoint_path': checkpoint_path,
'num_batches': num_batches,
'names_to_values': names_to_values,
'names_to_updates': names_to_updates,
'variables_to_restore': variables_to_restore,
'images': images,
'raw_images': raw_images,
'network_fn': network_fn,
'labels': labels,
'logits': logits,
'probabilities': probabilities,
'predictions': predictions,
'confusion_matrix': confusion_matrix,
'loss': softmax_cross_entropy_loss,
'grad_imgs': grad_imgs,
}
def get_monitored_session(checkpoint_path):
session_creator = monitored_session.ChiefSessionCreator(
checkpoint_filename_with_path=checkpoint_path,
# scaffold=scaffold,
# master=master,
# config=config
)
return monitored_session.MonitoredSession(
session_creator=session_creator)
def plot_confusion_matrix(confusion_matrix, labels_to_names=None,
save_dir='.'):
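    # Render the aggregated confusion matrix as a seaborn heat map; zero-valued
    # cells are masked out and the tick labels use human-readable class names
    # when a labels_to_names mapping is supplied.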
import seaborn as sns
set_matplot_zh_font()
# ax = plt.subplot()
fig, ax = plt.subplots()
# the size of A4 paper
fig.set_size_inches(18, 15)
# https://stackoverflow.com/questions/22548813/python-color-map-but-with-all-zero-values-mapped-to-black
# confusion_matrix = np.ma.masked_where(confusion_matrix < 0.01,
# confusion_matrix)
cmap = plt.get_cmap('Accent')
# cmap = plt.get_cmap('coolwarm')
# cmap = plt.get_cmap('plasma')
# cmap = plt.get_cmap('Blues')
# cmap.set_bad(color='black')
mask = np.zeros_like(confusion_matrix)
mask[confusion_matrix == 0] = True
# sns.set(font_scale=1)
with sns.axes_style('darkgrid'):
sns.heatmap(confusion_matrix,
linewidths=0.2,
linecolor='#eeeeee',
xticklabels=True,
yticklabels=True,
mask=mask, annot=False, ax=ax, cmap=cmap)
n = confusion_matrix.shape[0]
# labels, title and ticks
ax.set_xlabel('Predicted labels')
ax.set_ylabel('True labels')
ax.set_title('Confusion Matrix')
axis = [labels_to_names[i] if labels_to_names else i
for i in range(n)]
ax.xaxis.set_ticklabels(axis, rotation=270)
ax.yaxis.set_ticklabels(axis, rotation=0)
pic_path = os.path.join(save_dir, 'confusion_matrix.png')
plt.savefig(pic_path)
print(pic_path, 'saved')
print('plot shown')
plt.show()
def get_matplot_zh_font():
# From https://blog.csdn.net/kesalin/article/details/71214038
fm = FontManager()
mat_fonts = set(f.name for f in fm.ttflist)
output = subprocess.check_output('fc-list :lang=zh-tw -f "%{family}\n"',
shell=True)
zh_fonts = set(f.split(',', 1)[0] for f in output.split('\n'))
available = list(mat_fonts & zh_fonts)
return available
def set_matplot_zh_font():
available = get_matplot_zh_font()
if len(available) > 0:
        plt.rcParams['font.sans-serif'] = [available[0]]  # set the default font
plt.rcParams['axes.unicode_minus'] = False
def deprocess_image(x, target_std=0.15):
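    # Collapse a gradient image into a 2-D saliency map: absolute value,
    # channel-wise max, normalisation to target_std, then scaling and
    # clipping into a 0-255 uint8 image.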
# normalize tensor
x = np.abs(x)
x = np.max(x, axis=2)
x -= x.mean()
std = x.std()
if std:
x /= std
x *= target_std
x *= 255
x = np.clip(x, 0, 255).astype('uint8')
return x
def plot_image_in_grids(image_list, n_columns, file_name=None):
image_table = chunks(image_list, n_columns)
n_row = len(image_table)
plt.figure(figsize=(15, 10))
i = 1
for row in image_table:
for col in row:
plt.subplot(n_row, n_columns, i)
plt.imshow(col)
i += 1
if file_name:
plt.savefig(file_name)
print(file_name, 'saved')
else:
print('plot shown')
plt.show()
def plot_saliency(saliency, image, file_name=None):
plt.figure(figsize=(15, 10))
plot_image_in_grids([
[saliency, image]
], file_name)
def _eval_tensors(config, checkpoint_path=None, keys=None, use_cached=False):
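    # Run the evaluation graph batch by batch and aggregate the requested
    # tensors (confusion matrices are summed, everything else concatenated).
    # With use_cached=True the aggregate is read from / written to an HDF5
    # file stored next to the checkpoint.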
checkpoint_dir_path = get_checkpoint_dir_path(config)
if use_cached:
aggregated = load_var(checkpoint_dir_path, 'run_info_result.h5')
if aggregated is not None:
return aggregated
calculate_confusion_matrix = True
info = get_info(config,
calculate_confusion_matrix=calculate_confusion_matrix)
num_batches = info['num_batches']
aggregated = {}
checkpoint_path = checkpoint_path or get_lastest_check_point(config)
with get_monitored_session(checkpoint_path) as sess:
for i in range(int(math.ceil(num_batches))):
print('batch #{} of {}'.format(i, num_batches))
params = {
k: v
for k, v in info.items()
if isinstance(v, tf.Tensor) and (not keys or k in keys)
}
try:
feed_dict = {}
res = sess.run(params, feed_dict=feed_dict)
except:
import traceback
traceback.print_exc()
raise
for k in res.keys():
value = res[k]
if k == 'confusion_matrix':
if k not in aggregated:
aggregated[k] = np.matrix(value)
else:
aggregated[k] += np.matrix(value)
else:
if k not in aggregated:
aggregated[k] = []
if isinstance(value, Iterable):
aggregated[k].extend(value)
else:
aggregated[k].append(value)
labels = res['labels']
print('len labels', len(labels))
all_labels = aggregated['labels']
print('all_labels length', len(all_labels))
print('all_labels unique length', len(set(all_labels)))
if use_cached:
save_var(checkpoint_dir_path, 'run_info_result.h5', aggregated)
return aggregated
def _run_saliency_maps(config, use_cached=False):
checkpoint_path = get_lastest_check_point(config)
keys = [
'labels',
'images',
'grad_imgs',
]
aggregated = _eval_tensors(config, keys=keys, use_cached=use_cached)
grad_imgs = aggregated['grad_imgs']
images = aggregated['images']
prefix = ''
save_saliency_maps(config, grad_imgs, images, prefix,
labels=aggregated['labels'])
def _run_info(config, use_cached=False):
checkpoint_path = get_lastest_check_point(config)
keys = [
'labels',
'images',
# 'raw_images',
'logits',
'probabilities',
'predictions',
'confusion_matrix',
# 'loss',
'grad_imgs',
]
aggregated = _eval_tensors(config, keys=keys, use_cached=use_cached)
from collections import Counter
all_labels = aggregated['labels']
c = Counter(all_labels)
kv_pairs = sorted(dict(c).items(), key=lambda p: p[0])
for k, v in kv_pairs:
print(k, v)
def save_var(directory, file_name, info):
import h5py
info_file_path = os.path.join(directory, file_name)
f = h5py.File(info_file_path, 'w')
for k, v in info.items():
f[k] = v
f.close()
print(info_file_path, 'saved')
def load_var(directory, file_name):
import h5py
info_file_path = os.path.join(directory, file_name)
try:
with h5py.File(info_file_path, 'r') as f:
return {
k: f[k][:] for k in f.keys()
}
except IOError:
return None
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
return [l[i:i + n] for i in range(0, len(l), n)]
def save_saliency_maps(config, grad_imgs, images, prefix='', labels=None):
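    # For up to 10 images per class, save a grid of (saliency map, restored
    # input image, blended overlay) under the saliency_maps/ directory.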
n = images.shape[0]
save_dir = 'saliency_maps'
labels_to_names = read_label_file(get_dataset_dir(config))
label_count_map = defaultdict(int)
try:
os.makedirs(save_dir)
except OSError:
pass
for j in range(n):
image = images[j]
grad_img = grad_imgs[j]
label = labels[j]
label_name = labels_to_names[label]
if label_count_map[label] >= 10:
continue
file_name = '{}/{}{:03d}.jpg'.format(
save_dir,
'{:02}_{}_{}'.format(
label, label_name.encode('utf-8'),
prefix) if labels is not None else prefix,
label_count_map[label])
saliency = deprocess_image(grad_img, target_std=0.3)
restored_image = ((image / 2 + 0.5) * 255).astype('uint8')
blend = get_image_with_saliency_map(restored_image, saliency)
plot_image_in_grids([
saliency,
restored_image,
blend,
], n_columns=2, file_name=file_name)
label_count_map[label] += 1
def _plot_roc(logits_list, labels, predictions, probabilities,
plot_all_classes=False, save_dir=None):
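    # Binarise the labels one-vs-rest, keep only each sample's highest-scoring
    # class, then plot per-class / micro / macro ROC curves and report the
    # threshold that maximises TPR - FPR on the chosen curve.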
from sklearn.metrics import roc_curve, auc
from sklearn.preprocessing import label_binarize
possible_labels = list(range(max(labels) + 1))
y_binary = label_binarize(labels, classes=possible_labels)
output_matrix = np.array(probabilities)
y_score_matrix = output_matrix
y_score_matrix = np.where(
y_score_matrix == np.max(y_score_matrix, axis=1)[:, None],
y_score_matrix, 0)
tpr = {}
fpr = {}
roc_auc = {}
for i in range(len(possible_labels)):
y_scores = y_score_matrix[:, i]
fpr[i], tpr[i], _ = roc_curve(y_binary[:, i], y_scores)
roc_auc[i] = auc(fpr[i], tpr[i])
    # See http://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html
y_score_matrix_ravel = y_score_matrix.ravel()
i_positive = y_score_matrix_ravel != 0
fpr["highest_probability"], tpr[
"highest_probability"], micro_thresholds = roc_curve(
y_binary.ravel()[i_positive], y_score_matrix_ravel[i_positive])
roc_auc["highest_probability"] = auc(fpr["highest_probability"],
tpr["highest_probability"])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], micro_thresholds = roc_curve(
y_binary.ravel(), y_score_matrix.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
lw = 2
n_classes = len(possible_labels)
# Compute macro-average ROC curve and ROC area
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
# Then interpolate all ROC curves at this points
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
mean_tpr += np.interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
# key_series = 'micro'
key_series = 'highest_probability'
i_optimal_micro = np.argmax(tpr[key_series] - fpr[key_series])
optimal_threshold_fpr = fpr[key_series][i_optimal_micro]
optimal_threshold_tpr = tpr[key_series][i_optimal_micro]
optimal_threshold = micro_thresholds[i_optimal_micro]
print('optimal_threshold_fpr:', optimal_threshold_fpr)
print('optimal_threshold_tpr:', optimal_threshold_tpr)
print('optimal_threshold:', optimal_threshold)
# Plot all ROC curves
plt.figure()
colors = cycle(['aqua', 'darkorange', 'cornflowerblue'])
if plot_all_classes:
for i, color in zip(range(n_classes), colors):
label = 'ROC curve of class {0} (area = {1:0.2f})'.format(
i, roc_auc[i])
label = None
plt.plot(fpr[i], tpr[i], color=color, lw=lw,
label=label)
plt.plot(fpr["highest_probability"], tpr["highest_probability"],
label='ROC curve (area = {0:0.2f})'
''.format(roc_auc["highest_probability"]),
color='blue', linestyle=':', linewidth=4)
# plt.plot([0, 1], [0, 1], 'k--', lw=lw)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC curve')
plt.legend(loc="lower right")
pic_path = os.path.join(save_dir, 'roc_curve.png')
plt.savefig(pic_path)
print(pic_path, 'saved')
print('ROC curve shown')
plt.show()
def _roc_analysis(config, use_cached=False):
checkpoint_dir_path = get_checkpoint_dir_path(config)
keys = [
'logits',
'labels',
'predictions',
'probabilities',
]
info = _eval_tensors(config, keys=keys, use_cached=use_cached)
logits_list = info['logits']
labels = info['labels']
predictions = info['predictions']
probabilities = info['probabilities']
_plot_roc(logits_list, labels, predictions, probabilities,
save_dir=checkpoint_dir_path)
return
def inspect_datasets(config):
dataset_dir = get_dataset_dir(config)
examples = []
for i in range(5):
tfrecords_filename = os.path.join(
dataset_dir,
'plants_validation_{:05d}-of-00005.tfrecord'.format(i))
examples.extend(inspect_tfrecords(tfrecords_filename))
print(len(examples))
examples = []
for i in range(5):
tfrecords_filename = os.path.join(
dataset_dir,
'plants_train_{:05d}-of-00005.tfrecord'.format(i))
examples.extend(inspect_tfrecords(tfrecords_filename))
print(len(examples))
def resize(im, target_smallest_size):
resize_ratio = 1.0 * target_smallest_size / min(list(im.size))
target_size = tuple(int(resize_ratio * l) for l in im.size)
return im.resize(target_size, PIL.Image.BILINEAR)
def central_crop(im, w, h):
half_w = im.size[0] / 2
half_h = im.size[1] / 2
return im.crop(
(half_w - w / 2, half_h - h / 2, half_w + w / 2, half_h + h / 2))
def pre_process_resnet(im, coreml=False):
target_smallest_size = 224
im1 = resize(im, target_smallest_size)
im2 = central_crop(im1, target_smallest_size, target_smallest_size)
arr = np.asarray(im2).astype(np.float32)
if not coreml:
arr[:, :, 0] -= _R_MEAN
arr[:, :, 1] -= _G_MEAN
arr[:, :, 2] -= _B_MEAN
return arr
def central_crop_by_fraction(im, central_fraction):
w = im.size[0]
h = im.size[1]
return central_crop(im, w * central_fraction, h * central_fraction)
def pre_process_mobilenet(im, coreml=False):
    # See preprocess_for_eval in
    # https://github.com/tensorflow/models/blob/master/research/slim/preprocessing/inception_preprocessing.py
im1 = central_crop_by_fraction(im, 0.875)
target_smallest_size = 224
im2 = im1.resize((target_smallest_size, target_smallest_size),
PIL.Image.BILINEAR)
arr = np.asarray(im2).astype(np.float32)
if not coreml:
arr /= 255.0
arr -= 0.5
arr *= 2.0
return arr
def pre_process(config, im, coreml=False):
model_name = get_model_name(config)
return {
'resnet_v2_50': pre_process_resnet,
'mobilenet_v1': pre_process_mobilenet,
}[model_name](im, coreml=coreml)
def get_model_name(config):
model_name = get_config_value(config, 'model_name')
return model_name
def test_inference_by_pb(config, pb_file_path=None, dataset_dir=None):
# http://www.cnblogs.com/arkenstone/p/7551270.html
filenames = [
('20180330/1lZsRrQzj/1lZsRrQzj_5.jpg', u'通泉草'),
('20180330/iUTbDxEoT/iUTbDxEoT_0.jpg', u'杜鵑花仙子'),
# ('20180330/4PdXwYcGt/4PdXwYcGt_5.jpg', u'酢漿草'),
]
for filename, label in filenames:
filename = dataset_dir_file(config, filename)
# image_np = cv2.imread(filename)
result = run_inference_on_file_pb(
config, filename, pb_file_path=pb_file_path,
dataset_dir=dataset_dir)
index = result['prediction_label']
print("Prediction label index:", index)
prediction_name = result['prediction_name']
print("Prediction name:", prediction_name)
print("Top 3 Prediction label index:", ' '.join(result['top_n_names']))
assert prediction_name == label
def dataset_dir_file(config, filename):
filename = os.path.join(get_dataset_dir(config), filename)
return filename
def run_inference_by_pb(config, image_np, pb_file_path=None):
checkpoint_dir_path = get_checkpoint_dir_path(config)
pb_file_path = pb_file_path or '%s/frozen_graph.pb' % checkpoint_dir_path
    with tf.gfile.GFile(pb_file_path, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
return _run_inference_by_graph_def(config, graph_def, image_np)
def _run_inference_by_graph_def(config, graph_def, image_np,
enable_saliency_maps=False):
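    # Preprocess and resize the image, import the frozen GraphDef and run the
    # model's output node (plus, optionally, the input-gradient tensor used
    # for saliency maps) on a single-image batch.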
model_name = get_model_name(config)
image_size = 224
image_np = pre_process(config, image_np)
image_np = cv2.resize(image_np, (image_size, image_size))
# expand dims to shape [None, 299, 299, 3]
image_np = np.expand_dims(image_np, 0)
graph = tf.import_graph_def(graph_def, name='')
with tf.Session(graph=graph) as sess:
input_tensor_name = "input:0"
# output_tensor_name = "resnet_v2_50/predictions/Reshape_1:0"
output_tensor_name = OUTPUT_MODEL_NODE_NAMES_DICT[
model_name] + ":0"
input_tensor = sess.graph.get_tensor_by_name(
input_tensor_name) # get input tensor
output_tensor = sess.graph.get_tensor_by_name(
output_tensor_name) # get output tensor
tensor_map = {
'logits': output_tensor,
}
if enable_saliency_maps:
tensor_map['grad_imgs'] = sess.graph.get_tensor_by_name(
'gradients/MobilenetV1/MobilenetV1/Conv2d_0/Conv2D_grad/Conv2DBackpropInput:0')
result = sess.run(tensor_map, feed_dict={input_tensor: image_np})
return {
'logits': result['logits'],
'grad_imgs': result.get('grad_imgs'),
}
def test_inference_by_coreml(config, coreml_file_path=None, dataset_dir=None):
labels_to_names = read_label_file(get_dataset_dir(config))
dataset_dir = get_dataset_dir(config)
filenames = [
('20180330/1lZsRrQzj/1lZsRrQzj_5.jpg', u'通泉草'),
('20180330/iUTbDxEoT/iUTbDxEoT_0.jpg', u'杜鵑花仙子'),
# ('20180330/4PdXwYcGt/4PdXwYcGt_5.jpg', u'酢漿草'),
]
for filename, label in filenames:
filename = os.path.join(dataset_dir, filename)
image_np = PIL.Image.open(filename)
logits = run_inference_by_coreml(
config, image_np, coreml_file_path=coreml_file_path, )
print('logits', logits)
index = np.argmax(logits)
print("Prediction label index:", index)
prediction_name = labels_to_names[index]
print("Prediction name:", prediction_name)
index_list = np.argsort(logits)
print("Top 3 Prediction label index:",
index_list,
' '.join([labels_to_names[i] for i in list(index_list)]))
assert prediction_name == label
def run_inference_by_coreml(config, image_np, coreml_file_path=None):
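    # Preprocess the image for Core ML, load (or optionally re-convert via
    # tfcoreml) the .mlmodel, run a prediction and return the flattened
    # class probabilities.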
import coremltools
import tfcoreml
model_name = get_model_name(config)
checkpoint_dir_path = get_checkpoint_dir_path(config)
frozen_model_file = '%s/frozen_graph.pb' % checkpoint_dir_path
coreml_model_file = coreml_file_path or '%s/plant.mlmodel' % checkpoint_dir_path
image_np = pre_process(config, image_np, coreml=True)
image = Image.fromarray(image_np.astype('int8'), 'RGB')
input_tensor_shapes = {
"input:0": [1, image_np.shape[0], image_np.shape[1],
3]} # batch size is 1
output_tensor_name = OUTPUT_MODEL_NODE_NAMES_DICT[model_name] + ":0"
coreml_model = coremltools.models.MLModel(coreml_model_file)
convert_model = False
# convert_model = True
if convert_model:
extra_args = {
'resnet_v2_50': {
'red_bias': -_R_MEAN,
'green_bias': -_G_MEAN,
'blue_bias': -_B_MEAN,
},
'mobilenet_v1': {
'red_bias': -1.0,
'green_bias': -1.0,
'blue_bias': -1.0,
'image_scale': 2.0 / 255.,
}
}[model_name]
coreml_model = tfcoreml.convert(
tf_model_path=frozen_model_file,
mlmodel_path=coreml_model_file.replace('.mlmodel',
'_test.mlmodel'),
input_name_shape_dict=input_tensor_shapes,
output_feature_names=[output_tensor_name],
image_input_names=['input:0'],
**extra_args
)
coreml_inputs = {'input__0': image}
coreml_output = coreml_model.predict(coreml_inputs, useCPUOnly=False)
# example output: 'resnet_v2_50__predictions__Reshape_1__0'
probs = coreml_output[
output_tensor_name.replace('/', '__').replace(':', '__')].flatten()
return probs
def run_inference_on_file_pb(config, filename, pb_file_path=None,
dataset_dir=None):
labels_to_names = read_label_file(get_dataset_dir(config))
image_np = PIL.Image.open(filename)
logits = run_inference_by_pb(config, image_np, pb_file_path=pb_file_path)[
'logits']
index = np.argmax(logits, 1)
prediction_name = labels_to_names[index[0]]
index_list = np.argsort(logits, 1)
top_n_names = list(reversed(
[labels_to_names[i] for i in list(index_list[0])]))
print('logits', logits)
result = {
'prediction_name': prediction_name,
'prediction_label': index[0],
'top_n_names': top_n_names,
'logits': logits.tolist(),
}
return result
def test_inference_by_model_files(config, dataset_dir=None,
frozen_graph_path=None,
coreml_file_path=None):
dataset_dir = dataset_dir or get_dataset_dir(config)
test_inference_by_pb(config, pb_file_path=frozen_graph_path,
dataset_dir=dataset_dir)
test_inference_by_coreml(config, coreml_file_path=coreml_file_path,
dataset_dir=dataset_dir)
def get_image_with_saliency_map(image_np, saliency):
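    # Overlay the saliency map on the central square crop of the image,
    # tinting it orange over dark regions and blue over bright ones so the
    # highlight stays visible.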
image_np = np.copy(np.asarray(image_np))[:, :]
w, h = image_np.shape[0:2]
l = min(w, h)
saliency = cv2.resize(saliency, (l, l))
saliency = cv2.cvtColor(saliency, cv2.COLOR_GRAY2RGB)
canvas = image_np[:, :]
w_offset = int((w - l) / 2)
h_offset = int((h - l) / 2)
roi_img = canvas[w_offset:w_offset + l, h_offset:h_offset + l]
intensify_factor = 3
alpha = np.clip(1 - intensify_factor * saliency.astype(float) / 255, 0, 1)
paint = np.copy(1 - alpha) * 255
overlap = roi_img[paint > 128]
if overlap.mean() + overlap.std() > 128:
color = np.array([0, 0, 255]).astype(float) / 255 # blue
else:
color = np.array([255, 200, 0]).astype(float) / 255 # orange
paint[:, :] *= color
roi_img = cv2.multiply(alpha, roi_img.astype(float))
roi_img = cv2.add(paint * (1 - alpha), roi_img).astype(int)
canvas[w_offset:w_offset + l, h_offset:h_offset + l] = roi_img
return canvas
def test_frozen_graph_saliency_map(config):
checkpoint_dir = config['checkpoint_path']
dataset_dir = get_dataset_dir(config)
frozen_graph_path = os.path.join(checkpoint_dir, 'frozen_graph.pb')
    filename = dataset_dir_file(config, '20180330/1lZsRrQzj/1lZsRrQzj_5.jpg')
labels_to_names = read_label_file(dataset_dir)
image_np = PIL.Image.open(filename)
results = run_inference_by_pb(config, image_np,
pb_file_path=frozen_graph_path)
logits = results['logits']
index = np.argmax(logits, 1)[0]
prediction_name = labels_to_names[index]
grad_imgs = results['grad_imgs']
saliency = deprocess_image(grad_imgs[0])
blend = get_image_with_saliency_map(image_np, saliency)
print(prediction_name)
plot_image_in_grids([
blend, image_np,
saliency,
], 2)
@click.group()
def cli():
pass
@cli.command()
@click.argument('config_file')
@click.option('--use_cached', is_flag=True)
def run_info(config_file, use_cached):
with open(config_file) as f:
config = yaml.load(f)
_run_info(config, use_cached=use_cached)
@cli.command()
@click.argument('config_file')
def test_models(config_file):
with open(config_file) as f:
config = yaml.load(f)
test_inference_by_model_files(config)
@cli.command()
@click.argument('config_file')
@click.option('--use_cached', is_flag=True)
def plot_roc(config_file, use_cached):
with open(config_file) as f:
config = yaml.load(f)
_roc_analysis(config, use_cached=use_cached)
@cli.command()
@click.argument('config_file')
@click.option('--use_cached', is_flag=True)
def saliency_maps(config_file, use_cached):
with open(config_file) as f:
config = yaml.load(f)
_run_saliency_maps(config, use_cached=use_cached)
@cli.command()
@click.argument('config_file')
@click.option('--use_cached', is_flag=True)
def confusion_matrix(config_file, use_cached):
with open(config_file) as f:
config = yaml.load(f)
keys = [
'confusion_matrix',
]
aggregated = _eval_tensors(config, keys=keys, use_cached=use_cached)
checkpoint_dir_path = get_checkpoint_dir_path(config)
dataset_dir = get_dataset_dir(config)
labels_to_names = read_label_file(dataset_dir)
plot_confusion_matrix(aggregated['confusion_matrix'],
labels_to_names=labels_to_names,
save_dir=checkpoint_dir_path)
if __name__ == '__main__':
define_tf_flags()
cli()
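# Example usage (a sketch: the script name "eval_plants.py" is hypothetical and the
# YAML config is assumed to provide keys such as dataset_dir, checkpoint_path and
# model_name; depending on the Click version the sub-commands may be exposed with
# dashes instead of underscores):
#   python eval_plants.py run_info config.yml --use_cached
#   python eval_plants.py confusion_matrix config.yml
#   python eval_plants.py plot_roc config.yml --use_cached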
|
[
"tensorflow.app.flags.DEFINE_float",
"yaml.load",
"click.option",
"collections.defaultdict",
"numpy.argsort",
"matplotlib.pyplot.figure",
"numpy.interp",
"tensorflow.app.flags.DEFINE_integer",
"cv2.cvtColor",
"matplotlib.pyplot.imshow",
"datasets.dataset_factory.get_dataset",
"numpy.max",
"tensorflow.squeeze",
"preprocessing.preprocessing_factory.get_preprocessing",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylim",
"tensorflow.gfile.GFile",
"tensorflow.Graph",
"matplotlib.pyplot.subplot",
"os.makedirs",
"tensorflow.argmax",
"PIL.Image.open",
"numpy.array",
"numpy.abs",
"itertools.cycle",
"tensorflow.python_io.tf_record_iterator",
"tensorflow.train.Example",
"datasets.plants.read_label_file",
"click.group",
"math.ceil",
"matplotlib.use",
"tensorflow.import_graph_def",
"tensorflow.python.training.monitored_session.ChiefSessionCreator",
"numpy.matrix",
"click.argument",
"sklearn.metrics.roc_curve",
"matplotlib.pyplot.plot",
"numpy.expand_dims",
"matplotlib.font_manager.FontManager",
"tensorflow.Print",
"tensorflow.app.flags.DEFINE_string",
"matplotlib.pyplot.xlabel",
"tensorflow.losses.softmax_cross_entropy",
"coremltools.models.MLModel",
"matplotlib.pyplot.title",
"tensorflow.confusion_matrix",
"nets.nets_factory.get_network_fn",
"tensorflow.logging.info",
"numpy.argmax",
"numpy.clip",
"os.path.join",
"tensorflow.train.ExponentialMovingAverage",
"seaborn.axes_style",
"numpy.copy",
"tensorflow.gradients",
"tensorflow.GraphDef",
"cv2.resize",
"h5py.File",
"matplotlib.pyplot.get_cmap",
"tensorflow.summary.scalar",
"subprocess.check_output",
"matplotlib.pyplot.legend",
"sklearn.preprocessing.label_binarize",
"cv2.add",
"matplotlib.pyplot.savefig",
"seaborn.heatmap",
"tensorflow.train.latest_checkpoint",
"tensorflow.train.batch",
"tensorflow.nn.softmax",
"numpy.zeros_like",
"traceback.print_exc",
"tensorflow.python.training.monitored_session.MonitoredSession",
"collections.Counter",
"tensorflow.gfile.IsDirectory",
"numpy.asarray",
"tensorflow.Session",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlim",
"tensorflow.add_to_collection",
"os.environ.get",
"sklearn.metrics.auc"
] |
[((33480, 33493), 'click.group', 'click.group', ([], {}), '()\n', (33491, 33493), False, 'import click\n'), ((33532, 33561), 'click.argument', 'click.argument', (['"""config_file"""'], {}), "('config_file')\n", (33546, 33561), False, 'import click\n'), ((33563, 33605), 'click.option', 'click.option', (['"""--use_cached"""'], {'is_flag': '(True)'}), "('--use_cached', is_flag=True)\n", (33575, 33605), False, 'import click\n'), ((33772, 33801), 'click.argument', 'click.argument', (['"""config_file"""'], {}), "('config_file')\n", (33786, 33801), False, 'import click\n'), ((33956, 33985), 'click.argument', 'click.argument', (['"""config_file"""'], {}), "('config_file')\n", (33970, 33985), False, 'import click\n'), ((33987, 34029), 'click.option', 'click.option', (['"""--use_cached"""'], {'is_flag': '(True)'}), "('--use_cached', is_flag=True)\n", (33999, 34029), False, 'import click\n'), ((34200, 34229), 'click.argument', 'click.argument', (['"""config_file"""'], {}), "('config_file')\n", (34214, 34229), False, 'import click\n'), ((34231, 34273), 'click.option', 'click.option', (['"""--use_cached"""'], {'is_flag': '(True)'}), "('--use_cached', is_flag=True)\n", (34243, 34273), False, 'import click\n'), ((34454, 34483), 'click.argument', 'click.argument', (['"""config_file"""'], {}), "('config_file')\n", (34468, 34483), False, 'import click\n'), ((34485, 34527), 'click.option', 'click.option', (['"""--use_cached"""'], {'is_flag': '(True)'}), "('--use_cached', is_flag=True)\n", (34497, 34527), False, 'import click\n'), ((1415, 1444), 'os.environ.get', 'os.environ.get', (['"""DISPLAY"""', '""""""'], {}), "('DISPLAY', '')\n", (1429, 1444), False, 'import os\n'), ((1521, 1542), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (1535, 1542), False, 'import matplotlib\n'), ((1852, 1949), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""batch_size"""', 'BATCH_SIZE', '"""The number of samples in each batch."""'], {}), "('batch_size', BATCH_SIZE,\n 'The number of samples in each batch.')\n", (1879, 1949), True, 'import tensorflow as tf\n'), ((1959, 2072), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""max_num_batches"""', 'None', '"""Max number of batches to evaluate by default use all."""'], {}), "('max_num_batches', None,\n 'Max number of batches to evaluate by default use all.')\n", (1986, 2072), True, 'import tensorflow as tf\n'), ((2090, 2182), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""master"""', '""""""', '"""The address of the TensorFlow master to use."""'], {}), "('master', '',\n 'The address of the TensorFlow master to use.')\n", (2116, 2182), True, 'import tensorflow as tf\n'), ((2192, 2342), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""checkpoint_path"""', 'None', '"""The directory where the model was written to or an absolute path to a checkpoint file."""'], {}), "('checkpoint_path', None,\n 'The directory where the model was written to or an absolute path to a checkpoint file.'\n )\n", (2218, 2342), True, 'import tensorflow as tf\n'), ((2366, 2470), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""eval_dir"""', '"""/tmp/tfmodel/"""', '"""Directory where the results are saved to."""'], {}), "('eval_dir', '/tmp/tfmodel/',\n 'Directory where the results are saved to.')\n", (2392, 2470), True, 'import tensorflow as tf\n'), ((2488, 2604), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', 
(['"""num_preprocessing_threads"""', '(4)', '"""The number of threads used to create the batches."""'], {}), "('num_preprocessing_threads', 4,\n 'The number of threads used to create the batches.')\n", (2515, 2604), True, 'import tensorflow as tf\n'), ((2622, 2714), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""dataset_name"""', '"""plants"""', '"""The name of the dataset to load."""'], {}), "('dataset_name', 'plants',\n 'The name of the dataset to load.')\n", (2648, 2714), True, 'import tensorflow as tf\n'), ((2724, 2827), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""dataset_split_name"""', '"""validation"""', '"""The name of the train/test split."""'], {}), "('dataset_split_name', 'validation',\n 'The name of the train/test split.')\n", (2750, 2827), True, 'import tensorflow as tf\n'), ((2845, 2949), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""dataset_dir"""', 'None', '"""The directory where the dataset files are stored."""'], {}), "('dataset_dir', None,\n 'The directory where the dataset files are stored.')\n", (2871, 2949), True, 'import tensorflow as tf\n'), ((2967, 3202), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""labels_offset"""', '(0)', '"""An offset for the labels in the dataset. This flag is primarily used to evaluate the VGG and ResNet architectures which do not use a background class for the ImageNet dataset."""'], {}), "('labels_offset', 0,\n 'An offset for the labels in the dataset. This flag is primarily used to evaluate the VGG and ResNet architectures which do not use a background class for the ImageNet dataset.'\n )\n", (2994, 3202), True, 'import tensorflow as tf\n'), ((3237, 3342), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""model_name"""', '"""mobilenet_v1"""', '"""The name of the architecture to evaluate."""'], {}), "('model_name', 'mobilenet_v1',\n 'The name of the architecture to evaluate.')\n", (3263, 3342), True, 'import tensorflow as tf\n'), ((3360, 3517), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""preprocessing_name"""', 'None', '"""The name of the preprocessing to use. If left as `None`, then the model_name flag is used."""'], {}), "('preprocessing_name', None,\n 'The name of the preprocessing to use. 
If left as `None`, then the model_name flag is used.'\n )\n", (3386, 3517), True, 'import tensorflow as tf\n'), ((3541, 3700), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""moving_average_decay"""', 'None', '"""The decay to use for the moving average.If left as None, then moving averages are not used."""'], {}), "('moving_average_decay', None,\n 'The decay to use for the moving average.If left as None, then moving averages are not used.'\n )\n", (3566, 3700), True, 'import tensorflow as tf\n'), ((3724, 3795), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""eval_image_size"""', 'None', '"""Eval image size"""'], {}), "('eval_image_size', None, 'Eval image size')\n", (3751, 3795), True, 'import tensorflow as tf\n'), ((4197, 4234), 'tensorflow.gfile.IsDirectory', 'tf.gfile.IsDirectory', (['checkpoint_path'], {}), '(checkpoint_path)\n', (4217, 4234), True, 'import tensorflow as tf\n'), ((4400, 4456), 'tensorflow.python_io.tf_record_iterator', 'tf.python_io.tf_record_iterator', ([], {'path': 'tfrecords_filename'}), '(path=tfrecords_filename)\n', (4431, 4456), True, 'import tensorflow as tf\n'), ((5083, 5173), 'datasets.dataset_factory.get_dataset', 'dataset_factory.get_dataset', (['FLAGS.dataset_name', 'FLAGS.dataset_split_name', 'dataset_dir'], {}), '(FLAGS.dataset_name, FLAGS.dataset_split_name,\n dataset_dir)\n', (5110, 5173), False, 'from datasets import dataset_factory\n'), ((5334, 5421), 'nets.nets_factory.get_network_fn', 'nets_factory.get_network_fn', (['model_name'], {'num_classes': 'num_classes', 'is_training': '(False)'}), '(model_name, num_classes=num_classes,\n is_training=False)\n', (5361, 5421), False, 'from nets import nets_factory\n'), ((6271, 6349), 'preprocessing.preprocessing_factory.get_preprocessing', 'preprocessing_factory.get_preprocessing', (['preprocessing_name'], {'is_training': '(False)'}), '(preprocessing_name, is_training=False)\n', (6310, 6349), False, 'from preprocessing import preprocessing_factory\n'), ((6544, 6720), 'tensorflow.train.batch', 'tf.train.batch', (['[image, label]'], {'batch_size': 'FLAGS.batch_size', 'num_threads': 'FLAGS.num_preprocessing_threads', 'allow_smaller_final_batch': '(True)', 'capacity': '(5 * FLAGS.batch_size)'}), '([image, label], batch_size=FLAGS.batch_size, num_threads=\n FLAGS.num_preprocessing_threads, allow_smaller_final_batch=True,\n capacity=5 * FLAGS.batch_size)\n', (6558, 6720), True, 'import tensorflow as tf\n'), ((7292, 7312), 'tensorflow.argmax', 'tf.argmax', (['logits', '(1)'], {}), '(logits, 1)\n', (7301, 7312), True, 'import tensorflow as tf\n'), ((7441, 7459), 'tensorflow.squeeze', 'tf.squeeze', (['labels'], {}), '(labels)\n', (7451, 7459), True, 'import tensorflow as tf\n'), ((8667, 8717), 'tensorflow.logging.info', 'tf.logging.info', (["('Evaluating %s' % checkpoint_path)"], {}), "('Evaluating %s' % checkpoint_path)\n", (8682, 8717), True, 'import tensorflow as tf\n'), ((8740, 8768), 'datasets.plants.read_label_file', 'read_label_file', (['dataset_dir'], {}), '(dataset_dir)\n', (8755, 8768), False, 'from datasets.plants import read_label_file\n'), ((8789, 8810), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), '(logits)\n', (8802, 8810), True, 'import tensorflow as tf\n'), ((8844, 8942), 'tensorflow.losses.softmax_cross_entropy', 'tf.losses.softmax_cross_entropy', (['one_hot_predictions', 'logits'], {'label_smoothing': '(0.0)', 'weights': '(1.0)'}), '(one_hot_predictions, logits,\n label_smoothing=0.0, weights=1.0)\n', (8875, 8942), True, 'import 
tensorflow as tf\n'), ((9745, 9834), 'tensorflow.python.training.monitored_session.ChiefSessionCreator', 'monitored_session.ChiefSessionCreator', ([], {'checkpoint_filename_with_path': 'checkpoint_path'}), '(checkpoint_filename_with_path=\n checkpoint_path)\n', (9782, 9834), False, 'from tensorflow.python.training import monitored_session\n'), ((9934, 10001), 'tensorflow.python.training.monitored_session.MonitoredSession', 'monitored_session.MonitoredSession', ([], {'session_creator': 'session_creator'}), '(session_creator=session_creator)\n', (9968, 10001), False, 'from tensorflow.python.training import monitored_session\n'), ((10211, 10225), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (10223, 10225), True, 'import matplotlib.pyplot as plt\n'), ((10537, 10559), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""Accent"""'], {}), "('Accent')\n", (10549, 10559), True, 'import matplotlib.pyplot as plt\n'), ((10715, 10746), 'numpy.zeros_like', 'np.zeros_like', (['confusion_matrix'], {}), '(confusion_matrix)\n', (10728, 10746), True, 'import numpy as np\n'), ((11475, 11521), 'os.path.join', 'os.path.join', (['save_dir', '"""confusion_matrix.png"""'], {}), "(save_dir, 'confusion_matrix.png')\n", (11487, 11521), False, 'import os\n'), ((11526, 11547), 'matplotlib.pyplot.savefig', 'plt.savefig', (['pic_path'], {}), '(pic_path)\n', (11537, 11547), True, 'import matplotlib.pyplot as plt\n'), ((11605, 11615), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11613, 11615), True, 'import matplotlib.pyplot as plt\n'), ((11720, 11733), 'matplotlib.font_manager.FontManager', 'FontManager', ([], {}), '()\n', (11731, 11733), False, 'from matplotlib.font_manager import FontManager\n'), ((11796, 11875), 'subprocess.check_output', 'subprocess.check_output', (['"""fc-list :lang=zh-tw -f "%{family}\n\\""""'], {'shell': '(True)'}), '("""fc-list :lang=zh-tw -f "%{family}\n\\"""", shell=True)\n', (11819, 11875), False, 'import subprocess\n'), ((12327, 12336), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (12333, 12336), True, 'import numpy as np\n'), ((12345, 12362), 'numpy.max', 'np.max', (['x'], {'axis': '(2)'}), '(x, axis=2)\n', (12351, 12362), True, 'import numpy as np\n'), ((12667, 12695), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 10)'}), '(figsize=(15, 10))\n', (12677, 12695), True, 'import matplotlib.pyplot as plt\n'), ((13049, 13077), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 10)'}), '(figsize=(15, 10))\n', (13059, 13077), True, 'import matplotlib.pyplot as plt\n'), ((16155, 16174), 'collections.Counter', 'Counter', (['all_labels'], {}), '(all_labels)\n', (16162, 16174), False, 'from collections import Counter\n'), ((16361, 16395), 'os.path.join', 'os.path.join', (['directory', 'file_name'], {}), '(directory, file_name)\n', (16373, 16395), False, 'import os\n'), ((16404, 16434), 'h5py.File', 'h5py.File', (['info_file_path', '"""w"""'], {}), "(info_file_path, 'w')\n", (16413, 16434), False, 'import h5py\n'), ((16606, 16640), 'os.path.join', 'os.path.join', (['directory', 'file_name'], {}), '(directory, file_name)\n', (16618, 16640), False, 'import os\n'), ((17161, 17177), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (17172, 17177), False, 'from collections import Iterable, defaultdict\n'), ((18377, 18424), 'sklearn.preprocessing.label_binarize', 'label_binarize', (['labels'], {'classes': 'possible_labels'}), '(labels, classes=possible_labels)\n', (18391, 18424), False, 'from sklearn.preprocessing import 
label_binarize\n'), ((18446, 18469), 'numpy.array', 'np.array', (['probabilities'], {}), '(probabilities)\n', (18454, 18469), True, 'import numpy as np\n'), ((19248, 19307), 'sklearn.metrics.auc', 'auc', (["fpr['highest_probability']", "tpr['highest_probability']"], {}), "(fpr['highest_probability'], tpr['highest_probability'])\n", (19251, 19307), False, 'from sklearn.metrics import roc_curve, auc\n'), ((19536, 19567), 'sklearn.metrics.auc', 'auc', (["fpr['micro']", "tpr['micro']"], {}), "(fpr['micro'], tpr['micro'])\n", (19539, 19567), False, 'from sklearn.metrics import roc_curve, auc\n'), ((19861, 19883), 'numpy.zeros_like', 'np.zeros_like', (['all_fpr'], {}), '(all_fpr)\n', (19874, 19883), True, 'import numpy as np\n'), ((20117, 20148), 'sklearn.metrics.auc', 'auc', (["fpr['macro']", "tpr['macro']"], {}), "(fpr['macro'], tpr['macro'])\n", (20120, 20148), False, 'from sklearn.metrics import roc_curve, auc\n'), ((20238, 20282), 'numpy.argmax', 'np.argmax', (['(tpr[key_series] - fpr[key_series])'], {}), '(tpr[key_series] - fpr[key_series])\n', (20247, 20282), True, 'import numpy as np\n'), ((20663, 20675), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (20673, 20675), True, 'import matplotlib.pyplot as plt\n'), ((20690, 20737), 'itertools.cycle', 'cycle', (["['aqua', 'darkorange', 'cornflowerblue']"], {}), "(['aqua', 'darkorange', 'cornflowerblue'])\n", (20695, 20737), False, 'from itertools import cycle\n'), ((21322, 21342), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (21330, 21342), True, 'import matplotlib.pyplot as plt\n'), ((21347, 21368), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.05]'], {}), '([0.0, 1.05])\n', (21355, 21368), True, 'import matplotlib.pyplot as plt\n'), ((21373, 21406), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {}), "('False Positive Rate')\n", (21383, 21406), True, 'import matplotlib.pyplot as plt\n'), ((21411, 21443), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {}), "('True Positive Rate')\n", (21421, 21443), True, 'import matplotlib.pyplot as plt\n'), ((21448, 21470), 'matplotlib.pyplot.title', 'plt.title', (['"""ROC curve"""'], {}), "('ROC curve')\n", (21457, 21470), True, 'import matplotlib.pyplot as plt\n'), ((21475, 21504), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (21485, 21504), True, 'import matplotlib.pyplot as plt\n'), ((21520, 21559), 'os.path.join', 'os.path.join', (['save_dir', '"""roc_curve.png"""'], {}), "(save_dir, 'roc_curve.png')\n", (21532, 21559), False, 'import os\n'), ((21564, 21585), 'matplotlib.pyplot.savefig', 'plt.savefig', (['pic_path'], {}), '(pic_path)\n', (21575, 21585), True, 'import matplotlib.pyplot as plt\n'), ((21648, 21658), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (21656, 21658), True, 'import matplotlib.pyplot as plt\n'), ((26251, 26297), 'cv2.resize', 'cv2.resize', (['image_np', '(image_size, image_size)'], {}), '(image_np, (image_size, image_size))\n', (26261, 26297), False, 'import cv2\n'), ((26360, 26387), 'numpy.expand_dims', 'np.expand_dims', (['image_np', '(0)'], {}), '(image_np, 0)\n', (26374, 26387), True, 'import numpy as np\n'), ((26401, 26440), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def'], {'name': '""""""'}), "(graph_def, name='')\n", (26420, 26440), True, 'import tensorflow as tf\n'), ((29154, 29199), 'coremltools.models.MLModel', 'coremltools.models.MLModel', (['coreml_model_file'], {}), 
'(coreml_model_file)\n', (29180, 29199), False, 'import coremltools\n'), ((30575, 30599), 'PIL.Image.open', 'PIL.Image.open', (['filename'], {}), '(filename)\n', (30589, 30599), False, 'import PIL\n'), ((30709, 30729), 'numpy.argmax', 'np.argmax', (['logits', '(1)'], {}), '(logits, 1)\n', (30718, 30729), True, 'import numpy as np\n'), ((30795, 30816), 'numpy.argsort', 'np.argsort', (['logits', '(1)'], {}), '(logits, 1)\n', (30805, 30816), True, 'import numpy as np\n'), ((31777, 31805), 'cv2.resize', 'cv2.resize', (['saliency', '(l, l)'], {}), '(saliency, (l, l))\n', (31787, 31805), False, 'import cv2\n'), ((31821, 31863), 'cv2.cvtColor', 'cv2.cvtColor', (['saliency', 'cv2.COLOR_GRAY2RGB'], {}), '(saliency, cv2.COLOR_GRAY2RGB)\n', (31833, 31863), False, 'import cv2\n'), ((32785, 32832), 'os.path.join', 'os.path.join', (['checkpoint_dir', '"""frozen_graph.pb"""'], {}), "(checkpoint_dir, 'frozen_graph.pb')\n", (32797, 32832), False, 'import os\n'), ((32927, 32955), 'datasets.plants.read_label_file', 'read_label_file', (['dataset_dir'], {}), '(dataset_dir)\n', (32942, 32955), False, 'from datasets.plants import read_label_file\n'), ((32971, 32995), 'PIL.Image.open', 'PIL.Image.open', (['filename'], {}), '(filename)\n', (32985, 32995), False, 'import PIL\n'), ((34882, 34910), 'datasets.plants.read_label_file', 'read_label_file', (['dataset_dir'], {}), '(dataset_dir)\n', (34897, 34910), False, 'from datasets.plants import read_label_file\n'), ((4262, 4305), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['checkpoint_path'], {}), '(checkpoint_path)\n', (4288, 4305), True, 'import tensorflow as tf\n'), ((4536, 4554), 'tensorflow.train.Example', 'tf.train.Example', ([], {}), '()\n', (4552, 4554), True, 'import tensorflow as tf\n'), ((6928, 7005), 'tensorflow.train.ExponentialMovingAverage', 'tf.train.ExponentialMovingAverage', (['FLAGS.moving_average_decay', 'tf_global_step'], {}), '(FLAGS.moving_average_decay, tf_global_step)\n', (6961, 7005), True, 'import tensorflow as tf\n'), ((7795, 7884), 'tensorflow.confusion_matrix', 'tf.confusion_matrix', ([], {'labels': 'labels', 'num_classes': 'num_classes', 'predictions': 'predictions'}), '(labels=labels, num_classes=num_classes, predictions=\n predictions)\n', (7814, 7884), True, 'import tensorflow as tf\n'), ((8155, 8209), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['summary_name', 'value'], {'collections': '[]'}), '(summary_name, value, collections=[])\n', (8172, 8209), True, 'import tensorflow as tf\n'), ((8223, 8258), 'tensorflow.Print', 'tf.Print', (['op', '[value]', 'summary_name'], {}), '(op, [value], summary_name)\n', (8231, 8258), True, 'import tensorflow as tf\n'), ((8267, 8315), 'tensorflow.add_to_collection', 'tf.add_to_collection', (['tf.GraphKeys.SUMMARIES', 'op'], {}), '(tf.GraphKeys.SUMMARIES, op)\n', (8287, 8315), True, 'import tensorflow as tf\n'), ((8964, 9012), 'tensorflow.gradients', 'tf.gradients', (['softmax_cross_entropy_loss', 'images'], {}), '(softmax_cross_entropy_loss, images)\n', (8976, 9012), True, 'import tensorflow as tf\n'), ((10823, 10849), 'seaborn.axes_style', 'sns.axes_style', (['"""darkgrid"""'], {}), "('darkgrid')\n", (10837, 10849), True, 'import seaborn as sns\n'), ((10859, 11012), 'seaborn.heatmap', 'sns.heatmap', (['confusion_matrix'], {'linewidths': '(0.2)', 'linecolor': '"""#eeeeee"""', 'xticklabels': '(True)', 'yticklabels': '(True)', 'mask': 'mask', 'annot': '(False)', 'ax': 'ax', 'cmap': 'cmap'}), "(confusion_matrix, linewidths=0.2, linecolor='#eeeeee',\n xticklabels=True, 
yticklabels=True, mask=mask, annot=False, ax=ax, cmap\n =cmap)\n", (10870, 11012), True, 'import seaborn as sns\n'), ((12877, 12899), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file_name'], {}), '(file_name)\n', (12888, 12899), True, 'import matplotlib.pyplot as plt\n'), ((12980, 12990), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12988, 12990), True, 'import matplotlib.pyplot as plt\n'), ((17195, 17216), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (17206, 17216), False, 'import os\n'), ((18784, 18819), 'sklearn.metrics.roc_curve', 'roc_curve', (['y_binary[:, i]', 'y_scores'], {}), '(y_binary[:, i], y_scores)\n', (18793, 18819), False, 'from sklearn.metrics import roc_curve, auc\n'), ((18841, 18860), 'sklearn.metrics.auc', 'auc', (['fpr[i]', 'tpr[i]'], {}), '(fpr[i], tpr[i])\n', (18844, 18860), False, 'from sklearn.metrics import roc_curve, auc\n'), ((19935, 19969), 'numpy.interp', 'np.interp', (['all_fpr', 'fpr[i]', 'tpr[i]'], {}), '(all_fpr, fpr[i], tpr[i])\n', (19944, 19969), True, 'import numpy as np\n'), ((25820, 25848), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['pb_file_path'], {}), '(pb_file_path)\n', (25834, 25848), True, 'import tensorflow as tf\n'), ((25875, 25888), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (25886, 25888), True, 'import tensorflow as tf\n'), ((26450, 26473), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (26460, 26473), True, 'import tensorflow as tf\n'), ((27816, 27851), 'os.path.join', 'os.path.join', (['dataset_dir', 'filename'], {}), '(dataset_dir, filename)\n', (27828, 27851), False, 'import os\n'), ((27871, 27895), 'PIL.Image.open', 'PIL.Image.open', (['filename'], {}), '(filename)\n', (27885, 27895), False, 'import PIL\n'), ((28054, 28071), 'numpy.argmax', 'np.argmax', (['logits'], {}), '(logits)\n', (28063, 28071), True, 'import numpy as np\n'), ((28241, 28259), 'numpy.argsort', 'np.argsort', (['logits'], {}), '(logits)\n', (28251, 28259), True, 'import numpy as np\n'), ((32142, 32160), 'numpy.copy', 'np.copy', (['(1 - alpha)'], {}), '(1 - alpha)\n', (32149, 32160), True, 'import numpy as np\n'), ((33158, 33178), 'numpy.argmax', 'np.argmax', (['logits', '(1)'], {}), '(logits, 1)\n', (33167, 33178), True, 'import numpy as np\n'), ((33695, 33707), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (33704, 33707), False, 'import yaml\n'), ((33882, 33894), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (33891, 33894), False, 'import yaml\n'), ((34119, 34131), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (34128, 34131), False, 'import yaml\n'), ((34368, 34380), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (34377, 34380), False, 'import yaml\n'), ((34625, 34637), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (34634, 34637), False, 'import yaml\n'), ((4909, 4919), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (4917, 4919), True, 'import tensorflow as tf\n'), ((12472, 12490), 'numpy.clip', 'np.clip', (['x', '(0)', '(255)'], {}), '(x, 0, 255)\n', (12479, 12490), True, 'import numpy as np\n'), ((12770, 12802), 'matplotlib.pyplot.subplot', 'plt.subplot', (['n_row', 'n_columns', 'i'], {}), '(n_row, n_columns, i)\n', (12781, 12802), True, 'import matplotlib.pyplot as plt\n'), ((12815, 12830), 'matplotlib.pyplot.imshow', 'plt.imshow', (['col'], {}), '(col)\n', (12825, 12830), True, 'import matplotlib.pyplot as plt\n'), ((16663, 16693), 'h5py.File', 'h5py.File', (['info_file_path', '"""r"""'], {}), "(info_file_path, 'r')\n", (16672, 16693), False, 'import h5py\n'), 
((20957, 21014), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr[i]', 'tpr[i]'], {'color': 'color', 'lw': 'lw', 'label': 'label'}), '(fpr[i], tpr[i], color=color, lw=lw, label=label)\n', (20965, 21014), True, 'import matplotlib.pyplot as plt\n'), ((23399, 23414), 'numpy.asarray', 'np.asarray', (['im2'], {}), '(im2)\n', (23409, 23414), True, 'import numpy as np\n'), ((24113, 24128), 'numpy.asarray', 'np.asarray', (['im2'], {}), '(im2)\n', (24123, 24128), True, 'import numpy as np\n'), ((31684, 31704), 'numpy.asarray', 'np.asarray', (['image_np'], {}), '(image_np)\n', (31694, 31704), True, 'import numpy as np\n'), ((32491, 32528), 'cv2.add', 'cv2.add', (['(paint * (1 - alpha))', 'roi_img'], {}), '(paint * (1 - alpha), roi_img)\n', (32498, 32528), False, 'import cv2\n'), ((13802, 13824), 'math.ceil', 'math.ceil', (['num_batches'], {}), '(num_batches)\n', (13811, 13824), False, 'import math\n'), ((18562, 18592), 'numpy.max', 'np.max', (['y_score_matrix'], {'axis': '(1)'}), '(y_score_matrix, axis=1)\n', (18568, 18592), True, 'import numpy as np\n'), ((14236, 14257), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (14255, 14257), False, 'import traceback\n'), ((32263, 32284), 'numpy.array', 'np.array', (['[0, 0, 255]'], {}), '([0, 0, 255])\n', (32271, 32284), True, 'import numpy as np\n'), ((32339, 32362), 'numpy.array', 'np.array', (['[255, 200, 0]'], {}), '([255, 200, 0])\n', (32347, 32362), True, 'import numpy as np\n'), ((14473, 14489), 'numpy.matrix', 'np.matrix', (['value'], {}), '(value)\n', (14482, 14489), True, 'import numpy as np\n'), ((14557, 14573), 'numpy.matrix', 'np.matrix', (['value'], {}), '(value)\n', (14566, 14573), True, 'import numpy as np\n')]
|
from app import app
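# Smoke test: the root route should return "Hello World!" with HTTP 200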
with app.test_client() as c:
    response = c.get('/')
    assert response.data == b'Hello World!'
    assert response.status_code == 200
|
[
"app.app.test_client"
] |
[((25, 42), 'app.app.test_client', 'app.test_client', ([], {}), '()\n', (40, 42), False, 'from app import app\n')]
|
"""Module to test assessment_status"""
from __future__ import unicode_literals # isort:skip
from datetime import datetime
from random import choice
from string import ascii_letters
from dateutil.relativedelta import relativedelta
from flask_webtest import SessionScope
import pytest
from sqlalchemy.orm.exc import NoResultFound
from portal.extensions import db
from portal.models.audit import Audit
from portal.models.clinical_constants import CC
from portal.models.encounter import Encounter
from portal.models.identifier import Identifier
from portal.models.intervention import INTERVENTION
from portal.models.organization import Organization
from portal.models.overall_status import OverallStatus
from portal.models.qb_status import QB_Status
from portal.models.qb_timeline import invalidate_users_QBT
from portal.models.questionnaire import Questionnaire
from portal.models.questionnaire_bank import (
QuestionnaireBank,
QuestionnaireBankQuestionnaire,
)
from portal.models.questionnaire_response import (
QuestionnaireResponse,
aggregate_responses,
qnr_document_id,
)
from portal.models.recur import Recur
from portal.models.research_protocol import ResearchProtocol
from portal.models.role import ROLE
from portal.models.user import get_user
from portal.system_uri import ICHOM
from tests import TEST_USER_ID, TestCase, associative_backdate
now = datetime.utcnow()
def mock_qr(
instrument_id, status='completed', timestamp=None, qb=None,
doc_id=None, iteration=None, user_id=TEST_USER_ID):
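    """Add a mock QuestionnaireResponse (and backing Encounter) for the given user"""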
if not doc_id:
doc_id = ''.join(choice(ascii_letters) for _ in range(10))
timestamp = timestamp or datetime.utcnow()
qr_document = {
"questionnaire": {
"display": "Additional questions",
"reference":
"https://{}/api/questionnaires/{}".format(
'SERVER_NAME', instrument_id)},
"identifier": {
"use": "official",
"label": "cPRO survey session ID",
"value": doc_id,
"system": "https://stg-ae.us.truenth.org/eproms-demo"}
}
enc = Encounter(
status='planned', auth_method='url_authenticated', user_id=user_id,
start_time=timestamp)
with SessionScope(db):
db.session.add(enc)
db.session.commit()
enc = db.session.merge(enc)
if not qb:
qstats = QB_Status(get_user(user_id), timestamp)
qbd = qstats.current_qbd()
qb, iteration = qbd.questionnaire_bank, qbd.iteration
qr = QuestionnaireResponse(
subject_id=user_id,
status=status,
authored=timestamp,
document=qr_document,
encounter_id=enc.id,
questionnaire_bank=qb,
qb_iteration=iteration)
with SessionScope(db):
db.session.add(qr)
db.session.commit()
invalidate_users_QBT(user_id=user_id)
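# Instrument name sets used by the mock questionnaire banks below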
localized_instruments = {'eproms_add', 'epic26', 'comorb'}
metastatic_baseline_instruments = {
'eortc', 'eproms_add', 'ironmisc', 'factfpsi', 'epic23', 'prems'}
metastatic_indefinite_instruments = {'irondemog'}
metastatic_3 = {
'eortc', 'eproms_add', 'ironmisc'}
metastatic_4 = {
'eortc', 'eproms_add', 'ironmisc', 'factfpsi'}
metastatic_6 = {
'eortc', 'eproms_add', 'ironmisc', 'factfpsi', 'epic23', 'prems'}
symptom_tracker_instruments = {'epic26', 'eq5d', 'maxpc', 'pam'}
def mock_questionnairebanks(eproms_or_tnth):
"""Create a series of near real world questionnaire banks
:param eproms_or_tnth: controls which set of questionnairebanks are
    generated. Restrictions exist (e.g. two QBs with the same
    classification can't include the same instrument), so the two sets
    can't be mixed.
"""
if eproms_or_tnth == 'eproms':
return mock_eproms_questionnairebanks()
elif eproms_or_tnth == 'tnth':
return mock_tnth_questionnairebanks()
else:
        raise ValueError('expecting `eproms` or `tnth`, not `{}`'.format(
eproms_or_tnth))
def mock_eproms_questionnairebanks():
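    """Create eproms research protocols, orgs and questionnaire banks"""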
# Define base ResearchProtocols
localized_protocol = ResearchProtocol(name='localized_protocol')
metastatic_protocol = ResearchProtocol(name='metastatic_protocol')
with SessionScope(db):
db.session.add(localized_protocol)
db.session.add(metastatic_protocol)
db.session.commit()
localized_protocol = db.session.merge(localized_protocol)
metastatic_protocol = db.session.merge(metastatic_protocol)
locpro_id = localized_protocol.id
metapro_id = metastatic_protocol.id
# Define test Orgs and QuestionnaireBanks for each group
localized_org = Organization(name='localized')
localized_org.research_protocols.append(localized_protocol)
metastatic_org = Organization(name='metastatic')
metastatic_org.research_protocols.append(metastatic_protocol)
# from https://docs.google.com/spreadsheets/d/\
# 1oJ8HKfMHOdXkSshjRlr8lFXxT4aUHX5ntxnKMgf50wE/edit#gid=1339608238
three_q_recur = Recur(
start='{"months": 3}', cycle_length='{"months": 6}',
termination='{"months": 24}')
four_q_recur = Recur(
start='{"months": 6}', cycle_length='{"years": 1}',
termination='{"months": 33}')
six_q_recur = Recur(
start='{"years": 1}', cycle_length='{"years": 1}',
termination='{"years": 3, "months": 3}')
for name in (localized_instruments.union(*(
metastatic_baseline_instruments,
metastatic_indefinite_instruments,
metastatic_3,
metastatic_4,
metastatic_6))):
TestCase.add_questionnaire(name=name)
with SessionScope(db):
db.session.add(localized_org)
db.session.add(metastatic_org)
db.session.add(three_q_recur)
db.session.add(four_q_recur)
db.session.add(six_q_recur)
db.session.commit()
localized_org, metastatic_org = map(
db.session.merge, (localized_org, metastatic_org))
three_q_recur = db.session.merge(three_q_recur)
four_q_recur = db.session.merge(four_q_recur)
six_q_recur = db.session.merge(six_q_recur)
# Localized baseline
l_qb = QuestionnaireBank(
name='localized',
classification='baseline',
research_protocol_id=locpro_id,
start='{"days": 0}',
overdue='{"days": 7}',
expired='{"months": 3}')
for rank, instrument in enumerate(localized_instruments):
q = Questionnaire.find_by_name(name=instrument)
qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank)
l_qb.questionnaires.append(qbq)
# Metastatic baseline
mb_qb = QuestionnaireBank(
name='metastatic',
classification='baseline',
research_protocol_id=metapro_id,
start='{"days": 0}',
overdue='{"days": 30}',
expired='{"months": 3}')
for rank, instrument in enumerate(metastatic_baseline_instruments):
q = Questionnaire.find_by_name(name=instrument)
qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank)
mb_qb.questionnaires.append(qbq)
# Metastatic indefinite
mi_qb = QuestionnaireBank(
name='metastatic_indefinite',
classification='indefinite',
research_protocol_id=metapro_id,
start='{"days": 0}',
expired='{"years": 50}')
for rank, instrument in enumerate(metastatic_indefinite_instruments):
q = Questionnaire.find_by_name(name=instrument)
qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank)
mi_qb.questionnaires.append(qbq)
# Metastatic recurring 3
mr3_qb = QuestionnaireBank(
name='metastatic_recurring3',
classification='recurring',
research_protocol_id=metapro_id,
start='{"days": 0}',
overdue='{"days": 30}',
expired='{"months": 3}',
recurs=[three_q_recur])
for rank, instrument in enumerate(metastatic_3):
q = Questionnaire.find_by_name(name=instrument)
qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank)
mr3_qb.questionnaires.append(qbq)
# Metastatic recurring 4
mr4_qb = QuestionnaireBank(
name='metastatic_recurring4',
classification='recurring',
research_protocol_id=metapro_id,
recurs=[four_q_recur],
start='{"days": 0}',
overdue='{"days": 30}',
expired='{"months": 3}')
for rank, instrument in enumerate(metastatic_4):
q = Questionnaire.find_by_name(name=instrument)
qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank)
mr4_qb.questionnaires.append(qbq)
# Metastatic recurring 6
mr6_qb = QuestionnaireBank(
name='metastatic_recurring6',
classification='recurring',
research_protocol_id=metapro_id,
recurs=[six_q_recur],
start='{"days": 0}',
overdue='{"days": 30}',
expired='{"months": 3}')
for rank, instrument in enumerate(metastatic_6):
q = Questionnaire.find_by_name(name=instrument)
qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank)
mr6_qb.questionnaires.append(qbq)
with SessionScope(db):
db.session.add(l_qb)
db.session.add(mb_qb)
db.session.add(mi_qb)
db.session.add(mr3_qb)
db.session.add(mr4_qb)
db.session.add(mr6_qb)
db.session.commit()
def mock_tnth_questionnairebanks():
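    """Create TrueNTH symptom tracker questionnaire banks"""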
for name in (symptom_tracker_instruments):
TestCase.add_questionnaire(name=name)
# Symptom Tracker Baseline
self_management = INTERVENTION.SELF_MANAGEMENT
st_qb = QuestionnaireBank(
name='symptom_tracker',
classification='baseline',
intervention_id=self_management.id,
start='{"days": 0}',
expired='{"months": 3}'
)
for rank, instrument in enumerate(symptom_tracker_instruments):
q = Questionnaire.find_by_name(name=instrument)
qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank)
st_qb.questionnaires.append(qbq)
# Symptom Tracker Recurrence
st_recur = Recur(
start='{"months": 3}', cycle_length='{"months": 3}',
termination='{"months": 27}')
with SessionScope(db):
db.session.add(st_qb)
db.session.add(st_recur)
db.session.commit()
self_management = INTERVENTION.SELF_MANAGEMENT
st_recur_qb = QuestionnaireBank(
name='symptom_tracker_recurring',
classification='recurring',
intervention_id=self_management.id,
start='{"days": 0}',
expired='{"months": 3}',
recurs=[st_recur]
)
for rank, instrument in enumerate(symptom_tracker_instruments):
q = Questionnaire.find_by_name(name=instrument)
qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank)
st_recur_qb.questionnaires.append(qbq)
with SessionScope(db):
db.session.add(st_recur_qb)
db.session.commit()
class TestQuestionnaireSetup(TestCase):
"Base for test classes needing mock questionnaire setup"
eproms_or_tnth = 'eproms' # modify in child class to test `tnth`
def setUp(self):
super(TestQuestionnaireSetup, self).setUp()
mock_questionnairebanks(self.eproms_or_tnth)
class TestAggregateResponses(TestQuestionnaireSetup):
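    """Tests for aggregate_responses() bundle generation"""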
def test_aggregate_response_timepoints(self):
# generate a few mock qr's from various qb iterations, confirm
# time points.
nineback, nowish = associative_backdate(
now=now, backdate=relativedelta(months=9, hours=1))
self.bless_with_basics(
setdate=nineback, local_metastatic='metastatic')
instrument_id = 'eortc'
for months_back in (0, 3, 6, 9):
backdate, _ = associative_backdate(
now=now, backdate=relativedelta(months=months_back))
mock_qr(instrument_id=instrument_id, timestamp=backdate)
# add staff user w/ same org association for bundle creation
staff = self.add_user(username='staff')
staff.organizations.append(Organization.query.filter(
Organization.name == 'metastatic').one())
self.promote_user(staff, role_name=ROLE.STAFF.value)
staff = db.session.merge(staff)
bundle = aggregate_responses(
instrument_ids=[instrument_id], current_user=staff)
expected = {'Baseline', 'Month 3', 'Month 6', 'Month 9'}
found = [i['timepoint'] for i in bundle['entry']]
assert set(found) == expected
def test_site_ids(self):
# bless org w/ expected identifier type
wanted_system = 'http://pcctc.org/'
unwanted_system = 'http://other.org/'
self.app.config['REPORTING_IDENTIFIER_SYSTEMS'] = [wanted_system]
id_value = '146-11'
org = Organization.query.filter(
Organization.name == 'metastatic').one()
id1 = Identifier(
system=wanted_system, use='secondary', value=id_value)
id2 = Identifier(
system=unwanted_system, use='secondary', value=id_value)
org.identifiers.append(id1)
org.identifiers.append(id2)
with SessionScope(db):
db.session.commit()
nineback, nowish = associative_backdate(
now=now, backdate=relativedelta(months=9, hours=1))
self.bless_with_basics(
setdate=nineback, local_metastatic='metastatic')
instrument_id = 'eortc'
mock_qr(instrument_id=instrument_id)
# add staff user w/ same org association for bundle creation
staff = self.add_user(username='staff')
staff.organizations.append(Organization.query.filter(
Organization.name == 'metastatic').one())
self.promote_user(staff, role_name=ROLE.STAFF.value)
staff = db.session.merge(staff)
bundle = aggregate_responses(
instrument_ids=[instrument_id], current_user=staff)
id1 = db.session.merge(id1)
assert 1 == len(bundle['entry'])
assert (1 ==
len(bundle['entry'][0]['subject']['careProvider']))
assert (1 ==
len(bundle['entry'][0]['subject']['careProvider'][0]
['identifier']))
assert (id1.as_fhir() ==
bundle['entry'][0]['subject']['careProvider'][0]
['identifier'][0])
class TestQB_Status(TestQuestionnaireSetup):
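    """Tests of QB_Status against the mock eproms questionnaire banks"""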
def test_qnr_id(self):
qb = QuestionnaireBank.query.first()
mock_qr(
instrument_id='irondemog',
status='in-progress', qb=qb,
doc_id='two11')
qb = db.session.merge(qb)
result = qnr_document_id(
subject_id=TEST_USER_ID,
questionnaire_bank_id=qb.id,
questionnaire_name='irondemog',
iteration=None,
status='in-progress')
assert result == 'two11'
def test_qnr_id_missing(self):
qb = QuestionnaireBank.query.first()
qb = db.session.merge(qb)
with pytest.raises(NoResultFound):
            qnr_document_id(
subject_id=TEST_USER_ID,
questionnaire_bank_id=qb.id,
questionnaire_name='irondemog',
iteration=None,
status='in-progress')
def test_enrolled_in_metastatic(self):
"""metastatic should include baseline and indefinite"""
self.bless_with_basics(local_metastatic='metastatic')
user = db.session.merge(self.test_user)
a_s = QB_Status(user=user, as_of_date=now)
assert a_s.enrolled_in_classification('baseline')
assert a_s.enrolled_in_classification('indefinite')
def test_enrolled_in_localized(self):
"""localized should include baseline but not indefinite"""
self.bless_with_basics(local_metastatic='localized')
user = db.session.merge(self.test_user)
a_s = QB_Status(user=user, as_of_date=now)
assert a_s.enrolled_in_classification('baseline')
assert not a_s.enrolled_in_classification('indefinite')
def test_localized_using_org(self):
self.bless_with_basics(local_metastatic='localized', setdate=now)
self.test_user = db.session.merge(self.test_user)
# confirm appropriate instruments
a_s = QB_Status(user=self.test_user, as_of_date=now)
assert (set(a_s.instruments_needing_full_assessment()) ==
localized_instruments)
def test_localized_on_time(self):
        # User finished all baseline instruments on time
self.bless_with_basics(local_metastatic='localized', setdate=now)
mock_qr(instrument_id='eproms_add', timestamp=now)
mock_qr(instrument_id='epic26', timestamp=now)
mock_qr(instrument_id='comorb', timestamp=now)
self.test_user = db.session.merge(self.test_user)
a_s = QB_Status(user=self.test_user, as_of_date=now)
assert a_s.overall_status == OverallStatus.completed
# confirm appropriate instruments
assert not a_s.instruments_needing_full_assessment('all')
def test_localized_inprogress_on_time(self):
        # User has all baseline instruments in progress, on time
self.bless_with_basics(local_metastatic='localized', setdate=now)
mock_qr(
instrument_id='eproms_add', status='in-progress',
doc_id='eproms_add', timestamp=now)
mock_qr(
instrument_id='epic26', status='in-progress', doc_id='epic26',
timestamp=now)
mock_qr(
instrument_id='comorb', status='in-progress', doc_id='comorb',
timestamp=now)
self.test_user = db.session.merge(self.test_user)
a_s = QB_Status(user=self.test_user, as_of_date=now)
assert a_s.overall_status == OverallStatus.in_progress
# confirm appropriate instruments
assert not a_s.instruments_needing_full_assessment()
assert set(a_s.instruments_in_progress()) == localized_instruments
def test_localized_in_process(self):
        # User finished one, time remains for the others
self.bless_with_basics(local_metastatic='localized', setdate=now)
mock_qr(instrument_id='eproms_add', timestamp=now)
self.test_user = db.session.merge(self.test_user)
a_s = QB_Status(user=self.test_user, as_of_date=now)
assert a_s.overall_status == OverallStatus.in_progress
# confirm appropriate instruments
assert (localized_instruments -
set(a_s.instruments_needing_full_assessment('all')) ==
{'eproms_add'})
assert not a_s.instruments_in_progress()
def test_metastatic_on_time(self):
        # User finished all baseline + indefinite instruments on time
self.bless_with_basics(
local_metastatic='metastatic', setdate=now)
for i in metastatic_baseline_instruments:
mock_qr(instrument_id=i, timestamp=now)
mi_qb = QuestionnaireBank.query.filter_by(
name='metastatic_indefinite').first()
mock_qr(instrument_id='irondemog', qb=mi_qb, timestamp=now)
self.test_user = db.session.merge(self.test_user)
a_s = QB_Status(user=self.test_user, as_of_date=now)
assert a_s.overall_status == OverallStatus.completed
# shouldn't need full or any inprocess
assert not a_s.instruments_needing_full_assessment('all')
assert not a_s.instruments_in_progress('all')
def test_metastatic_due(self):
# hasn't taken, but still in OverallStatus.due period
self.bless_with_basics(local_metastatic='metastatic', setdate=now)
self.test_user = db.session.merge(self.test_user)
a_s = QB_Status(user=self.test_user, as_of_date=now)
assert a_s.overall_status == OverallStatus.due
        # confirm list of expected instruments needing attention
assert (metastatic_baseline_instruments ==
set(a_s.instruments_needing_full_assessment()))
assert not a_s.instruments_in_progress()
# metastatic indefinite should also be 'due'
assert (metastatic_indefinite_instruments ==
set(a_s.instruments_needing_full_assessment('indefinite')))
assert not a_s.instruments_in_progress('indefinite')
def test_localized_overdue(self):
# if the user completed something on time, and nothing else
# is due, should see the thank you message.
backdate, nowish = associative_backdate(
now=now, backdate=relativedelta(months=3, hours=1))
self.bless_with_basics(
setdate=backdate, local_metastatic='localized')
# backdate so the baseline q's have expired
mock_qr(
instrument_id='epic26', status='in-progress', timestamp=backdate)
self.test_user = db.session.merge(self.test_user)
a_s = QB_Status(user=self.test_user, as_of_date=nowish)
assert a_s.overall_status == OverallStatus.partially_completed
# with all q's expired,
# instruments_needing_full_assessment and instruments_in_progress
# should be empty
assert not a_s.instruments_needing_full_assessment()
assert not a_s.instruments_in_progress()
def test_localized_as_of_date(self):
# backdating consent beyond expired and the status lookup date
# within a valid window should show available assessments.
backdate, nowish = associative_backdate(
now=now, backdate=relativedelta(months=3))
self.bless_with_basics(
setdate=backdate, local_metastatic='localized')
# backdate so the baseline q's have expired
mock_qr(instrument_id='epic26', status='in-progress', doc_id='doc-26',
timestamp=backdate)
self.test_user = db.session.merge(self.test_user)
as_of_date = backdate + relativedelta(days=2)
a_s = QB_Status(user=self.test_user, as_of_date=as_of_date)
assert a_s.overall_status == OverallStatus.in_progress
# with only epic26 started, should see results for both
# instruments_needing_full_assessment and instruments_in_progress
assert ({'eproms_add', 'comorb'} ==
set(a_s.instruments_needing_full_assessment()))
assert ['doc-26'] == a_s.instruments_in_progress()
def test_metastatic_as_of_date(self):
# backdating consent beyond expired and the status lookup date
# within a valid window should show available assessments.
backdate, nowish = associative_backdate(
now=now, backdate=relativedelta(months=3))
self.bless_with_basics(setdate=backdate, local_metastatic='metastatic')
# backdate so the baseline q's have expired
mock_qr(instrument_id='epic23', status='in-progress', doc_id='doc-23',
timestamp=backdate)
self.test_user = db.session.merge(self.test_user)
as_of_date = backdate + relativedelta(days=2)
a_s = QB_Status(user=self.test_user, as_of_date=as_of_date)
assert a_s.overall_status == OverallStatus.in_progress
        # with only epic23 started, should see results for both
# instruments_needing_full_assessment and instruments_in_progress
assert ['doc-23'] == a_s.instruments_in_progress()
assert a_s.instruments_needing_full_assessment()
def test_initial_recur_due(self):
        # backdate so baseline q's have expired, and we are within the first
# recurrence window
backdate, nowish = associative_backdate(
now=now, backdate=relativedelta(months=3, hours=1))
self.bless_with_basics(
setdate=backdate, local_metastatic='metastatic')
self.test_user = db.session.merge(self.test_user)
a_s = QB_Status(user=self.test_user, as_of_date=nowish)
assert a_s.overall_status == OverallStatus.due
# in the initial window w/ no questionnaires submitted
# should include all from initial recur
assert (set(a_s.instruments_needing_full_assessment()) ==
metastatic_3)
# confirm iteration 0
assert a_s.current_qbd().iteration == 0
def test_2nd_recur_due(self):
        # backdate so baseline q's have expired, and we are within the 2nd
# recurrence window
backdate, nowish = associative_backdate(
now=now, backdate=relativedelta(months=9, hours=1))
self.bless_with_basics(
setdate=backdate, local_metastatic='metastatic')
self.test_user = db.session.merge(self.test_user)
a_s = QB_Status(user=self.test_user, as_of_date=nowish)
assert a_s.overall_status == OverallStatus.due
        # in the 2nd recurrence window w/ no questionnaires submitted
        # should still include all from the 3-month recur
assert set(a_s.instruments_needing_full_assessment()) == metastatic_3
# however, we should be looking at iteration 2 (zero index)!
assert a_s.current_qbd().iteration == 1
def test_initial_recur_baseline_done(self):
# backdate to be within the first recurrence window
backdate, nowish = associative_backdate(
now=now, backdate=relativedelta(months=3, days=2))
self.bless_with_basics(
setdate=backdate, local_metastatic='metastatic')
# add baseline QNRs, as if submitted nearly 3 months ago, during
# baseline window
backdated = nowish - relativedelta(months=2, days=25)
baseline = QuestionnaireBank.query.filter_by(
name='metastatic').one()
for instrument in metastatic_baseline_instruments:
mock_qr(instrument, qb=baseline, timestamp=backdated)
self.test_user = db.session.merge(self.test_user)
# Check status during baseline window
a_s_baseline = QB_Status(
user=self.test_user, as_of_date=backdated)
assert a_s_baseline.overall_status == OverallStatus.completed
assert not a_s_baseline.instruments_needing_full_assessment()
# Whereas "current" status for the initial recurrence show due.
a_s = QB_Status(user=self.test_user, as_of_date=nowish)
assert a_s.overall_status == OverallStatus.due
# in the initial window w/ no questionnaires submitted
# should include all from initial recur
assert set(a_s.instruments_needing_full_assessment()) == metastatic_3
def test_secondary_recur_due(self):
# backdate so baseline q's have expired, and we are within the
# second recurrence window
backdate, nowish = associative_backdate(
now=now, backdate=relativedelta(months=6, hours=1))
self.bless_with_basics(
setdate=backdate, local_metastatic='metastatic')
self.test_user = db.session.merge(self.test_user)
a_s = QB_Status(user=self.test_user, as_of_date=nowish)
assert a_s.overall_status == OverallStatus.due
# w/ no questionnaires submitted
# should include all from second recur
assert set(a_s.instruments_needing_full_assessment()) == metastatic_4
def test_batch_lookup(self):
self.login()
self.bless_with_basics()
response = self.client.get(
'/api/consent-assessment-status?user_id=1&user_id=2')
assert response.status_code == 200
assert len(response.json['status']) == 1
assert (
response.json['status'][0]['consents'][0]['assessment_status'] ==
str(OverallStatus.expired))
def test_none_org(self):
# check users w/ none of the above org
self.test_user = db.session.merge(self.test_user)
self.test_user.organizations.append(Organization.query.get(0))
self.login()
self.bless_with_basics(
local_metastatic='metastatic', setdate=now)
self.test_user = db.session.merge(self.test_user)
a_s = QB_Status(user=self.test_user, as_of_date=now)
assert a_s.overall_status == OverallStatus.due
def test_boundary_overdue(self):
self.login()
backdate, nowish = associative_backdate(
now=now, backdate=relativedelta(months=3, hours=-1))
self.bless_with_basics(
setdate=backdate, local_metastatic='localized')
self.test_user = db.session.merge(self.test_user)
a_s = QB_Status(user=self.test_user, as_of_date=nowish)
assert a_s.overall_status == OverallStatus.overdue
def test_boundary_expired(self):
"At expired, should be expired"
self.login()
backdate, nowish = associative_backdate(
now=now, backdate=relativedelta(months=3, hours=1))
self.bless_with_basics(
setdate=backdate, local_metastatic='localized')
self.test_user = db.session.merge(self.test_user)
a_s = QB_Status(user=self.test_user, as_of_date=nowish)
assert a_s.overall_status == OverallStatus.expired
def test_boundary_in_progress(self):
self.login()
backdate, nowish = associative_backdate(
now=now, backdate=relativedelta(months=3, hours=-1))
self.bless_with_basics(
setdate=backdate, local_metastatic='localized')
for instrument in localized_instruments:
mock_qr(
instrument_id=instrument, status='in-progress',
timestamp=nowish)
self.test_user = db.session.merge(self.test_user)
a_s = QB_Status(user=self.test_user, as_of_date=nowish)
assert a_s.overall_status == OverallStatus.in_progress
def test_boundary_recurring_in_progress(self):
self.login()
backdate, nowish = associative_backdate(
now=now, backdate=relativedelta(months=6, hours=-1))
self.bless_with_basics(
setdate=backdate, local_metastatic='metastatic')
mr3_qb = QuestionnaireBank.query.filter_by(
name='metastatic_recurring3').first()
for instrument in metastatic_3:
mock_qr(
instrument_id=instrument, status='in-progress',
qb=mr3_qb, timestamp=nowish, iteration=0)
self.test_user = db.session.merge(self.test_user)
a_s = QB_Status(user=self.test_user, as_of_date=nowish)
assert a_s.overall_status == OverallStatus.in_progress
def test_boundary_in_progress_expired(self):
self.login()
backdate, nowish = associative_backdate(
now=now, backdate=relativedelta(months=3, hours=1))
self.bless_with_basics(
setdate=backdate, local_metastatic='localized')
for instrument in localized_instruments:
mock_qr(
instrument_id=instrument, status='in-progress',
                timestamp=nowish - relativedelta(days=1))
self.test_user = db.session.merge(self.test_user)
a_s = QB_Status(user=self.test_user, as_of_date=nowish)
assert a_s.overall_status == OverallStatus.partially_completed
def test_all_expired_old_tx(self):
self.login()
# backdate outside of baseline window (which uses consent date)
backdate, nowish = associative_backdate(
now=now, backdate=relativedelta(months=4, hours=1))
self.bless_with_basics(
setdate=backdate, local_metastatic='localized')
# provide treatment date outside of all recurrences
        tx_date = datetime(2000, 3, 12, 0, 0, 0, 0)
self.add_procedure(code='7', display='Focal therapy',
system=ICHOM, setdate=tx_date)
self.test_user = db.session.merge(self.test_user)
a_s = QB_Status(user=self.test_user, as_of_date=nowish)
assert a_s.overall_status == OverallStatus.expired
class TestTnthQB_Status(TestQuestionnaireSetup):
"""Tests with Tnth QuestionnaireBanks"""
eproms_or_tnth = 'tnth'
def test_no_start_date(self):
        # W/O a biopsy (i.e. event start date), no questionnaires
self.promote_user(role_name=ROLE.PATIENT.value)
# toggle default setup - set biopsy false for test user
self.login()
self.test_user = db.session.merge(self.test_user)
self.test_user.save_observation(
codeable_concept=CC.BIOPSY, value_quantity=CC.FALSE_VALUE,
audit=Audit(user_id=TEST_USER_ID, subject_id=TEST_USER_ID),
status='final', issued=now)
qstats = QB_Status(self.test_user, now)
assert not qstats.current_qbd()
assert not qstats.enrolled_in_classification("baseline")
|
[
"portal.models.questionnaire_bank.QuestionnaireBank",
"datetime.datetime.utcnow",
"portal.models.identifier.Identifier",
"portal.models.encounter.Encounter",
"portal.models.organization.Organization.query.filter",
"portal.models.qb_status.QB_Status",
"portal.extensions.db.session.merge",
"portal.models.organization.Organization",
"portal.models.questionnaire.Questionnaire.find_by_name",
"dateutil.relativedelta.relativedelta",
"portal.models.questionnaire_response.aggregate_responses",
"pytest.raises",
"portal.models.user.get_user",
"portal.models.questionnaire_bank.QuestionnaireBankQuestionnaire",
"portal.models.questionnaire_bank.QuestionnaireBank.query.filter_by",
"portal.models.research_protocol.ResearchProtocol",
"portal.extensions.db.session.commit",
"portal.models.questionnaire_response.qnr_document_id",
"datetime.datetime",
"portal.models.questionnaire_bank.QuestionnaireBank.query.first",
"flask_webtest.SessionScope",
"portal.models.organization.Organization.query.get",
"portal.models.recur.Recur",
"portal.models.audit.Audit",
"random.choice",
"tests.TestCase.add_questionnaire",
"portal.models.qb_timeline.invalidate_users_QBT",
"portal.extensions.db.session.add",
"portal.models.questionnaire_response.QuestionnaireResponse"
] |
[((1378, 1395), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1393, 1395), False, 'from datetime import datetime\n'), ((2117, 2221), 'portal.models.encounter.Encounter', 'Encounter', ([], {'status': '"""planned"""', 'auth_method': '"""url_authenticated"""', 'user_id': 'user_id', 'start_time': 'timestamp'}), "(status='planned', auth_method='url_authenticated', user_id=\n user_id, start_time=timestamp)\n", (2126, 2221), False, 'from portal.models.encounter import Encounter\n'), ((2327, 2348), 'portal.extensions.db.session.merge', 'db.session.merge', (['enc'], {}), '(enc)\n', (2343, 2348), False, 'from portal.extensions import db\n'), ((2528, 2702), 'portal.models.questionnaire_response.QuestionnaireResponse', 'QuestionnaireResponse', ([], {'subject_id': 'user_id', 'status': 'status', 'authored': 'timestamp', 'document': 'qr_document', 'encounter_id': 'enc.id', 'questionnaire_bank': 'qb', 'qb_iteration': 'iteration'}), '(subject_id=user_id, status=status, authored=timestamp,\n document=qr_document, encounter_id=enc.id, questionnaire_bank=qb,\n qb_iteration=iteration)\n', (2549, 2702), False, 'from portal.models.questionnaire_response import QuestionnaireResponse, aggregate_responses, qnr_document_id\n'), ((2838, 2875), 'portal.models.qb_timeline.invalidate_users_QBT', 'invalidate_users_QBT', ([], {'user_id': 'user_id'}), '(user_id=user_id)\n', (2858, 2875), False, 'from portal.models.qb_timeline import invalidate_users_QBT\n'), ((4103, 4146), 'portal.models.research_protocol.ResearchProtocol', 'ResearchProtocol', ([], {'name': '"""localized_protocol"""'}), "(name='localized_protocol')\n", (4119, 4146), False, 'from portal.models.research_protocol import ResearchProtocol\n'), ((4173, 4217), 'portal.models.research_protocol.ResearchProtocol', 'ResearchProtocol', ([], {'name': '"""metastatic_protocol"""'}), "(name='metastatic_protocol')\n", (4189, 4217), False, 'from portal.models.research_protocol import ResearchProtocol\n'), ((4385, 4421), 'portal.extensions.db.session.merge', 'db.session.merge', (['localized_protocol'], {}), '(localized_protocol)\n', (4401, 4421), False, 'from portal.extensions import db\n'), ((4448, 4485), 'portal.extensions.db.session.merge', 'db.session.merge', (['metastatic_protocol'], {}), '(metastatic_protocol)\n', (4464, 4485), False, 'from portal.extensions import db\n'), ((4646, 4676), 'portal.models.organization.Organization', 'Organization', ([], {'name': '"""localized"""'}), "(name='localized')\n", (4658, 4676), False, 'from portal.models.organization import Organization\n'), ((4762, 4793), 'portal.models.organization.Organization', 'Organization', ([], {'name': '"""metastatic"""'}), "(name='metastatic')\n", (4774, 4793), False, 'from portal.models.organization import Organization\n'), ((5004, 5097), 'portal.models.recur.Recur', 'Recur', ([], {'start': '"""{"months": 3}"""', 'cycle_length': '"""{"months": 6}"""', 'termination': '"""{"months": 24}"""'}), '(start=\'{"months": 3}\', cycle_length=\'{"months": 6}\', termination=\n \'{"months": 24}\')\n', (5009, 5097), False, 'from portal.models.recur import Recur\n'), ((5129, 5221), 'portal.models.recur.Recur', 'Recur', ([], {'start': '"""{"months": 6}"""', 'cycle_length': '"""{"years": 1}"""', 'termination': '"""{"months": 33}"""'}), '(start=\'{"months": 6}\', cycle_length=\'{"years": 1}\', termination=\n \'{"months": 33}\')\n', (5134, 5221), False, 'from portal.models.recur import Recur\n'), ((5252, 5354), 'portal.models.recur.Recur', 'Recur', ([], {'start': '"""{"years": 1}"""', 'cycle_length': 
'"""{"years": 1}"""', 'termination': '"""{"years": 3, "months": 3}"""'}), '(start=\'{"years": 1}\', cycle_length=\'{"years": 1}\', termination=\n \'{"years": 3, "months": 3}\')\n', (5257, 5354), False, 'from portal.models.recur import Recur\n'), ((5999, 6030), 'portal.extensions.db.session.merge', 'db.session.merge', (['three_q_recur'], {}), '(three_q_recur)\n', (6015, 6030), False, 'from portal.extensions import db\n'), ((6050, 6080), 'portal.extensions.db.session.merge', 'db.session.merge', (['four_q_recur'], {}), '(four_q_recur)\n', (6066, 6080), False, 'from portal.extensions import db\n'), ((6099, 6128), 'portal.extensions.db.session.merge', 'db.session.merge', (['six_q_recur'], {}), '(six_q_recur)\n', (6115, 6128), False, 'from portal.extensions import db\n'), ((6166, 6338), 'portal.models.questionnaire_bank.QuestionnaireBank', 'QuestionnaireBank', ([], {'name': '"""localized"""', 'classification': '"""baseline"""', 'research_protocol_id': 'locpro_id', 'start': '"""{"days": 0}"""', 'overdue': '"""{"days": 7}"""', 'expired': '"""{"months": 3}"""'}), '(name=\'localized\', classification=\'baseline\',\n research_protocol_id=locpro_id, start=\'{"days": 0}\', overdue=\n \'{"days": 7}\', expired=\'{"months": 3}\')\n', (6183, 6338), False, 'from portal.models.questionnaire_bank import QuestionnaireBank, QuestionnaireBankQuestionnaire\n'), ((6649, 6824), 'portal.models.questionnaire_bank.QuestionnaireBank', 'QuestionnaireBank', ([], {'name': '"""metastatic"""', 'classification': '"""baseline"""', 'research_protocol_id': 'metapro_id', 'start': '"""{"days": 0}"""', 'overdue': '"""{"days": 30}"""', 'expired': '"""{"months": 3}"""'}), '(name=\'metastatic\', classification=\'baseline\',\n research_protocol_id=metapro_id, start=\'{"days": 0}\', overdue=\n \'{"days": 30}\', expired=\'{"months": 3}\')\n', (6666, 6824), False, 'from portal.models.questionnaire_bank import QuestionnaireBank, QuestionnaireBankQuestionnaire\n'), ((7148, 7312), 'portal.models.questionnaire_bank.QuestionnaireBank', 'QuestionnaireBank', ([], {'name': '"""metastatic_indefinite"""', 'classification': '"""indefinite"""', 'research_protocol_id': 'metapro_id', 'start': '"""{"days": 0}"""', 'expired': '"""{"years": 50}"""'}), '(name=\'metastatic_indefinite\', classification=\'indefinite\',\n research_protocol_id=metapro_id, start=\'{"days": 0}\', expired=\n \'{"years": 50}\')\n', (7165, 7312), False, 'from portal.models.questionnaire_bank import QuestionnaireBank, QuestionnaireBankQuestionnaire\n'), ((7632, 7843), 'portal.models.questionnaire_bank.QuestionnaireBank', 'QuestionnaireBank', ([], {'name': '"""metastatic_recurring3"""', 'classification': '"""recurring"""', 'research_protocol_id': 'metapro_id', 'start': '"""{"days": 0}"""', 'overdue': '"""{"days": 30}"""', 'expired': '"""{"months": 3}"""', 'recurs': '[three_q_recur]'}), '(name=\'metastatic_recurring3\', classification=\'recurring\',\n research_protocol_id=metapro_id, start=\'{"days": 0}\', overdue=\n \'{"days": 30}\', expired=\'{"months": 3}\', recurs=[three_q_recur])\n', (7649, 7843), False, 'from portal.models.questionnaire_bank import QuestionnaireBank, QuestionnaireBankQuestionnaire\n'), ((8159, 8369), 'portal.models.questionnaire_bank.QuestionnaireBank', 'QuestionnaireBank', ([], {'name': '"""metastatic_recurring4"""', 'classification': '"""recurring"""', 'research_protocol_id': 'metapro_id', 'recurs': '[four_q_recur]', 'start': '"""{"days": 0}"""', 'overdue': '"""{"days": 30}"""', 'expired': '"""{"months": 3}"""'}), '(name=\'metastatic_recurring4\', 
classification=\'recurring\',\n research_protocol_id=metapro_id, recurs=[four_q_recur], start=\n \'{"days": 0}\', overdue=\'{"days": 30}\', expired=\'{"months": 3}\')\n', (8176, 8369), False, 'from portal.models.questionnaire_bank import QuestionnaireBank, QuestionnaireBankQuestionnaire\n'), ((8685, 8894), 'portal.models.questionnaire_bank.QuestionnaireBank', 'QuestionnaireBank', ([], {'name': '"""metastatic_recurring6"""', 'classification': '"""recurring"""', 'research_protocol_id': 'metapro_id', 'recurs': '[six_q_recur]', 'start': '"""{"days": 0}"""', 'overdue': '"""{"days": 30}"""', 'expired': '"""{"months": 3}"""'}), '(name=\'metastatic_recurring6\', classification=\'recurring\',\n research_protocol_id=metapro_id, recurs=[six_q_recur], start=\n \'{"days": 0}\', overdue=\'{"days": 30}\', expired=\'{"months": 3}\')\n', (8702, 8894), False, 'from portal.models.questionnaire_bank import QuestionnaireBank, QuestionnaireBankQuestionnaire\n'), ((9631, 9790), 'portal.models.questionnaire_bank.QuestionnaireBank', 'QuestionnaireBank', ([], {'name': '"""symptom_tracker"""', 'classification': '"""baseline"""', 'intervention_id': 'self_management.id', 'start': '"""{"days": 0}"""', 'expired': '"""{"months": 3}"""'}), '(name=\'symptom_tracker\', classification=\'baseline\',\n intervention_id=self_management.id, start=\'{"days": 0}\', expired=\n \'{"months": 3}\')\n', (9648, 9790), False, 'from portal.models.questionnaire_bank import QuestionnaireBank, QuestionnaireBankQuestionnaire\n'), ((10115, 10208), 'portal.models.recur.Recur', 'Recur', ([], {'start': '"""{"months": 3}"""', 'cycle_length': '"""{"months": 3}"""', 'termination': '"""{"months": 27}"""'}), '(start=\'{"months": 3}\', cycle_length=\'{"months": 3}\', termination=\n \'{"months": 27}\')\n', (10120, 10208), False, 'from portal.models.recur import Recur\n'), ((10410, 10599), 'portal.models.questionnaire_bank.QuestionnaireBank', 'QuestionnaireBank', ([], {'name': '"""symptom_tracker_recurring"""', 'classification': '"""recurring"""', 'intervention_id': 'self_management.id', 'start': '"""{"days": 0}"""', 'expired': '"""{"months": 3}"""', 'recurs': '[st_recur]'}), '(name=\'symptom_tracker_recurring\', classification=\n \'recurring\', intervention_id=self_management.id, start=\'{"days": 0}\',\n expired=\'{"months": 3}\', recurs=[st_recur])\n', (10427, 10599), False, 'from portal.models.questionnaire_bank import QuestionnaireBank, QuestionnaireBankQuestionnaire\n'), ((1654, 1671), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1669, 1671), False, 'from datetime import datetime\n'), ((2243, 2259), 'flask_webtest.SessionScope', 'SessionScope', (['db'], {}), '(db)\n', (2255, 2259), False, 'from flask_webtest import SessionScope\n'), ((2269, 2288), 'portal.extensions.db.session.add', 'db.session.add', (['enc'], {}), '(enc)\n', (2283, 2288), False, 'from portal.extensions import db\n'), ((2297, 2316), 'portal.extensions.db.session.commit', 'db.session.commit', ([], {}), '()\n', (2314, 2316), False, 'from portal.extensions import db\n'), ((2761, 2777), 'flask_webtest.SessionScope', 'SessionScope', (['db'], {}), '(db)\n', (2773, 2777), False, 'from flask_webtest import SessionScope\n'), ((2787, 2805), 'portal.extensions.db.session.add', 'db.session.add', (['qr'], {}), '(qr)\n', (2801, 2805), False, 'from portal.extensions import db\n'), ((2814, 2833), 'portal.extensions.db.session.commit', 'db.session.commit', ([], {}), '()\n', (2831, 2833), False, 'from portal.extensions import db\n'), ((4227, 4243), 'flask_webtest.SessionScope', 
'SessionScope', (['db'], {}), '(db)\n', (4239, 4243), False, 'from flask_webtest import SessionScope\n'), ((4253, 4287), 'portal.extensions.db.session.add', 'db.session.add', (['localized_protocol'], {}), '(localized_protocol)\n', (4267, 4287), False, 'from portal.extensions import db\n'), ((4296, 4331), 'portal.extensions.db.session.add', 'db.session.add', (['metastatic_protocol'], {}), '(metastatic_protocol)\n', (4310, 4331), False, 'from portal.extensions import db\n'), ((4340, 4359), 'portal.extensions.db.session.commit', 'db.session.commit', ([], {}), '()\n', (4357, 4359), False, 'from portal.extensions import db\n'), ((5597, 5634), 'tests.TestCase.add_questionnaire', 'TestCase.add_questionnaire', ([], {'name': 'name'}), '(name=name)\n', (5623, 5634), False, 'from tests import TEST_USER_ID, TestCase, associative_backdate\n'), ((5645, 5661), 'flask_webtest.SessionScope', 'SessionScope', (['db'], {}), '(db)\n', (5657, 5661), False, 'from flask_webtest import SessionScope\n'), ((5671, 5700), 'portal.extensions.db.session.add', 'db.session.add', (['localized_org'], {}), '(localized_org)\n', (5685, 5700), False, 'from portal.extensions import db\n'), ((5709, 5739), 'portal.extensions.db.session.add', 'db.session.add', (['metastatic_org'], {}), '(metastatic_org)\n', (5723, 5739), False, 'from portal.extensions import db\n'), ((5748, 5777), 'portal.extensions.db.session.add', 'db.session.add', (['three_q_recur'], {}), '(three_q_recur)\n', (5762, 5777), False, 'from portal.extensions import db\n'), ((5786, 5814), 'portal.extensions.db.session.add', 'db.session.add', (['four_q_recur'], {}), '(four_q_recur)\n', (5800, 5814), False, 'from portal.extensions import db\n'), ((5823, 5850), 'portal.extensions.db.session.add', 'db.session.add', (['six_q_recur'], {}), '(six_q_recur)\n', (5837, 5850), False, 'from portal.extensions import db\n'), ((5859, 5878), 'portal.extensions.db.session.commit', 'db.session.commit', ([], {}), '()\n', (5876, 5878), False, 'from portal.extensions import db\n'), ((6453, 6496), 'portal.models.questionnaire.Questionnaire.find_by_name', 'Questionnaire.find_by_name', ([], {'name': 'instrument'}), '(name=instrument)\n', (6479, 6496), False, 'from portal.models.questionnaire import Questionnaire\n'), ((6511, 6569), 'portal.models.questionnaire_bank.QuestionnaireBankQuestionnaire', 'QuestionnaireBankQuestionnaire', ([], {'questionnaire': 'q', 'rank': 'rank'}), '(questionnaire=q, rank=rank)\n', (6541, 6569), False, 'from portal.models.questionnaire_bank import QuestionnaireBank, QuestionnaireBankQuestionnaire\n'), ((6949, 6992), 'portal.models.questionnaire.Questionnaire.find_by_name', 'Questionnaire.find_by_name', ([], {'name': 'instrument'}), '(name=instrument)\n', (6975, 6992), False, 'from portal.models.questionnaire import Questionnaire\n'), ((7007, 7065), 'portal.models.questionnaire_bank.QuestionnaireBankQuestionnaire', 'QuestionnaireBankQuestionnaire', ([], {'questionnaire': 'q', 'rank': 'rank'}), '(questionnaire=q, rank=rank)\n', (7037, 7065), False, 'from portal.models.questionnaire_bank import QuestionnaireBank, QuestionnaireBankQuestionnaire\n'), ((7431, 7474), 'portal.models.questionnaire.Questionnaire.find_by_name', 'Questionnaire.find_by_name', ([], {'name': 'instrument'}), '(name=instrument)\n', (7457, 7474), False, 'from portal.models.questionnaire import Questionnaire\n'), ((7489, 7547), 'portal.models.questionnaire_bank.QuestionnaireBankQuestionnaire', 'QuestionnaireBankQuestionnaire', ([], {'questionnaire': 'q', 'rank': 'rank'}), '(questionnaire=q, 
rank=rank)\n', (7519, 7547), False, 'from portal.models.questionnaire_bank import QuestionnaireBank, QuestionnaireBankQuestionnaire\n'), ((7957, 8000), 'portal.models.questionnaire.Questionnaire.find_by_name', 'Questionnaire.find_by_name', ([], {'name': 'instrument'}), '(name=instrument)\n', (7983, 8000), False, 'from portal.models.questionnaire import Questionnaire\n'), ((8015, 8073), 'portal.models.questionnaire_bank.QuestionnaireBankQuestionnaire', 'QuestionnaireBankQuestionnaire', ([], {'questionnaire': 'q', 'rank': 'rank'}), '(questionnaire=q, rank=rank)\n', (8045, 8073), False, 'from portal.models.questionnaire_bank import QuestionnaireBank, QuestionnaireBankQuestionnaire\n'), ((8483, 8526), 'portal.models.questionnaire.Questionnaire.find_by_name', 'Questionnaire.find_by_name', ([], {'name': 'instrument'}), '(name=instrument)\n', (8509, 8526), False, 'from portal.models.questionnaire import Questionnaire\n'), ((8541, 8599), 'portal.models.questionnaire_bank.QuestionnaireBankQuestionnaire', 'QuestionnaireBankQuestionnaire', ([], {'questionnaire': 'q', 'rank': 'rank'}), '(questionnaire=q, rank=rank)\n', (8571, 8599), False, 'from portal.models.questionnaire_bank import QuestionnaireBank, QuestionnaireBankQuestionnaire\n'), ((9008, 9051), 'portal.models.questionnaire.Questionnaire.find_by_name', 'Questionnaire.find_by_name', ([], {'name': 'instrument'}), '(name=instrument)\n', (9034, 9051), False, 'from portal.models.questionnaire import Questionnaire\n'), ((9066, 9124), 'portal.models.questionnaire_bank.QuestionnaireBankQuestionnaire', 'QuestionnaireBankQuestionnaire', ([], {'questionnaire': 'q', 'rank': 'rank'}), '(questionnaire=q, rank=rank)\n', (9096, 9124), False, 'from portal.models.questionnaire_bank import QuestionnaireBank, QuestionnaireBankQuestionnaire\n'), ((9177, 9193), 'flask_webtest.SessionScope', 'SessionScope', (['db'], {}), '(db)\n', (9189, 9193), False, 'from flask_webtest import SessionScope\n'), ((9203, 9223), 'portal.extensions.db.session.add', 'db.session.add', (['l_qb'], {}), '(l_qb)\n', (9217, 9223), False, 'from portal.extensions import db\n'), ((9232, 9253), 'portal.extensions.db.session.add', 'db.session.add', (['mb_qb'], {}), '(mb_qb)\n', (9246, 9253), False, 'from portal.extensions import db\n'), ((9262, 9283), 'portal.extensions.db.session.add', 'db.session.add', (['mi_qb'], {}), '(mi_qb)\n', (9276, 9283), False, 'from portal.extensions import db\n'), ((9292, 9314), 'portal.extensions.db.session.add', 'db.session.add', (['mr3_qb'], {}), '(mr3_qb)\n', (9306, 9314), False, 'from portal.extensions import db\n'), ((9323, 9345), 'portal.extensions.db.session.add', 'db.session.add', (['mr4_qb'], {}), '(mr4_qb)\n', (9337, 9345), False, 'from portal.extensions import db\n'), ((9354, 9376), 'portal.extensions.db.session.add', 'db.session.add', (['mr6_qb'], {}), '(mr6_qb)\n', (9368, 9376), False, 'from portal.extensions import db\n'), ((9385, 9404), 'portal.extensions.db.session.commit', 'db.session.commit', ([], {}), '()\n', (9402, 9404), False, 'from portal.extensions import db\n'), ((9498, 9535), 'tests.TestCase.add_questionnaire', 'TestCase.add_questionnaire', ([], {'name': 'name'}), '(name=name)\n', (9524, 9535), False, 'from tests import TEST_USER_ID, TestCase, associative_backdate\n'), ((9908, 9951), 'portal.models.questionnaire.Questionnaire.find_by_name', 'Questionnaire.find_by_name', ([], {'name': 'instrument'}), '(name=instrument)\n', (9934, 9951), False, 'from portal.models.questionnaire import Questionnaire\n'), ((9966, 10024), 
'portal.models.questionnaire_bank.QuestionnaireBankQuestionnaire', 'QuestionnaireBankQuestionnaire', ([], {'questionnaire': 'q', 'rank': 'rank'}), '(questionnaire=q, rank=rank)\n', (9996, 10024), False, 'from portal.models.questionnaire_bank import QuestionnaireBank, QuestionnaireBankQuestionnaire\n'), ((10231, 10247), 'flask_webtest.SessionScope', 'SessionScope', (['db'], {}), '(db)\n', (10243, 10247), False, 'from flask_webtest import SessionScope\n'), ((10257, 10278), 'portal.extensions.db.session.add', 'db.session.add', (['st_qb'], {}), '(st_qb)\n', (10271, 10278), False, 'from portal.extensions import db\n'), ((10287, 10311), 'portal.extensions.db.session.add', 'db.session.add', (['st_recur'], {}), '(st_recur)\n', (10301, 10311), False, 'from portal.extensions import db\n'), ((10320, 10339), 'portal.extensions.db.session.commit', 'db.session.commit', ([], {}), '()\n', (10337, 10339), False, 'from portal.extensions import db\n'), ((10725, 10768), 'portal.models.questionnaire.Questionnaire.find_by_name', 'Questionnaire.find_by_name', ([], {'name': 'instrument'}), '(name=instrument)\n', (10751, 10768), False, 'from portal.models.questionnaire import Questionnaire\n'), ((10783, 10841), 'portal.models.questionnaire_bank.QuestionnaireBankQuestionnaire', 'QuestionnaireBankQuestionnaire', ([], {'questionnaire': 'q', 'rank': 'rank'}), '(questionnaire=q, rank=rank)\n', (10813, 10841), False, 'from portal.models.questionnaire_bank import QuestionnaireBank, QuestionnaireBankQuestionnaire\n'), ((10898, 10914), 'flask_webtest.SessionScope', 'SessionScope', (['db'], {}), '(db)\n', (10910, 10914), False, 'from flask_webtest import SessionScope\n'), ((10924, 10951), 'portal.extensions.db.session.add', 'db.session.add', (['st_recur_qb'], {}), '(st_recur_qb)\n', (10938, 10951), False, 'from portal.extensions import db\n'), ((10960, 10979), 'portal.extensions.db.session.commit', 'db.session.commit', ([], {}), '()\n', (10977, 10979), False, 'from portal.extensions import db\n'), ((12264, 12287), 'portal.extensions.db.session.merge', 'db.session.merge', (['staff'], {}), '(staff)\n', (12280, 12287), False, 'from portal.extensions import db\n'), ((12305, 12376), 'portal.models.questionnaire_response.aggregate_responses', 'aggregate_responses', ([], {'instrument_ids': '[instrument_id]', 'current_user': 'staff'}), '(instrument_ids=[instrument_id], current_user=staff)\n', (12324, 12376), False, 'from portal.models.questionnaire_response import QuestionnaireResponse, aggregate_responses, qnr_document_id\n'), ((12929, 12994), 'portal.models.identifier.Identifier', 'Identifier', ([], {'system': 'wanted_system', 'use': '"""secondary"""', 'value': 'id_value'}), "(system=wanted_system, use='secondary', value=id_value)\n", (12939, 12994), False, 'from portal.models.identifier import Identifier\n'), ((13022, 13089), 'portal.models.identifier.Identifier', 'Identifier', ([], {'system': 'unwanted_system', 'use': '"""secondary"""', 'value': 'id_value'}), "(system=unwanted_system, use='secondary', value=id_value)\n", (13032, 13089), False, 'from portal.models.identifier import Identifier\n'), ((13839, 13862), 'portal.extensions.db.session.merge', 'db.session.merge', (['staff'], {}), '(staff)\n', (13855, 13862), False, 'from portal.extensions import db\n'), ((13880, 13951), 'portal.models.questionnaire_response.aggregate_responses', 'aggregate_responses', ([], {'instrument_ids': '[instrument_id]', 'current_user': 'staff'}), '(instrument_ids=[instrument_id], current_user=staff)\n', (13899, 13951), False, 'from 
portal.models.questionnaire_response import QuestionnaireResponse, aggregate_responses, qnr_document_id\n'), ((13979, 14000), 'portal.extensions.db.session.merge', 'db.session.merge', (['id1'], {}), '(id1)\n', (13995, 14000), False, 'from portal.extensions import db\n'), ((14479, 14510), 'portal.models.questionnaire_bank.QuestionnaireBank.query.first', 'QuestionnaireBank.query.first', ([], {}), '()\n', (14508, 14510), False, 'from portal.models.questionnaire_bank import QuestionnaireBank, QuestionnaireBankQuestionnaire\n'), ((14649, 14669), 'portal.extensions.db.session.merge', 'db.session.merge', (['qb'], {}), '(qb)\n', (14665, 14669), False, 'from portal.extensions import db\n'), ((14687, 14830), 'portal.models.questionnaire_response.qnr_document_id', 'qnr_document_id', ([], {'subject_id': 'TEST_USER_ID', 'questionnaire_bank_id': 'qb.id', 'questionnaire_name': '"""irondemog"""', 'iteration': 'None', 'status': '"""in-progress"""'}), "(subject_id=TEST_USER_ID, questionnaire_bank_id=qb.id,\n questionnaire_name='irondemog', iteration=None, status='in-progress')\n", (14702, 14830), False, 'from portal.models.questionnaire_response import QuestionnaireResponse, aggregate_responses, qnr_document_id\n'), ((14970, 15001), 'portal.models.questionnaire_bank.QuestionnaireBank.query.first', 'QuestionnaireBank.query.first', ([], {}), '()\n', (14999, 15001), False, 'from portal.models.questionnaire_bank import QuestionnaireBank, QuestionnaireBankQuestionnaire\n'), ((15015, 15035), 'portal.extensions.db.session.merge', 'db.session.merge', (['qb'], {}), '(qb)\n', (15031, 15035), False, 'from portal.extensions import db\n'), ((15506, 15538), 'portal.extensions.db.session.merge', 'db.session.merge', (['self.test_user'], {}), '(self.test_user)\n', (15522, 15538), False, 'from portal.extensions import db\n'), ((15554, 15590), 'portal.models.qb_status.QB_Status', 'QB_Status', ([], {'user': 'user', 'as_of_date': 'now'}), '(user=user, as_of_date=now)\n', (15563, 15590), False, 'from portal.models.qb_status import QB_Status\n'), ((15895, 15927), 'portal.extensions.db.session.merge', 'db.session.merge', (['self.test_user'], {}), '(self.test_user)\n', (15911, 15927), False, 'from portal.extensions import db\n'), ((15943, 15979), 'portal.models.qb_status.QB_Status', 'QB_Status', ([], {'user': 'user', 'as_of_date': 'now'}), '(user=user, as_of_date=now)\n', (15952, 15979), False, 'from portal.models.qb_status import QB_Status\n'), ((16242, 16274), 'portal.extensions.db.session.merge', 'db.session.merge', (['self.test_user'], {}), '(self.test_user)\n', (16258, 16274), False, 'from portal.extensions import db\n'), ((16332, 16378), 'portal.models.qb_status.QB_Status', 'QB_Status', ([], {'user': 'self.test_user', 'as_of_date': 'now'}), '(user=self.test_user, as_of_date=now)\n', (16341, 16378), False, 'from portal.models.qb_status import QB_Status\n'), ((16829, 16861), 'portal.extensions.db.session.merge', 'db.session.merge', (['self.test_user'], {}), '(self.test_user)\n', (16845, 16861), False, 'from portal.extensions import db\n'), ((16876, 16922), 'portal.models.qb_status.QB_Status', 'QB_Status', ([], {'user': 'self.test_user', 'as_of_date': 'now'}), '(user=self.test_user, as_of_date=now)\n', (16885, 16922), False, 'from portal.models.qb_status import QB_Status\n'), ((17645, 17677), 'portal.extensions.db.session.merge', 'db.session.merge', (['self.test_user'], {}), '(self.test_user)\n', (17661, 17677), False, 'from portal.extensions import db\n'), ((17692, 17738), 'portal.models.qb_status.QB_Status', 'QB_Status', ([], 
{'user': 'self.test_user', 'as_of_date': 'now'}), '(user=self.test_user, as_of_date=now)\n', (17701, 17738), False, 'from portal.models.qb_status import QB_Status\n'), ((18234, 18266), 'portal.extensions.db.session.merge', 'db.session.merge', (['self.test_user'], {}), '(self.test_user)\n', (18250, 18266), False, 'from portal.extensions import db\n'), ((18281, 18327), 'portal.models.qb_status.QB_Status', 'QB_Status', ([], {'user': 'self.test_user', 'as_of_date': 'now'}), '(user=self.test_user, as_of_date=now)\n', (18290, 18327), False, 'from portal.models.qb_status import QB_Status\n'), ((19088, 19120), 'portal.extensions.db.session.merge', 'db.session.merge', (['self.test_user'], {}), '(self.test_user)\n', (19104, 19120), False, 'from portal.extensions import db\n'), ((19135, 19181), 'portal.models.qb_status.QB_Status', 'QB_Status', ([], {'user': 'self.test_user', 'as_of_date': 'now'}), '(user=self.test_user, as_of_date=now)\n', (19144, 19181), False, 'from portal.models.qb_status import QB_Status\n'), ((19609, 19641), 'portal.extensions.db.session.merge', 'db.session.merge', (['self.test_user'], {}), '(self.test_user)\n', (19625, 19641), False, 'from portal.extensions import db\n'), ((19656, 19702), 'portal.models.qb_status.QB_Status', 'QB_Status', ([], {'user': 'self.test_user', 'as_of_date': 'now'}), '(user=self.test_user, as_of_date=now)\n', (19665, 19702), False, 'from portal.models.qb_status import QB_Status\n'), ((20770, 20802), 'portal.extensions.db.session.merge', 'db.session.merge', (['self.test_user'], {}), '(self.test_user)\n', (20786, 20802), False, 'from portal.extensions import db\n'), ((20817, 20866), 'portal.models.qb_status.QB_Status', 'QB_Status', ([], {'user': 'self.test_user', 'as_of_date': 'nowish'}), '(user=self.test_user, as_of_date=nowish)\n', (20826, 20866), False, 'from portal.models.qb_status import QB_Status\n'), ((21752, 21784), 'portal.extensions.db.session.merge', 'db.session.merge', (['self.test_user'], {}), '(self.test_user)\n', (21768, 21784), False, 'from portal.extensions import db\n'), ((21853, 21906), 'portal.models.qb_status.QB_Status', 'QB_Status', ([], {'user': 'self.test_user', 'as_of_date': 'as_of_date'}), '(user=self.test_user, as_of_date=as_of_date)\n', (21862, 21906), False, 'from portal.models.qb_status import QB_Status\n'), ((22836, 22868), 'portal.extensions.db.session.merge', 'db.session.merge', (['self.test_user'], {}), '(self.test_user)\n', (22852, 22868), False, 'from portal.extensions import db\n'), ((22937, 22990), 'portal.models.qb_status.QB_Status', 'QB_Status', ([], {'user': 'self.test_user', 'as_of_date': 'as_of_date'}), '(user=self.test_user, as_of_date=as_of_date)\n', (22946, 22990), False, 'from portal.models.qb_status import QB_Status\n'), ((23681, 23713), 'portal.extensions.db.session.merge', 'db.session.merge', (['self.test_user'], {}), '(self.test_user)\n', (23697, 23713), False, 'from portal.extensions import db\n'), ((23728, 23777), 'portal.models.qb_status.QB_Status', 'QB_Status', ([], {'user': 'self.test_user', 'as_of_date': 'nowish'}), '(user=self.test_user, as_of_date=nowish)\n', (23737, 23777), False, 'from portal.models.qb_status import QB_Status\n'), ((24486, 24518), 'portal.extensions.db.session.merge', 'db.session.merge', (['self.test_user'], {}), '(self.test_user)\n', (24502, 24518), False, 'from portal.extensions import db\n'), ((24533, 24582), 'portal.models.qb_status.QB_Status', 'QB_Status', ([], {'user': 'self.test_user', 'as_of_date': 'nowish'}), '(user=self.test_user, as_of_date=nowish)\n', (24542, 24582), 
False, 'from portal.models.qb_status import QB_Status\n'), ((25665, 25697), 'portal.extensions.db.session.merge', 'db.session.merge', (['self.test_user'], {}), '(self.test_user)\n', (25681, 25697), False, 'from portal.extensions import db\n'), ((25767, 25819), 'portal.models.qb_status.QB_Status', 'QB_Status', ([], {'user': 'self.test_user', 'as_of_date': 'backdated'}), '(user=self.test_user, as_of_date=backdated)\n', (25776, 25819), False, 'from portal.models.qb_status import QB_Status\n'), ((26060, 26109), 'portal.models.qb_status.QB_Status', 'QB_Status', ([], {'user': 'self.test_user', 'as_of_date': 'nowish'}), '(user=self.test_user, as_of_date=nowish)\n', (26069, 26109), False, 'from portal.models.qb_status import QB_Status\n'), ((26734, 26766), 'portal.extensions.db.session.merge', 'db.session.merge', (['self.test_user'], {}), '(self.test_user)\n', (26750, 26766), False, 'from portal.extensions import db\n'), ((26781, 26830), 'portal.models.qb_status.QB_Status', 'QB_Status', ([], {'user': 'self.test_user', 'as_of_date': 'nowish'}), '(user=self.test_user, as_of_date=nowish)\n', (26790, 26830), False, 'from portal.models.qb_status import QB_Status\n'), ((27572, 27604), 'portal.extensions.db.session.merge', 'db.session.merge', (['self.test_user'], {}), '(self.test_user)\n', (27588, 27604), False, 'from portal.extensions import db\n'), ((27810, 27842), 'portal.extensions.db.session.merge', 'db.session.merge', (['self.test_user'], {}), '(self.test_user)\n', (27826, 27842), False, 'from portal.extensions import db\n'), ((27857, 27903), 'portal.models.qb_status.QB_Status', 'QB_Status', ([], {'user': 'self.test_user', 'as_of_date': 'now'}), '(user=self.test_user, as_of_date=now)\n', (27866, 27903), False, 'from portal.models.qb_status import QB_Status\n'), ((28249, 28281), 'portal.extensions.db.session.merge', 'db.session.merge', (['self.test_user'], {}), '(self.test_user)\n', (28265, 28281), False, 'from portal.extensions import db\n'), ((28296, 28345), 'portal.models.qb_status.QB_Status', 'QB_Status', ([], {'user': 'self.test_user', 'as_of_date': 'nowish'}), '(user=self.test_user, as_of_date=nowish)\n', (28305, 28345), False, 'from portal.models.qb_status import QB_Status\n'), ((28734, 28766), 'portal.extensions.db.session.merge', 'db.session.merge', (['self.test_user'], {}), '(self.test_user)\n', (28750, 28766), False, 'from portal.extensions import db\n'), ((28781, 28830), 'portal.models.qb_status.QB_Status', 'QB_Status', ([], {'user': 'self.test_user', 'as_of_date': 'nowish'}), '(user=self.test_user, as_of_date=nowish)\n', (28790, 28830), False, 'from portal.models.qb_status import QB_Status\n'), ((29352, 29384), 'portal.extensions.db.session.merge', 'db.session.merge', (['self.test_user'], {}), '(self.test_user)\n', (29368, 29384), False, 'from portal.extensions import db\n'), ((29399, 29448), 'portal.models.qb_status.QB_Status', 'QB_Status', ([], {'user': 'self.test_user', 'as_of_date': 'nowish'}), '(user=self.test_user, as_of_date=nowish)\n', (29408, 29448), False, 'from portal.models.qb_status import QB_Status\n'), ((30103, 30135), 'portal.extensions.db.session.merge', 'db.session.merge', (['self.test_user'], {}), '(self.test_user)\n', (30119, 30135), False, 'from portal.extensions import db\n'), ((30150, 30199), 'portal.models.qb_status.QB_Status', 'QB_Status', ([], {'user': 'self.test_user', 'as_of_date': 'nowish'}), '(user=self.test_user, as_of_date=nowish)\n', (30159, 30199), False, 'from portal.models.qb_status import QB_Status\n'), ((30754, 30786), 
'portal.extensions.db.session.merge', 'db.session.merge', (['self.test_user'], {}), '(self.test_user)\n', (30770, 30786), False, 'from portal.extensions import db\n'), ((30801, 30850), 'portal.models.qb_status.QB_Status', 'QB_Status', ([], {'user': 'self.test_user', 'as_of_date': 'nowish'}), '(user=self.test_user, as_of_date=nowish)\n', (30810, 30850), False, 'from portal.models.qb_status import QB_Status\n'), ((31339, 31372), 'datetime.datetime', 'datetime', (['(2000)', '(3)', '(12)', '(0)', '(0)', '(0)', '(0)'], {}), '(2000, 3, 12, 0, 0, 0, 0)\n', (31347, 31372), False, 'from datetime import datetime\n'), ((31525, 31557), 'portal.extensions.db.session.merge', 'db.session.merge', (['self.test_user'], {}), '(self.test_user)\n', (31541, 31557), False, 'from portal.extensions import db\n'), ((31572, 31621), 'portal.models.qb_status.QB_Status', 'QB_Status', ([], {'user': 'self.test_user', 'as_of_date': 'nowish'}), '(user=self.test_user, as_of_date=nowish)\n', (31581, 31621), False, 'from portal.models.qb_status import QB_Status\n'), ((32073, 32105), 'portal.extensions.db.session.merge', 'db.session.merge', (['self.test_user'], {}), '(self.test_user)\n', (32089, 32105), False, 'from portal.extensions import db\n'), ((32347, 32377), 'portal.models.qb_status.QB_Status', 'QB_Status', (['self.test_user', 'now'], {}), '(self.test_user, now)\n', (32356, 32377), False, 'from portal.models.qb_status import QB_Status\n'), ((2391, 2408), 'portal.models.user.get_user', 'get_user', (['user_id'], {}), '(user_id)\n', (2399, 2408), False, 'from portal.models.user import get_user\n'), ((13189, 13205), 'flask_webtest.SessionScope', 'SessionScope', (['db'], {}), '(db)\n', (13201, 13205), False, 'from flask_webtest import SessionScope\n'), ((13219, 13238), 'portal.extensions.db.session.commit', 'db.session.commit', ([], {}), '()\n', (13236, 13238), False, 'from portal.extensions import db\n'), ((15049, 15077), 'pytest.raises', 'pytest.raises', (['NoResultFound'], {}), '(NoResultFound)\n', (15062, 15077), False, 'import pytest\n'), ((15100, 15243), 'portal.models.questionnaire_response.qnr_document_id', 'qnr_document_id', ([], {'subject_id': 'TEST_USER_ID', 'questionnaire_bank_id': 'qb.id', 'questionnaire_name': '"""irondemog"""', 'iteration': 'None', 'status': '"""in-progress"""'}), "(subject_id=TEST_USER_ID, questionnaire_bank_id=qb.id,\n questionnaire_name='irondemog', iteration=None, status='in-progress')\n", (15115, 15243), False, 'from portal.models.questionnaire_response import QuestionnaireResponse, aggregate_responses, qnr_document_id\n'), ((21817, 21838), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'days': '(2)'}), '(days=2)\n', (21830, 21838), False, 'from dateutil.relativedelta import relativedelta\n'), ((22901, 22922), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'days': '(2)'}), '(days=2)\n', (22914, 22922), False, 'from dateutil.relativedelta import relativedelta\n'), ((25390, 25422), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'months': '(2)', 'days': '(25)'}), '(months=2, days=25)\n', (25403, 25422), False, 'from dateutil.relativedelta import relativedelta\n'), ((27649, 27674), 'portal.models.organization.Organization.query.get', 'Organization.query.get', (['(0)'], {}), '(0)\n', (27671, 27674), False, 'from portal.models.organization import Organization\n'), ((1583, 1604), 'random.choice', 'choice', (['ascii_letters'], {}), '(ascii_letters)\n', (1589, 1604), False, 'from random import choice\n'), ((11562, 11594), 
'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'months': '(9)', 'hours': '(1)'}), '(months=9, hours=1)\n', (11575, 11594), False, 'from dateutil.relativedelta import relativedelta\n'), ((12835, 12895), 'portal.models.organization.Organization.query.filter', 'Organization.query.filter', (["(Organization.name == 'metastatic')"], {}), "(Organization.name == 'metastatic')\n", (12860, 12895), False, 'from portal.models.organization import Organization\n'), ((13319, 13351), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'months': '(9)', 'hours': '(1)'}), '(months=9, hours=1)\n', (13332, 13351), False, 'from dateutil.relativedelta import relativedelta\n'), ((18909, 18972), 'portal.models.questionnaire_bank.QuestionnaireBank.query.filter_by', 'QuestionnaireBank.query.filter_by', ([], {'name': '"""metastatic_indefinite"""'}), "(name='metastatic_indefinite')\n", (18942, 18972), False, 'from portal.models.questionnaire_bank import QuestionnaireBank, QuestionnaireBankQuestionnaire\n'), ((20470, 20502), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'months': '(3)', 'hours': '(1)'}), '(months=3, hours=1)\n', (20483, 20502), False, 'from dateutil.relativedelta import relativedelta\n'), ((21441, 21464), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'months': '(3)'}), '(months=3)\n', (21454, 21464), False, 'from dateutil.relativedelta import relativedelta\n'), ((22537, 22560), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'months': '(3)'}), '(months=3)\n', (22550, 22560), False, 'from dateutil.relativedelta import relativedelta\n'), ((23529, 23561), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'months': '(3)', 'hours': '(1)'}), '(months=3, hours=1)\n', (23542, 23561), False, 'from dateutil.relativedelta import relativedelta\n'), ((24334, 24366), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'months': '(9)', 'hours': '(1)'}), '(months=9, hours=1)\n', (24347, 24366), False, 'from dateutil.relativedelta import relativedelta\n'), ((25135, 25166), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'months': '(3)', 'days': '(2)'}), '(months=3, days=2)\n', (25148, 25166), False, 'from dateutil.relativedelta import relativedelta\n'), ((25442, 25494), 'portal.models.questionnaire_bank.QuestionnaireBank.query.filter_by', 'QuestionnaireBank.query.filter_by', ([], {'name': '"""metastatic"""'}), "(name='metastatic')\n", (25475, 25494), False, 'from portal.models.questionnaire_bank import QuestionnaireBank, QuestionnaireBankQuestionnaire\n'), ((26582, 26614), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'months': '(6)', 'hours': '(1)'}), '(months=6, hours=1)\n', (26595, 26614), False, 'from dateutil.relativedelta import relativedelta\n'), ((28097, 28130), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'months': '(3)', 'hours': '(-1)'}), '(months=3, hours=-1)\n', (28110, 28130), False, 'from dateutil.relativedelta import relativedelta\n'), ((28583, 28615), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'months': '(3)', 'hours': '(1)'}), '(months=3, hours=1)\n', (28596, 28615), False, 'from dateutil.relativedelta import relativedelta\n'), ((29032, 29065), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'months': '(3)', 'hours': '(-1)'}), '(months=3, hours=-1)\n', (29045, 29065), False, 'from dateutil.relativedelta import relativedelta\n'), ((29664, 29697), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'months': 
'(6)', 'hours': '(-1)'}), '(months=6, hours=-1)\n', (29677, 29697), False, 'from dateutil.relativedelta import relativedelta\n'), ((29809, 29872), 'portal.models.questionnaire_bank.QuestionnaireBank.query.filter_by', 'QuestionnaireBank.query.filter_by', ([], {'name': '"""metastatic_recurring3"""'}), "(name='metastatic_recurring3')\n", (29842, 29872), False, 'from portal.models.questionnaire_bank import QuestionnaireBank, QuestionnaireBankQuestionnaire\n'), ((30413, 30445), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'months': '(3)', 'hours': '(1)'}), '(months=3, hours=1)\n', (30426, 30445), False, 'from dateutil.relativedelta import relativedelta\n'), ((31134, 31166), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'months': '(4)', 'hours': '(1)'}), '(months=4, hours=1)\n', (31147, 31166), False, 'from dateutil.relativedelta import relativedelta\n'), ((32236, 32288), 'portal.models.audit.Audit', 'Audit', ([], {'user_id': 'TEST_USER_ID', 'subject_id': 'TEST_USER_ID'}), '(user_id=TEST_USER_ID, subject_id=TEST_USER_ID)\n', (32241, 32288), False, 'from portal.models.audit import Audit\n'), ((11844, 11877), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'months': 'months_back'}), '(months=months_back)\n', (11857, 11877), False, 'from dateutil.relativedelta import relativedelta\n'), ((12102, 12162), 'portal.models.organization.Organization.query.filter', 'Organization.query.filter', (["(Organization.name == 'metastatic')"], {}), "(Organization.name == 'metastatic')\n", (12127, 12162), False, 'from portal.models.organization import Organization\n'), ((13677, 13737), 'portal.models.organization.Organization.query.filter', 'Organization.query.filter', (["(Organization.name == 'metastatic')"], {}), "(Organization.name == 'metastatic')\n", (13702, 13737), False, 'from portal.models.organization import Organization\n'), ((30706, 30727), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'days': '(1)'}), '(days=1)\n', (30719, 30727), False, 'from dateutil.relativedelta import relativedelta\n')]
|
'''
File: \resource.py
Project: NumberRecongization
Created Date: Monday March 26th 2018
Author: Huisama
-----
Last Modified: Saturday March 31st 2018 11:08:21 pm
Modified By: Huisama
-----
Copyright (c) 2018 Hui
'''
import os
import scipy.misc as scm
import random
import numpy as np
import PIL
# STD_WIDTH = 667
# STD_HEIGHT = 83
STD_WIDTH = 252
STD_HEIGHT = 40
import matplotlib.pyplot as plt
'''
This class represents the dataset and provides data processing operations
'''
class DataSet(object):
def __init__(self, data_dir, batch_size):
self.data_dir = data_dir
self.batch_size = batch_size
self.train_set_ratio = 0.8
self.validate_set_ratio = 0.1
'''
Get mean width and height of dataset
'''
def get_data_mean_size(self):
full_width, full_height = 0, 0
count = 0
def dummy(self, dir, file):
nonlocal full_width, full_height, count
filename = os.path.splitext(file)
if filename[1] == '.png':
fullfile = os.path.join(self.data_dir, dir, file)
width, height = self.get_size(fullfile)
full_width += width
full_height += height
print("%s, %s" % (width, height))
count += 1
self.lookup_dataset_dir(dummy)
return full_width / count, full_height / count
'''
Get width and height of a single image
'''
def get_size(self, image_file_path):
img = scm.imread(image_file_path)
return img.shape[1], img.shape[0]
'''
Load dataset
'''
def load_dataset(self):
self.neg_data = []
self.pos_data = []
self.poscount = 0
self.negcount = 0
def dummy(self, dir, file):
if file == 'dataset.txt':
# open and read in
with open(os.path.join(self.data_dir, dir, file)) as file:
for line in file:
newline = line.strip()
splittext = newline.split('\t')
if int(splittext[2]) == 1:
self.pos_data.append((
os.path.join(self.data_dir, dir, splittext[0]),
os.path.join(self.data_dir, dir, splittext[1]),
int(splittext[2])))
self.poscount += 1
else:
self.neg_data.append((
os.path.join(self.data_dir, dir, splittext[0]),
os.path.join(self.data_dir, dir, splittext[1]),
int(splittext[2])))
self.negcount += 1
self.lookup_dataset_dir(dummy)
# print("negcount: %d, poscount: %d" % (self.negcount, self.poscount))
return True
'''
    Check that all images have exactly 3 channels
'''
def check_image_channels(self):
def dummy(self, dir, file):
filename = os.path.splitext(file)
if filename[1] == '.png':
fullfile = os.path.join(self.data_dir, dir, file)
img = scm.imread(fullfile)
if img.shape[2] != 3:
print("Wrong image: %d", fullfile)
self.lookup_dataset_dir(dummy)
'''
Generate dataset after loading dataset
'''
def generate_dataset(self):
random.shuffle(self.neg_data)
random.shuffle(self.pos_data)
# total = len(self.data)
pos_total = len(self.pos_data)
pos_train_size = int(pos_total * self.train_set_ratio)
pos_validate_size = int(pos_total * self.validate_set_ratio)
# pos_test_size = pos_total - pos_train_size - pos_validate_size
neg_total = len(self.neg_data)
neg_train_size = int(neg_total * self.train_set_ratio)
neg_validate_size = int(neg_total * self.validate_set_ratio)
# neg_test_size = neg_total - neg_train_size - neg_validate_size
self.batch_index = 0
self.pos_train_set = self.pos_data[0 : pos_train_size]
pos_validation_set = self.pos_data[pos_train_size : pos_train_size + pos_validate_size]
pos_test_set = self.pos_data[pos_train_size + pos_validate_size : pos_total]
self.neg_train_set = self.neg_data[0 : neg_train_size]
neg_validation_set = self.neg_data[neg_train_size : neg_train_size + neg_validate_size]
neg_test_set = self.neg_data[neg_train_size + neg_validate_size : neg_total]
dec = len(neg_validation_set) - len(pos_validation_set)
for _ in range(dec):
pos_validation_set.append(random.choice(self.pos_data))
dec = len(neg_test_set) - len(pos_test_set)
for _ in range(dec):
pos_test_set.append(random.choice(self.pos_data))
self.validation_set = []
self.validation_set.extend(pos_validation_set)
self.validation_set.extend(neg_validation_set)
self.test_set = []
self.test_set.extend(pos_test_set)
self.test_set.extend(neg_test_set)
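    # The split above is 80% train / 10% validation / 10% test per class (see
    # train_set_ratio / validate_set_ratio); the validation and test sets are then
    # balanced by appending randomly chosen positive pairs until they match the
    # negative counts, while next_batch() balances each training batch on its own.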
'''
    Walk through every file in the dataset directory and invoke the callback
'''
def lookup_dataset_dir(self, callback):
for _, dirs, _ in os.walk(self.data_dir):
for dir in dirs:
for _, _, files in os.walk(os.path.join(self.data_dir, dir)):
for file in files:
callback(self, dir, file)
'''
    Get combined image data for an image pair
'''
def get_image_data(self, tp):
image1, image2 = scm.imread(tp[0]), scm.imread(tp[1])
newimg1 = np.array(scm.imresize(image1, (STD_HEIGHT, STD_WIDTH)))
newimg2 = np.array(scm.imresize(image2, (STD_HEIGHT, STD_WIDTH)))
# img_comb = np.hstack((newimg1, newimg2))[:, :, np.newaxis]
img_comb = np.dstack((newimg1, newimg2))
return img_comb / 255.0
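    # get_image_data() yields a STD_HEIGHT x STD_WIDTH x 6 array: the two RGB images
    # are resized to the same size, stacked along the channel axis, then scaled to [0, 1].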
'''
    Get a batch of training data (half negative, half positive samples)
'''
def next_batch(self, batch_size):
random_neg = batch_size // 2
random_pos = batch_size - random_neg
org_pos_data = []
org_neg_data = []
for _ in range(random_pos):
org_pos_data.append(random.choice(self.pos_train_set))
for _ in range(random_neg):
org_neg_data.append(random.choice(self.neg_train_set))
pos_data = list(map(self.get_image_data, org_pos_data))
pos_labels = list(map(lambda e: e[2], org_pos_data))
neg_data = list(map(self.get_image_data, org_neg_data))
neg_labels = list(map(lambda e: e[2], org_neg_data))
pos_data.extend(neg_data)
pos_labels.extend(neg_labels)
return np.array(pos_data), np.array(pos_labels)
'''
Get validation dataset
'''
def get_validation_set(self):
data = np.array(list(map(self.get_image_data, self.validation_set)))
labels = np.array(list(map(lambda e: e[2], self.validation_set)))
return data, labels
'''
Get test dataset
'''
def get_test_set(self):
data = np.array(list(map(self.get_image_data, self.test_set)))
labels = np.array(list(map(lambda e: e[2], self.test_set)))
return data, labels
# obj = DataSet('./Pic', 8)
# obj.check_image_channels()
# obj.load_dataset()
# obj.generate_dataset()
# data, labels = obj.next_batch(8)
# while done != True:
# print(data[0][0].dtype)
# data, labels, done = obj.next_batch()
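# Illustrative end-to-end sketch (the './Pic' directory and the batch size are
# placeholders, not shipped with this module):
#
# dataset = DataSet('./Pic', 8)
# dataset.load_dataset()
# dataset.generate_dataset()
# batch_images, batch_labels = dataset.next_batch(8)    # batch_images.shape == (8, 40, 252, 6)
# val_images, val_labels = dataset.get_validation_set()
# test_images, test_labels = dataset.get_test_set()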
|
[
"numpy.dstack",
"random.shuffle",
"os.walk",
"random.choice",
"numpy.array",
"os.path.splitext",
"scipy.misc.imresize",
"os.path.join",
"scipy.misc.imread"
] |
[((1508, 1535), 'scipy.misc.imread', 'scm.imread', (['image_file_path'], {}), '(image_file_path)\n', (1518, 1535), True, 'import scipy.misc as scm\n'), ((3505, 3534), 'random.shuffle', 'random.shuffle', (['self.neg_data'], {}), '(self.neg_data)\n', (3519, 3534), False, 'import random\n'), ((3543, 3572), 'random.shuffle', 'random.shuffle', (['self.pos_data'], {}), '(self.pos_data)\n', (3557, 3572), False, 'import random\n'), ((5316, 5338), 'os.walk', 'os.walk', (['self.data_dir'], {}), '(self.data_dir)\n', (5323, 5338), False, 'import os\n'), ((5919, 5948), 'numpy.dstack', 'np.dstack', (['(newimg1, newimg2)'], {}), '((newimg1, newimg2))\n', (5928, 5948), True, 'import numpy as np\n'), ((960, 982), 'os.path.splitext', 'os.path.splitext', (['file'], {}), '(file)\n', (976, 982), False, 'import os\n'), ((3097, 3119), 'os.path.splitext', 'os.path.splitext', (['file'], {}), '(file)\n', (3113, 3119), False, 'import os\n'), ((5635, 5652), 'scipy.misc.imread', 'scm.imread', (['tp[0]'], {}), '(tp[0])\n', (5645, 5652), True, 'import scipy.misc as scm\n'), ((5654, 5671), 'scipy.misc.imread', 'scm.imread', (['tp[1]'], {}), '(tp[1])\n', (5664, 5671), True, 'import scipy.misc as scm\n'), ((5700, 5745), 'scipy.misc.imresize', 'scm.imresize', (['image1', '(STD_HEIGHT, STD_WIDTH)'], {}), '(image1, (STD_HEIGHT, STD_WIDTH))\n', (5712, 5745), True, 'import scipy.misc as scm\n'), ((5774, 5819), 'scipy.misc.imresize', 'scm.imresize', (['image2', '(STD_HEIGHT, STD_WIDTH)'], {}), '(image2, (STD_HEIGHT, STD_WIDTH))\n', (5786, 5819), True, 'import scipy.misc as scm\n'), ((6750, 6768), 'numpy.array', 'np.array', (['pos_data'], {}), '(pos_data)\n', (6758, 6768), True, 'import numpy as np\n'), ((6770, 6790), 'numpy.array', 'np.array', (['pos_labels'], {}), '(pos_labels)\n', (6778, 6790), True, 'import numpy as np\n'), ((1048, 1086), 'os.path.join', 'os.path.join', (['self.data_dir', 'dir', 'file'], {}), '(self.data_dir, dir, file)\n', (1060, 1086), False, 'import os\n'), ((3185, 3223), 'os.path.join', 'os.path.join', (['self.data_dir', 'dir', 'file'], {}), '(self.data_dir, dir, file)\n', (3197, 3223), False, 'import os\n'), ((3246, 3266), 'scipy.misc.imread', 'scm.imread', (['fullfile'], {}), '(fullfile)\n', (3256, 3266), True, 'import scipy.misc as scm\n'), ((4760, 4788), 'random.choice', 'random.choice', (['self.pos_data'], {}), '(self.pos_data)\n', (4773, 4788), False, 'import random\n'), ((4904, 4932), 'random.choice', 'random.choice', (['self.pos_data'], {}), '(self.pos_data)\n', (4917, 4932), False, 'import random\n'), ((6271, 6304), 'random.choice', 'random.choice', (['self.pos_train_set'], {}), '(self.pos_train_set)\n', (6284, 6304), False, 'import random\n'), ((6375, 6408), 'random.choice', 'random.choice', (['self.neg_train_set'], {}), '(self.neg_train_set)\n', (6388, 6408), False, 'import random\n'), ((5412, 5444), 'os.path.join', 'os.path.join', (['self.data_dir', 'dir'], {}), '(self.data_dir, dir)\n', (5424, 5444), False, 'import os\n'), ((1887, 1925), 'os.path.join', 'os.path.join', (['self.data_dir', 'dir', 'file'], {}), '(self.data_dir, dir, file)\n', (1899, 1925), False, 'import os\n'), ((2240, 2286), 'os.path.join', 'os.path.join', (['self.data_dir', 'dir', 'splittext[0]'], {}), '(self.data_dir, dir, splittext[0])\n', (2252, 2286), False, 'import os\n'), ((2320, 2366), 'os.path.join', 'os.path.join', (['self.data_dir', 'dir', 'splittext[1]'], {}), '(self.data_dir, dir, splittext[1])\n', (2332, 2366), False, 'import os\n'), ((2580, 2626), 'os.path.join', 'os.path.join', (['self.data_dir', 'dir', 
'splittext[0]'], {}), '(self.data_dir, dir, splittext[0])\n', (2592, 2626), False, 'import os\n'), ((2660, 2706), 'os.path.join', 'os.path.join', (['self.data_dir', 'dir', 'splittext[1]'], {}), '(self.data_dir, dir, splittext[1])\n', (2672, 2706), False, 'import os\n')]
|
import os
import json
import sys
from tqdm import tqdm
from evaluation.mesh_utils import read_obj, read_ply, calculate_face_area, compute_face_centers, \
nearest_neighbour_of_face_centers
from iou_calculations import *
# BuildNet directories
BUILDNET_BASE_DIR = os.path.join(os.sep, "media", "maria", "BigData1", "Maria", "buildnet_data_2k")
assert (os.path.isdir(BUILDNET_BASE_DIR))
BUILDNET_OBJ_DIR = os.path.join(BUILDNET_BASE_DIR, "flippedNormal_unit_obj_withtexture")
assert (os.path.isdir(BUILDNET_OBJ_DIR))
BUILDNET_PTS_DIR = os.path.join(BUILDNET_BASE_DIR, "100K_inverted_normals", "nocolor")
assert (os.path.isdir(BUILDNET_PTS_DIR))
BUILDNET_PTS_LABELS_DIR = os.path.join(BUILDNET_BASE_DIR, "100K_inverted_normals", "point_labels_32")
assert (os.path.isdir(BUILDNET_PTS_LABELS_DIR))
BUILDNET_PTS_FACEINDEX_DIR = os.path.join(BUILDNET_BASE_DIR, "100K_inverted_normals", "faceindex")
assert (os.path.isdir(BUILDNET_PTS_FACEINDEX_DIR))
BUILDNET_COMP_TO_LABELS_DIR = os.path.join(BUILDNET_BASE_DIR, "100K_inverted_normals", "component_label_32")
assert (os.path.isdir(BUILDNET_COMP_TO_LABELS_DIR))
BUILDNET_SPLITS_DIR = os.path.join(BUILDNET_BASE_DIR, "dataset")
assert (os.path.isdir(BUILDNET_SPLITS_DIR))
BUILDNET_TEST_SPLIT = os.path.join(BUILDNET_SPLITS_DIR, "test_split.txt")
assert (os.path.isfile(BUILDNET_TEST_SPLIT))
# Network results directory
NET_RESULTS_DIR = sys.argv[1]
assert (os.path.isdir(NET_RESULTS_DIR))
# Create directories for best results
BEST_POINTS_DIR = os.path.join(NET_RESULTS_DIR, "best_points")
os.makedirs(BEST_POINTS_DIR, exist_ok=True)
BEST_TRIANGLES_DIR = os.path.join(NET_RESULTS_DIR, "best_triangles")
os.makedirs(BEST_TRIANGLES_DIR, exist_ok=True)
BEST_COMP_DIR = os.path.join(NET_RESULTS_DIR, "best_comp")
os.makedirs(BEST_COMP_DIR, exist_ok=True)
# Create directories for aggregated mesh features
FACE_FEAT_FROM_TR_DIR = os.path.join(NET_RESULTS_DIR, "face_feat_from_tr")
os.makedirs(FACE_FEAT_FROM_TR_DIR, exist_ok=True)
FACE_FEAT_FROM_COMP_DIR = os.path.join(NET_RESULTS_DIR, "face_feat_from_comp")
os.makedirs(FACE_FEAT_FROM_COMP_DIR, exist_ok=True)
def classification_accuracy(ground, prediction, face_area=None):
"""
Classification accuracy
:param ground: N x 1, numpy.ndarray(int)
:param prediction: N x 1, numpy.ndarray(int)
:param face_area: N x 1, numpy.ndarray(float)
:return:
accuracy: float
"""
prediction = np.copy(prediction)
ground = np.copy(ground)
non_zero_idx = np.squeeze(ground != 0).nonzero()[0]
ground = ground[non_zero_idx]
prediction = prediction[non_zero_idx]
if face_area is not None:
face_area = np.copy(face_area)
face_area = face_area[non_zero_idx]
accuracy = np.dot(face_area.T, ground == prediction)[0] / np.sum(face_area)
accuracy = accuracy[0]
else:
accuracy = np.sum(ground == prediction) / float(len(ground))
return accuracy
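# A quick sanity check of the area weighting, using made-up toy values:
#   ground = [[1], [2], [0]], prediction = [[1], [3], [1]], face_area = [[2.], [1.], [5.]]
#   label 0 is treated as undetermined and dropped, leaving two faces with areas 2 and 1,
#   of which only the first is predicted correctly, so the area-weighted accuracy is
#   2 / 3 ~= 0.67 (without face_area the plain accuracy over the two kept faces is 0.5).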
def transfer_point_predictions(vertices, faces, components, points, point_feat, point_face_index, max_pool=False):
"""
Transfer point predictions to triangles and components through avg pooling
:param vertices: N x 3, numpy.ndarray(float)
:param faces: M x 3, numpy.ndarray(int)
:param components: M x 1, numpy.ndarray(int)
:param points: K x 3, numpy.ndarray(float)
:param point_feat: K x 31, numpy.ndarray(float)
:param point_face_index: K x 1, numpy.ndarray(int)
:param max_pool: bool
:return:
face_labels_from_triangle_avg_pool: M x 1, numpy.ndarray(int)
face_labels_from_comp_avg_pool: M x 1, numpy.ndarray(int)
face_feat_from_tr_avg_pool: M x 31, numpy.ndarray(float)
face_feat_from_comp_avg_pool: M x 31, numpy.ndarray(float)
        face_labels_from_triangle_max_pool: M x 1, numpy.ndarray(int) (only returned when max_pool=True)
        face_labels_from_comp_max_pool: M x 1, numpy.ndarray(int) (only returned when max_pool=True)
"""
n_components = len(np.unique(components))
face_feat_from_tr_avg_pool = np.zeros((faces.shape[0], point_feat.shape[1]))
face_feat_from_comp_avg_pool = np.zeros((faces.shape[0], point_feat.shape[1]))
comp_feat_avg_pool = np.zeros((n_components, point_feat.shape[1]))
if max_pool:
face_feat_from_tr_max_pool = np.zeros_like(face_feat_from_tr_avg_pool)
face_feat_from_comp_max_pool = np.zeros_like(face_feat_from_comp_avg_pool)
comp_feat_max_pool = np.zeros_like(comp_feat_avg_pool)
face_point_index = {}
# Find faces that have no corresponding points
sampled = set(point_face_index.flatten())
unsampled = list(set(np.arange(len(faces))) - sampled) # faces with no sample points
face_centers = compute_face_centers(faces, unsampled, vertices)
# Transfer point predictions to triangles
# Find nearest point and assign its point feature to each unsampled face
nearest_neighbour_of_face_centers(face_centers, face_feat_from_tr_avg_pool, face_point_index,
point_feat, points, unsampled)
if max_pool: # unsampled faces have only one point, so max == avg. feat. , that of the nearest point
face_feat_from_tr_max_pool = np.copy(face_feat_from_tr_avg_pool)
# Use avg pooling for sampled faces
for face in sampled:
mask = np.squeeze(point_face_index == face)
face_feat_from_tr_avg_pool[face] = np.mean(point_feat[mask], axis=0)
if max_pool:
# Use max pooling also
face_feat_from_tr_max_pool[face] = np.amax(point_feat[mask], axis=0)
face_point_index[face] = mask.nonzero()[0].tolist()
# Transfer point predictions to components
for comp_idx in range(comp_feat_avg_pool.shape[0]):
face_idx = np.squeeze(components == comp_idx).nonzero()[0]
point_idx = []
for idx in face_idx:
try:
point_idx.extend(face_point_index[int(idx)])
except:
point_idx.append(face_point_index[int(idx)])
comp_feat_avg_pool[comp_idx] = np.mean(point_feat[point_idx], axis=0)
face_feat_from_comp_avg_pool[face_idx] = comp_feat_avg_pool[comp_idx]
if max_pool:
comp_feat_max_pool[comp_idx] = np.amax(point_feat[point_idx], axis=0)
face_feat_from_comp_max_pool[face_idx] = comp_feat_max_pool[comp_idx]
face_labels_from_tr_avg_pool = np.argmax(face_feat_from_tr_avg_pool, axis=1)[:,
np.newaxis] + 1 # we exclude undetermined (label 0) during training
face_labels_from_comp_avg_pool = np.argmax(face_feat_from_comp_avg_pool, axis=1)[:, np.newaxis] + 1
if max_pool:
face_labels_from_tr_max_pool = np.argmax(face_feat_from_tr_max_pool, axis=1)[:, np.newaxis] + 1
face_labels_from_comp_max_pool = np.argmax(face_feat_from_comp_max_pool, axis=1)[:, np.newaxis] + 1
return face_labels_from_tr_avg_pool, face_labels_from_comp_avg_pool, face_feat_from_tr_avg_pool, \
face_feat_from_comp_avg_pool, face_labels_from_tr_max_pool, face_labels_from_comp_max_pool
return face_labels_from_tr_avg_pool, face_labels_from_comp_avg_pool, face_feat_from_tr_avg_pool, \
face_feat_from_comp_avg_pool
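# Summary of the pooling above: a sampled face takes the mean (and, with max_pool=True,
# also the max) of the features of the points that fall on it; an unsampled face borrows
# the feature of the point nearest to its center; a component pools the features of all
# points belonging to its faces. Labels are argmax + 1 because label 0 (undetermined)
# is excluded from the predicted classes.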
def get_split_models(split_fn):
"""
Read split.txt file and return model names
:param split_fn:
:return:
models_fn: list(str)
"""
models_fn = []
with open(split_fn, 'r') as fin:
for line in fin:
models_fn.append(line.strip())
return models_fn
def get_point_cloud_data(model_name):
"""
Get point cloud data needed for evaluation
:param model_name: str
:return:
points: N x 3, numpy.ndarray(float)
point_gt_labels: N x 1, numpy.ndarray(int)
point_pred_labels: N x 1, numpy.ndarray(int)
point_pred_feat: N x 31, numpy.ndarray(float)
point_face_index: N x 1, numpy.ndarray(int)
"""
# Get points
points, _ = read_ply(os.path.join(BUILDNET_PTS_DIR, model_name + ".ply"))
# Get ground truth labels
with open(os.path.join(BUILDNET_PTS_LABELS_DIR, model_name + "_label.json"), 'r') as fin_json:
labels_json = json.load(fin_json)
point_gt_labels = np.fromiter(labels_json.values(), dtype=int)[:, np.newaxis]
assert (points.shape[0] == point_gt_labels.shape[0])
# Get per point features (probabilities)
try:
        point_feat = np.load(os.path.join(NET_RESULTS_DIR, model_name + ".npy"))
except FileNotFoundError:
point_feat = np.zeros((point_gt_labels.shape[0], len(toplabels) - 1))
assert (point_feat.shape[0] == point_gt_labels.shape[0])
assert (point_feat.shape[1] == (len(toplabels) - 1))
# Calculate pred label
point_pred_labels = np.argmax(point_feat, axis=1)[:,
np.newaxis] + 1 # we exclude undetermined (label 0) during training
assert (point_gt_labels.shape == point_pred_labels.shape)
# Get points face index
with open(os.path.join(BUILDNET_PTS_FACEINDEX_DIR, model_name + ".txt"), 'r') as fin_txt:
point_face_index = fin_txt.readlines()
point_face_index = np.asarray([int(p.strip()) for p in point_face_index], dtype=int)[:, np.newaxis]
assert (point_face_index.shape == point_gt_labels.shape)
return points, point_gt_labels, point_pred_labels, point_feat, point_face_index
def get_mesh_data_n_labels(model_name):
"""
Get mesh data needed for evaluation
:param model_name: str
:return:
vertices: N x 3, numpy.ndarray(float)
faces: M x 3, numpy.ndarray(int)
face_labels: M x 1, numpy.ndarray(int)
components: M x 1, numpy.ndarray(float)
face_area: M x 1, numpy.ndarray(float)
"""
# Load obj
vertices, faces, components = read_obj(obj_fn=os.path.join(BUILDNET_OBJ_DIR, model_name + ".obj"))
# Calculate face area
    faces -= 1  # OBJ face indices are 1-based; shift to 0-based before computing areas
face_area = calculate_face_area(vertices=vertices, faces=faces)
assert (face_area.shape[0] == faces.shape[0])
# Read components to labels
with open(os.path.join(BUILDNET_COMP_TO_LABELS_DIR, model_name + "_label.json"), 'r') as fin_json:
labels_json = json.load(fin_json)
face_labels = np.zeros_like(components)
for comp, label in labels_json.items():
face_labels[np.where(components == int(comp))[0]] = label
return vertices, faces, face_labels, components, face_area
def save_pred_in_json(labels, fn_json):
"""
Save labels in json format
:param labels: N x 1, numpy.ndarray(int)
:param fn_json: str
:return:
None
"""
# Convert numpy to dict
labels_json = dict(zip(np.arange(labels.shape[0]).astype(str), np.squeeze(labels).tolist()))
# Export json file
with open(fn_json, 'w') as fout_json:
json.dump(labels_json, fout_json)
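# The JSON written above maps element indices (as strings) to predicted labels,
# e.g. {"0": 5, "1": 5, "2": 12, ...}, with one entry per element of `labels`.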
if __name__ == "__main__":
top_k = 200
best_iou_model = np.zeros((top_k,))
best_iou_model[:] = 1e-9
best_model_points_pred, best_model_triangles_pred, best_model_comp_pred, best_model_fn = [[] for _ in range(top_k)], \
[[] for _ in range(top_k)], \
[[] for _ in range(top_k)], \
[[] for _ in range(top_k)]
# Get model names
models_fn = get_split_models(split_fn=BUILDNET_TEST_SPLIT)
point_buildings_iou, mesh_buildings_iou_from_tr, mesh_buildings_iou_from_comp, mesh_buildings_iou_from_tr_max_pool, \
mesh_buildings_iou_from_comp_max_pool = {}, {}, {}, {}, {}
point_buildings_acc, mesh_buildings_acc_from_tr, mesh_buildings_acc_from_comp, mesh_buildings_acc_from_tr_max_pool, \
mesh_buildings_acc_from_comp_max_pool = {}, {}, {}, {}, {}
print("Calculate part and shape IOU for point and mesh tracks")
for model_fn in tqdm(models_fn):
# Get point cloud data
points, point_gt_labels, point_pred_labels, point_feat, point_face_index = get_point_cloud_data(model_fn)
# Get mesh data
vertices, faces, face_gt_labels, components, face_area = get_mesh_data_n_labels(model_fn)
# Infer face labels from point predictions
face_pred_labels_from_tr, face_pred_labels_from_comp, face_feat_from_tr, face_feat_from_comp, \
face_pred_labels_from_tr_max_pool, face_pred_labels_from_comp_max_pool = \
transfer_point_predictions(vertices, faces, components, points, point_feat, point_face_index, max_pool=True)
# Calculate point building iou
point_buildings_iou[model_fn] = get_building_point_iou(point_gt_labels, point_pred_labels)
# Calculate mesh building iou
mesh_buildings_iou_from_tr[model_fn] = get_building_mesh_iou(face_gt_labels, face_pred_labels_from_tr,
face_area)
mesh_buildings_iou_from_comp[model_fn] = get_building_mesh_iou(face_gt_labels, face_pred_labels_from_comp,
face_area)
mesh_buildings_iou_from_tr_max_pool[model_fn] = \
get_building_mesh_iou(face_gt_labels, face_pred_labels_from_tr_max_pool, face_area)
mesh_buildings_iou_from_comp_max_pool[model_fn] = \
get_building_mesh_iou(face_gt_labels, face_pred_labels_from_comp_max_pool, face_area)
# Calculate classification accuracy
point_buildings_acc[model_fn] = classification_accuracy(point_gt_labels, point_pred_labels)
mesh_buildings_acc_from_tr[model_fn] = classification_accuracy(face_gt_labels, face_pred_labels_from_tr)
mesh_buildings_acc_from_comp[model_fn] = classification_accuracy(face_gt_labels, face_pred_labels_from_comp)
mesh_buildings_acc_from_tr_max_pool[model_fn] = \
classification_accuracy(face_gt_labels, face_pred_labels_from_tr_max_pool)
mesh_buildings_acc_from_comp_max_pool[model_fn] = \
classification_accuracy(face_gt_labels, face_pred_labels_from_comp_max_pool)
# Save mesh feat data
np.save(os.path.join(FACE_FEAT_FROM_TR_DIR, model_fn + ".npy"), face_feat_from_tr.astype(np.float32))
np.save(os.path.join(FACE_FEAT_FROM_COMP_DIR, model_fn + ".npy"), face_feat_from_comp.astype(np.float32))
# Save best and worst model
label_iou = mesh_buildings_iou_from_comp[model_fn]["label_iou"]
s_iou = np.sum([v for v in label_iou.values()]) / float(len(label_iou)) + 1 # handle cases where iou=0
if s_iou > best_iou_model[-1]:
best_iou_model[top_k - 1] = s_iou
best_model_points_pred[top_k - 1] = point_pred_labels
best_model_triangles_pred[top_k - 1] = face_pred_labels_from_tr
best_model_comp_pred[top_k - 1] = face_pred_labels_from_comp
best_model_fn[top_k - 1] = model_fn
sort_idx = np.argsort(1 / np.asarray(best_iou_model)).tolist()
best_iou_model = best_iou_model[sort_idx]
best_model_points_pred = [best_model_points_pred[idx] for idx in sort_idx]
best_model_triangles_pred = [best_model_triangles_pred[idx] for idx in sort_idx]
best_model_comp_pred = [best_model_comp_pred[idx] for idx in sort_idx]
best_model_fn = [best_model_fn[idx] for idx in sort_idx]
best_iou_model -= 1 # restore to original values
# Calculate avg point part and shape IOU
point_shape_iou = get_shape_iou(buildings_iou=point_buildings_iou)
point_part_iou = get_part_iou(buildings_iou=point_buildings_iou)
mesh_shape_iou_from_tr = get_shape_iou(buildings_iou=mesh_buildings_iou_from_tr)
mesh_part_iou_from_tr = get_part_iou(buildings_iou=mesh_buildings_iou_from_tr)
mesh_shape_iou_from_comp = get_shape_iou(buildings_iou=mesh_buildings_iou_from_comp)
mesh_part_iou_from_comp = get_part_iou(buildings_iou=mesh_buildings_iou_from_comp)
mesh_shape_iou_from_tr_max_pool = get_shape_iou(buildings_iou=mesh_buildings_iou_from_tr_max_pool)
mesh_part_iou_from_tr_max_pool = get_part_iou(buildings_iou=mesh_buildings_iou_from_tr_max_pool)
mesh_shape_iou_from_comp_max_pool = get_shape_iou(buildings_iou=mesh_buildings_iou_from_comp_max_pool)
mesh_part_iou_from_comp_max_pool = get_part_iou(buildings_iou=mesh_buildings_iou_from_comp_max_pool)
point_acc = np.sum([acc for acc in point_buildings_acc.values()]) / float(len(point_buildings_acc))
mesh_acc_from_tr = np.sum([acc for acc in mesh_buildings_acc_from_tr.values()]) / float(
len(mesh_buildings_acc_from_tr))
mesh_acc_from_comp = np.sum([acc for acc in mesh_buildings_acc_from_comp.values()]) / float(
len(mesh_buildings_acc_from_comp))
mesh_acc_from_tr_max_pool = np.sum([acc for acc in mesh_buildings_acc_from_tr_max_pool.values()]) / float(
len(mesh_buildings_acc_from_tr_max_pool))
mesh_acc_from_comp_max_pool = np.sum([acc for acc in mesh_buildings_acc_from_comp_max_pool.values()]) / float(
len(mesh_buildings_acc_from_comp_max_pool))
# Save best
buf = ''
for i in range(top_k):
print(best_iou_model[i]); print(best_model_fn[i])
buf += "Best model iou: " + str(best_iou_model[i]) + ", " + best_model_fn[i] + '\n'
save_pred_in_json(best_model_points_pred[i], os.path.join(BEST_POINTS_DIR, best_model_fn[i] + "_label.json"))
save_pred_in_json(best_model_triangles_pred[i],
os.path.join(BEST_TRIANGLES_DIR, best_model_fn[i] + "_label.json"))
save_pred_in_json(best_model_comp_pred[i], os.path.join(BEST_COMP_DIR, best_model_fn[i] + "_label.json"))
# Log results
buf += "Point Classification Accuracy: " + str(np.round(point_acc * 100, 2)) + '\n' \
"Point Shape IoU: " + str(
np.round(point_shape_iou['all'] * 100, 2)) + '\n' \
"Point Part IoU: " + str(
np.round(point_part_iou['all'] * 100, 2)) + '\n' \
"Point Part IoU - FR: " + str(
np.round(point_part_iou['fr-part'] * 100, 2)) + '\n' \
"Per label point part IoU: " + ", ".join([label + ": " +
str(np.round(
point_part_iou[
label] * 100,
2)) for label in
toplabels.values() if
label != "undetermined"]) + '\n' \
"Average Pooling" + '\n' \
"---------------" + '\n' \
"Mesh Classification Accuracy From Triangles: " + str(
np.round(mesh_acc_from_tr * 100, 2)) + '\n' \
"Mesh Shape IoU From Triangles: " + str(
np.round(mesh_shape_iou_from_tr['all'] * 100, 2)) + '\n' \
"Mesh Part IoU From Triangles: " + str(
np.round(mesh_part_iou_from_tr['all'] * 100, 2)) + '\n' \
"Mesh Part IoU From Triangles - FR: " + str(
np.round(mesh_part_iou_from_tr['fr-part'] * 100, 2)) + '\n' \
"Mesh Classification Accuracy From Comp: " + str(
np.round(mesh_acc_from_comp * 100, 2)) + '\n' \
"Mesh Shape IoU From Comp: " + str(
np.round(mesh_shape_iou_from_comp['all'] * 100, 2)) + '\n' \
"Mesh Part IoU From Comp: " + str(
np.round(mesh_part_iou_from_comp['all'] * 100, 2)) + '\n' \
"Mesh Part IoU From Comp- FR: " + str(
np.round(mesh_part_iou_from_comp['fr-part'] * 100, 2)) + '\n' \
"Per label mesh part IoU from triangles: " + ", ".join(
[label + ": " +
str(np.round(mesh_part_iou_from_tr[label][0] * 100, 2)) for label in toplabels.values() if
label != "undetermined"]) + '\n' \
"Per label mesh part IoU from comp: " + ", ".join([label + ": " +
str(np.round(
mesh_part_iou_from_comp[
label][0] * 100, 2)) for
label in toplabels.values() if
label != "undetermined"]) + '\n' \
"Max Pooling" + '\n' \
"-----------" + '\n' \
"Mesh Classification Accuracy From Triangles: " + str(
np.round(mesh_acc_from_tr_max_pool * 100, 2)) + '\n' \
"Mesh Shape IoU From Triangles: " + str(
np.round(mesh_shape_iou_from_tr_max_pool['all'] * 100, 2)) + '\n' \
"Mesh Part IoU From Triangles: " + str(
np.round(mesh_part_iou_from_tr_max_pool['all'] * 100, 2)) + '\n' \
"Mesh Part IoU From Triangles - FR: " + str(
np.round(mesh_part_iou_from_tr_max_pool['fr-part'] * 100, 2)) + '\n' \
"Mesh Classification Accuracy From Comp: " + str(
np.round(mesh_acc_from_comp_max_pool * 100, 2)) + '\n' \
"Mesh Shape IoU From Comp: " + str(
np.round(mesh_shape_iou_from_comp_max_pool['all'] * 100, 2)) + '\n' \
"Mesh Part IoU From Comp: " + str(
np.round(mesh_part_iou_from_comp_max_pool['all'] * 100, 2)) + '\n' \
"Mesh Part IoU From Comp- FR: " + str(
np.round(mesh_part_iou_from_comp_max_pool['fr-part'] * 100, 2)) + '\n' \
"Per label mesh part IoU from triangles: " + ", ".join(
[label + ": " +
str(np.round(mesh_part_iou_from_tr_max_pool[label][0] * 100, 2)) for label in toplabels.values() if
label != "undetermined"]) + '\n' \
"Per label mesh part IoU from comp: " + ", ".join([label + ": " +
str(np.round(
mesh_part_iou_from_comp_max_pool[
label][0] * 100, 2)) for
label in toplabels.values() if
label != "undetermined"]) + '\n'
print(buf)
with open(os.path.join(NET_RESULTS_DIR, "results_log.txt"), 'w') as fout_txt:
fout_txt.write(buf)
|
[
"json.dump",
"tqdm.tqdm",
"json.load",
"os.makedirs",
"os.path.isdir",
"evaluation.mesh_utils.nearest_neighbour_of_face_centers",
"os.path.isfile",
"evaluation.mesh_utils.compute_face_centers",
"evaluation.mesh_utils.calculate_face_area",
"os.path.join"
] |
[((267, 346), 'os.path.join', 'os.path.join', (['os.sep', '"""media"""', '"""maria"""', '"""BigData1"""', '"""Maria"""', '"""buildnet_data_2k"""'], {}), "(os.sep, 'media', 'maria', 'BigData1', 'Maria', 'buildnet_data_2k')\n", (279, 346), False, 'import os\n'), ((355, 387), 'os.path.isdir', 'os.path.isdir', (['BUILDNET_BASE_DIR'], {}), '(BUILDNET_BASE_DIR)\n', (368, 387), False, 'import os\n'), ((408, 477), 'os.path.join', 'os.path.join', (['BUILDNET_BASE_DIR', '"""flippedNormal_unit_obj_withtexture"""'], {}), "(BUILDNET_BASE_DIR, 'flippedNormal_unit_obj_withtexture')\n", (420, 477), False, 'import os\n'), ((486, 517), 'os.path.isdir', 'os.path.isdir', (['BUILDNET_OBJ_DIR'], {}), '(BUILDNET_OBJ_DIR)\n', (499, 517), False, 'import os\n'), ((538, 605), 'os.path.join', 'os.path.join', (['BUILDNET_BASE_DIR', '"""100K_inverted_normals"""', '"""nocolor"""'], {}), "(BUILDNET_BASE_DIR, '100K_inverted_normals', 'nocolor')\n", (550, 605), False, 'import os\n'), ((658, 733), 'os.path.join', 'os.path.join', (['BUILDNET_BASE_DIR', '"""100K_inverted_normals"""', '"""point_labels_32"""'], {}), "(BUILDNET_BASE_DIR, '100K_inverted_normals', 'point_labels_32')\n", (670, 733), False, 'import os\n'), ((796, 865), 'os.path.join', 'os.path.join', (['BUILDNET_BASE_DIR', '"""100K_inverted_normals"""', '"""faceindex"""'], {}), "(BUILDNET_BASE_DIR, '100K_inverted_normals', 'faceindex')\n", (808, 865), False, 'import os\n'), ((874, 915), 'os.path.isdir', 'os.path.isdir', (['BUILDNET_PTS_FACEINDEX_DIR'], {}), '(BUILDNET_PTS_FACEINDEX_DIR)\n', (887, 915), False, 'import os\n'), ((947, 1025), 'os.path.join', 'os.path.join', (['BUILDNET_BASE_DIR', '"""100K_inverted_normals"""', '"""component_label_32"""'], {}), "(BUILDNET_BASE_DIR, '100K_inverted_normals', 'component_label_32')\n", (959, 1025), False, 'import os\n'), ((1034, 1076), 'os.path.isdir', 'os.path.isdir', (['BUILDNET_COMP_TO_LABELS_DIR'], {}), '(BUILDNET_COMP_TO_LABELS_DIR)\n', (1047, 1076), False, 'import os\n'), ((1100, 1142), 'os.path.join', 'os.path.join', (['BUILDNET_BASE_DIR', '"""dataset"""'], {}), "(BUILDNET_BASE_DIR, 'dataset')\n", (1112, 1142), False, 'import os\n'), ((1151, 1185), 'os.path.isdir', 'os.path.isdir', (['BUILDNET_SPLITS_DIR'], {}), '(BUILDNET_SPLITS_DIR)\n', (1164, 1185), False, 'import os\n'), ((1209, 1260), 'os.path.join', 'os.path.join', (['BUILDNET_SPLITS_DIR', '"""test_split.txt"""'], {}), "(BUILDNET_SPLITS_DIR, 'test_split.txt')\n", (1221, 1260), False, 'import os\n'), ((1269, 1304), 'os.path.isfile', 'os.path.isfile', (['BUILDNET_TEST_SPLIT'], {}), '(BUILDNET_TEST_SPLIT)\n', (1283, 1304), False, 'import os\n'), ((1373, 1403), 'os.path.isdir', 'os.path.isdir', (['NET_RESULTS_DIR'], {}), '(NET_RESULTS_DIR)\n', (1386, 1403), False, 'import os\n'), ((1462, 1506), 'os.path.join', 'os.path.join', (['NET_RESULTS_DIR', '"""best_points"""'], {}), "(NET_RESULTS_DIR, 'best_points')\n", (1474, 1506), False, 'import os\n'), ((1507, 1550), 'os.makedirs', 'os.makedirs', (['BEST_POINTS_DIR'], {'exist_ok': '(True)'}), '(BEST_POINTS_DIR, exist_ok=True)\n', (1518, 1550), False, 'import os\n'), ((1572, 1619), 'os.path.join', 'os.path.join', (['NET_RESULTS_DIR', '"""best_triangles"""'], {}), "(NET_RESULTS_DIR, 'best_triangles')\n", (1584, 1619), False, 'import os\n'), ((1620, 1666), 'os.makedirs', 'os.makedirs', (['BEST_TRIANGLES_DIR'], {'exist_ok': '(True)'}), '(BEST_TRIANGLES_DIR, exist_ok=True)\n', (1631, 1666), False, 'import os\n'), ((1683, 1725), 'os.path.join', 'os.path.join', (['NET_RESULTS_DIR', '"""best_comp"""'], {}), "(NET_RESULTS_DIR, 
'best_comp')\n", (1695, 1725), False, 'import os\n'), ((1726, 1767), 'os.makedirs', 'os.makedirs', (['BEST_COMP_DIR'], {'exist_ok': '(True)'}), '(BEST_COMP_DIR, exist_ok=True)\n', (1737, 1767), False, 'import os\n'), ((1843, 1893), 'os.path.join', 'os.path.join', (['NET_RESULTS_DIR', '"""face_feat_from_tr"""'], {}), "(NET_RESULTS_DIR, 'face_feat_from_tr')\n", (1855, 1893), False, 'import os\n'), ((1894, 1943), 'os.makedirs', 'os.makedirs', (['FACE_FEAT_FROM_TR_DIR'], {'exist_ok': '(True)'}), '(FACE_FEAT_FROM_TR_DIR, exist_ok=True)\n', (1905, 1943), False, 'import os\n'), ((1970, 2022), 'os.path.join', 'os.path.join', (['NET_RESULTS_DIR', '"""face_feat_from_comp"""'], {}), "(NET_RESULTS_DIR, 'face_feat_from_comp')\n", (1982, 2022), False, 'import os\n'), ((2023, 2074), 'os.makedirs', 'os.makedirs', (['FACE_FEAT_FROM_COMP_DIR'], {'exist_ok': '(True)'}), '(FACE_FEAT_FROM_COMP_DIR, exist_ok=True)\n', (2034, 2074), False, 'import os\n'), ((4547, 4595), 'evaluation.mesh_utils.compute_face_centers', 'compute_face_centers', (['faces', 'unsampled', 'vertices'], {}), '(faces, unsampled, vertices)\n', (4567, 4595), False, 'from evaluation.mesh_utils import read_obj, read_ply, calculate_face_area, compute_face_centers, nearest_neighbour_of_face_centers\n'), ((4724, 4852), 'evaluation.mesh_utils.nearest_neighbour_of_face_centers', 'nearest_neighbour_of_face_centers', (['face_centers', 'face_feat_from_tr_avg_pool', 'face_point_index', 'point_feat', 'points', 'unsampled'], {}), '(face_centers, face_feat_from_tr_avg_pool,\n face_point_index, point_feat, points, unsampled)\n', (4757, 4852), False, 'from evaluation.mesh_utils import read_obj, read_ply, calculate_face_area, compute_face_centers, nearest_neighbour_of_face_centers\n'), ((9674, 9725), 'evaluation.mesh_utils.calculate_face_area', 'calculate_face_area', ([], {'vertices': 'vertices', 'faces': 'faces'}), '(vertices=vertices, faces=faces)\n', (9693, 9725), False, 'from evaluation.mesh_utils import read_obj, read_ply, calculate_face_area, compute_face_centers, nearest_neighbour_of_face_centers\n'), ((11727, 11742), 'tqdm.tqdm', 'tqdm', (['models_fn'], {}), '(models_fn)\n', (11731, 11742), False, 'from tqdm import tqdm\n'), ((7772, 7823), 'os.path.join', 'os.path.join', (['BUILDNET_PTS_DIR', "(model_name + '.ply')"], {}), "(BUILDNET_PTS_DIR, model_name + '.ply')\n", (7784, 7823), False, 'import os\n'), ((7977, 7996), 'json.load', 'json.load', (['fin_json'], {}), '(fin_json)\n', (7986, 7996), False, 'import json\n'), ((9934, 9953), 'json.load', 'json.load', (['fin_json'], {}), '(fin_json)\n', (9943, 9953), False, 'import json\n'), ((10543, 10576), 'json.dump', 'json.dump', (['labels_json', 'fout_json'], {}), '(labels_json, fout_json)\n', (10552, 10576), False, 'import json\n'), ((7870, 7935), 'os.path.join', 'os.path.join', (['BUILDNET_PTS_LABELS_DIR', "(model_name + '_label.json')"], {}), "(BUILDNET_PTS_LABELS_DIR, model_name + '_label.json')\n", (7882, 7935), False, 'import os\n'), ((8220, 8268), 'os.path.join', 'os.path.join', (['NET_RESULTS_DIR', "(model_fn + '.npy')"], {}), "(NET_RESULTS_DIR, model_fn + '.npy')\n", (8232, 8268), False, 'import os\n'), ((8779, 8840), 'os.path.join', 'os.path.join', (['BUILDNET_PTS_FACEINDEX_DIR', "(model_name + '.txt')"], {}), "(BUILDNET_PTS_FACEINDEX_DIR, model_name + '.txt')\n", (8791, 8840), False, 'import os\n'), ((9563, 9614), 'os.path.join', 'os.path.join', (['BUILDNET_OBJ_DIR', "(model_name + '.obj')"], {}), "(BUILDNET_OBJ_DIR, model_name + '.obj')\n", (9575, 9614), False, 'import os\n'), ((9823, 9892), 
'os.path.join', 'os.path.join', (['BUILDNET_COMP_TO_LABELS_DIR', "(model_name + '_label.json')"], {}), "(BUILDNET_COMP_TO_LABELS_DIR, model_name + '_label.json')\n", (9835, 9892), False, 'import os\n'), ((13960, 14014), 'os.path.join', 'os.path.join', (['FACE_FEAT_FROM_TR_DIR', "(model_fn + '.npy')"], {}), "(FACE_FEAT_FROM_TR_DIR, model_fn + '.npy')\n", (13972, 14014), False, 'import os\n'), ((14070, 14126), 'os.path.join', 'os.path.join', (['FACE_FEAT_FROM_COMP_DIR', "(model_fn + '.npy')"], {}), "(FACE_FEAT_FROM_COMP_DIR, model_fn + '.npy')\n", (14082, 14126), False, 'import os\n'), ((17165, 17228), 'os.path.join', 'os.path.join', (['BEST_POINTS_DIR', "(best_model_fn[i] + '_label.json')"], {}), "(BEST_POINTS_DIR, best_model_fn[i] + '_label.json')\n", (17177, 17228), False, 'import os\n'), ((17312, 17378), 'os.path.join', 'os.path.join', (['BEST_TRIANGLES_DIR', "(best_model_fn[i] + '_label.json')"], {}), "(BEST_TRIANGLES_DIR, best_model_fn[i] + '_label.json')\n", (17324, 17378), False, 'import os\n'), ((17431, 17492), 'os.path.join', 'os.path.join', (['BEST_COMP_DIR', "(best_model_fn[i] + '_label.json')"], {}), "(BEST_COMP_DIR, best_model_fn[i] + '_label.json')\n", (17443, 17492), False, 'import os\n'), ((24492, 24540), 'os.path.join', 'os.path.join', (['NET_RESULTS_DIR', '"""results_log.txt"""'], {}), "(NET_RESULTS_DIR, 'results_log.txt')\n", (24504, 24540), False, 'import os\n')]
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=wildcard-import, unused-wildcard-import
"""Contrib NDArray API of MXNet."""
import math
from ..context import current_context
from ..random import uniform
from ..base import _as_list
from . import ndarray
try:
from .gen_contrib import *
except ImportError:
pass
__all__ = ["rand_zipfian"]
# pylint: disable=line-too-long
def rand_zipfian(true_classes, num_sampled, range_max, ctx=None):
"""Draw random samples from an approximately log-uniform or Zipfian distribution.
    This operation randomly samples *num_sampled* candidates in the range of integers [0, range_max).
The elements of sampled_candidates are drawn with replacement from the base distribution.
The base distribution for this operator is an approximately log-uniform or Zipfian distribution:
P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1)
This sampler is useful when the true classes approximately follow such a distribution.
    This is the case, for example, when the classes represent words in a lexicon sorted in \
    decreasing order of frequency. If your classes are not ordered by decreasing frequency, do not use this op.
    Additionally, it returns the number of times each of the \
    true classes and the sampled classes is expected to occur.
Parameters
----------
true_classes : NDArray
A 1-D NDArray of the target classes.
num_sampled: int
The number of classes to randomly sample.
range_max: int
The number of possible classes.
ctx : Context
Device context of output. Default is current context.
Returns
-------
samples: NDArray
The sampled candidate classes in 1-D `int64` dtype.
expected_count_true: NDArray
The expected count for true classes in 1-D `float64` dtype.
expected_count_sample: NDArray
The expected count for sampled candidates in 1-D `float64` dtype.
Examples
--------
>>> true_cls = mx.nd.array([3])
>>> samples, exp_count_true, exp_count_sample = mx.nd.contrib.rand_zipfian(true_cls, 4, 5)
>>> samples
[1 3 3 3]
<NDArray 4 @cpu(0)>
>>> exp_count_true
[ 0.12453879]
<NDArray 1 @cpu(0)>
>>> exp_count_sample
[ 0.22629439 0.12453879 0.12453879 0.12453879]
<NDArray 4 @cpu(0)>
"""
if ctx is None:
ctx = current_context()
log_range = math.log(range_max + 1)
rand = uniform(0, log_range, shape=(num_sampled,), dtype='float64', ctx=ctx)
# make sure sampled_classes are in the range of [0, range_max)
sampled_classes = (rand.exp() - 1).astype('int64') % range_max
true_cls = true_classes.as_in_context(ctx).astype('float64')
expected_count_true = ((true_cls + 2.0) / (true_cls + 1.0)).log() / log_range * num_sampled
    # cast sampled classes to fp64 to avoid integer division
sampled_cls_fp64 = sampled_classes.astype('float64')
expected_prob_sampled = ((sampled_cls_fp64 + 2.0) / (sampled_cls_fp64 + 1.0)).log() / log_range
expected_count_sampled = expected_prob_sampled * num_sampled
return sampled_classes, expected_count_true, expected_count_sampled
# pylint: enable=line-too-long
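# Illustrative note (added for clarity; not part of the original module): the sampler
# above draws u ~ Uniform(0, log(range_max + 1)) and maps it through exp(u) - 1, which
# yields the approximately log-uniform class distribution
#   P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1).
# The probabilities telescope to 1, which can be checked with plain Python:
#
#     >>> import math
#     >>> range_max = 5
#     >>> probs = [(math.log(c + 2) - math.log(c + 1)) / math.log(range_max + 1)
#     ...          for c in range(range_max)]
#     >>> abs(sum(probs) - 1.0) < 1e-12
#     True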
def foreach(body, data, init_states):
"""Run a for loop with user-defined computation over NDArrays on dimension 0.
This operator simulates a for loop and body has the computation for an iteration
of the for loop. It runs the computation in body on each slice from the input
NDArrays.
body takes two arguments as input and outputs a tuple of two elements,
as illustrated below:
out, states = body(data1, states)
data1 can be either an NDArray or a list of NDArrays. If data is an NDArray,
data1 is an NDArray. Otherwise, data1 is a list of NDArrays and has the same
    size as data. states is a list of NDArrays and has the same size as init_states.
Similarly, out can be either an NDArray or a list of NDArrays, which are concatenated
as the first output of foreach; states from the last execution of body
are the second output of foreach.
The computation done by this operator is equivalent to the pseudo code below
when the input data is NDArray:
states = init_states
outs = []
    for i in range(data.shape[0]):
s = data[i]
out, states = body(s, states)
outs.append(out)
outs = stack(*outs)
Parameters
----------
body : a Python function.
Define computation in an iteration.
data: an NDArray or a list of NDArrays.
The input data.
init_states: an NDArray or a list of NDArrays.
The initial values of the loop states.
name: string.
The name of the operator.
Returns
-------
outputs: an NDArray or a list of NDArrays.
The output data concatenated from the output of all iterations.
states: a list of NDArrays.
The loop states in the last iteration.
Examples
--------
>>> step = lambda data, states: (data + states[0], [states[0] * 2])
>>> data = mx.nd.random.uniform(shape=(2, 10))
>>> states = [mx.nd.random.uniform(shape=(10))]
>>> outs, states = mx.nd.contrib.foreach(step, data, states)
"""
def check_input(inputs, in_type, msg):
is_NDArray_or_list = True
if isinstance(inputs, list):
for i in inputs:
if not isinstance(i, in_type):
is_NDArray_or_list = False
break
else:
is_NDArray_or_list = isinstance(inputs, in_type)
assert is_NDArray_or_list, msg
check_input(data, ndarray.NDArray, "data should be an NDArray or a list of NDArrays")
check_input(init_states, ndarray.NDArray,
"init_states should be an NDArray or a list of NDArrays")
not_data_list = isinstance(data, ndarray.NDArray)
num_iters = data.shape[0] if not_data_list else data[0].shape[0]
states = init_states
outputs = []
for i in range(num_iters):
if not_data_list:
eles = data[i]
else:
eles = [d[i] for d in data]
outs, states = body(eles, states)
outs = _as_list(outs)
outputs.append(outs)
outputs = zip(*outputs)
tmp_outputs = []
for out in outputs:
tmp_outputs.append(ndarray.op.stack(*out))
outputs = tmp_outputs
if not_data_list and len(outputs) == 1:
outputs = outputs[0]
return (outputs, states)
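# Illustrative sketch (added for clarity; not part of the original module): the
# pure-Python unrolling that foreach performs, shown with plain lists instead of
# NDArrays so it can be checked without MXNet.
#
#     >>> def toy_foreach(body, data, init_states):
#     ...     states, outs = init_states, []
#     ...     for d in data:
#     ...         out, states = body(d, states)
#     ...         outs.append(out)
#     ...     return outs, states
#     >>> toy_foreach(lambda d, s: (d + s[0], [d + s[0]]), [1, 2, 3], [0])
#     ([1, 3, 6], [6])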
|
[
"math.log"
] |
[((3163, 3186), 'math.log', 'math.log', (['(range_max + 1)'], {}), '(range_max + 1)\n', (3171, 3186), False, 'import math\n')]
|
# 1.3: (intended?) Behavior change with empty apply #41997
import pandas as pd
print(pd.__version__)
df = pd.DataFrame(columns=["a", "b"])
df["a"] = df.apply(lambda x: x["a"], axis=1)
print(df)
|
[
"pandas.DataFrame"
] |
[((109, 141), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['a', 'b']"}), "(columns=['a', 'b'])\n", (121, 141), True, 'import pandas as pd\n')]
|
from random import randint
from threading import Event
from unittest.mock import patch, MagicMock
from uuid import uuid4
import pytest
from pyzeebe import ZeebeClient, ZeebeWorker, ZeebeTaskRouter, Job
from pyzeebe.grpc_internals.zeebe_adapter import ZeebeAdapter
from pyzeebe.task.task import Task
from pyzeebe.worker.task_handler import ZeebeTaskHandler
from tests.unit.utils.gateway_mock import GatewayMock
from tests.unit.utils.random_utils import random_job
@pytest.fixture
def job_with_adapter(zeebe_adapter):
return random_job(zeebe_adapter=zeebe_adapter)
@pytest.fixture
def job_without_adapter():
return random_job()
@pytest.fixture
def job_from_task(task):
job = random_job(task)
job.variables = dict(x=str(uuid4()))
return job
@pytest.fixture
def zeebe_adapter(grpc_create_channel):
return ZeebeAdapter(channel=grpc_create_channel())
@pytest.fixture
def zeebe_client(grpc_create_channel):
return ZeebeClient(channel=grpc_create_channel())
@pytest.fixture
def zeebe_worker(zeebe_adapter):
worker = ZeebeWorker()
worker.zeebe_adapter = zeebe_adapter
return worker
@pytest.fixture
def task(task_type):
return Task(task_type, MagicMock(wraps=lambda x: dict(x=x)), MagicMock(wraps=lambda x, y, z: x))
@pytest.fixture
def task_type():
return str(uuid4())
@pytest.fixture
def stop_after_test():
stop_test = Event()
yield stop_test
stop_test.set()
@pytest.fixture
def handle_task_mock():
with patch("pyzeebe.worker.worker.ZeebeWorker._handle_task") as mock:
yield mock
@pytest.fixture
def stop_event_mock(zeebe_worker):
with patch.object(zeebe_worker, "stop_event") as mock:
yield mock
@pytest.fixture
def handle_not_alive_thread_spy(mocker):
spy = mocker.spy(ZeebeWorker, "_handle_not_alive_thread")
yield spy
@pytest.fixture
def router():
return ZeebeTaskRouter()
@pytest.fixture
def routers():
return [ZeebeTaskRouter() for _ in range(0, randint(2, 100))]
@pytest.fixture
def task_handler():
return ZeebeTaskHandler()
@pytest.fixture
def decorator():
def simple_decorator(job: Job) -> Job:
return job
return MagicMock(wraps=simple_decorator)
@pytest.fixture(scope="module")
def grpc_add_to_server():
from zeebe_grpc.gateway_pb2_grpc import add_GatewayServicer_to_server
return add_GatewayServicer_to_server
@pytest.fixture(scope="module")
def grpc_servicer():
return GatewayMock()
@pytest.fixture(scope="module")
def grpc_stub_cls(grpc_channel):
from zeebe_grpc.gateway_pb2_grpc import GatewayStub
return GatewayStub
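# Note (added for clarity): the module-scoped fixtures above (grpc_add_to_server,
# grpc_servicer, grpc_stub_cls) follow the naming convention expected by the
# pytest-grpc plugin, which wires them into the grpc_channel and grpc_create_channel
# fixtures used by the other fixtures in this file.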
|
[
"unittest.mock.patch.object",
"uuid.uuid4",
"unittest.mock.MagicMock",
"random.randint",
"pyzeebe.ZeebeTaskRouter",
"pytest.fixture",
"tests.unit.utils.random_utils.random_job",
"tests.unit.utils.gateway_mock.GatewayMock",
"pyzeebe.worker.task_handler.ZeebeTaskHandler",
"unittest.mock.patch",
"threading.Event",
"pyzeebe.ZeebeWorker"
] |
[((2206, 2236), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (2220, 2236), False, 'import pytest\n'), ((2381, 2411), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (2395, 2411), False, 'import pytest\n'), ((2461, 2491), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (2475, 2491), False, 'import pytest\n'), ((531, 570), 'tests.unit.utils.random_utils.random_job', 'random_job', ([], {'zeebe_adapter': 'zeebe_adapter'}), '(zeebe_adapter=zeebe_adapter)\n', (541, 570), False, 'from tests.unit.utils.random_utils import random_job\n'), ((627, 639), 'tests.unit.utils.random_utils.random_job', 'random_job', ([], {}), '()\n', (637, 639), False, 'from tests.unit.utils.random_utils import random_job\n'), ((693, 709), 'tests.unit.utils.random_utils.random_job', 'random_job', (['task'], {}), '(task)\n', (703, 709), False, 'from tests.unit.utils.random_utils import random_job\n'), ((1054, 1067), 'pyzeebe.ZeebeWorker', 'ZeebeWorker', ([], {}), '()\n', (1065, 1067), False, 'from pyzeebe import ZeebeClient, ZeebeWorker, ZeebeTaskRouter, Job\n'), ((1383, 1390), 'threading.Event', 'Event', ([], {}), '()\n', (1388, 1390), False, 'from threading import Event\n'), ((1875, 1892), 'pyzeebe.ZeebeTaskRouter', 'ZeebeTaskRouter', ([], {}), '()\n', (1890, 1892), False, 'from pyzeebe import ZeebeClient, ZeebeWorker, ZeebeTaskRouter, Job\n'), ((2041, 2059), 'pyzeebe.worker.task_handler.ZeebeTaskHandler', 'ZeebeTaskHandler', ([], {}), '()\n', (2057, 2059), False, 'from pyzeebe.worker.task_handler import ZeebeTaskHandler\n'), ((2169, 2202), 'unittest.mock.MagicMock', 'MagicMock', ([], {'wraps': 'simple_decorator'}), '(wraps=simple_decorator)\n', (2178, 2202), False, 'from unittest.mock import patch, MagicMock\n'), ((2444, 2457), 'tests.unit.utils.gateway_mock.GatewayMock', 'GatewayMock', ([], {}), '()\n', (2455, 2457), False, 'from tests.unit.utils.gateway_mock import GatewayMock\n'), ((1231, 1265), 'unittest.mock.MagicMock', 'MagicMock', ([], {'wraps': '(lambda x, y, z: x)'}), '(wraps=lambda x, y, z: x)\n', (1240, 1265), False, 'from unittest.mock import patch, MagicMock\n'), ((1317, 1324), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (1322, 1324), False, 'from uuid import uuid4\n'), ((1482, 1537), 'unittest.mock.patch', 'patch', (['"""pyzeebe.worker.worker.ZeebeWorker._handle_task"""'], {}), "('pyzeebe.worker.worker.ZeebeWorker._handle_task')\n", (1487, 1537), False, 'from unittest.mock import patch, MagicMock\n'), ((1628, 1668), 'unittest.mock.patch.object', 'patch.object', (['zeebe_worker', '"""stop_event"""'], {}), "(zeebe_worker, 'stop_event')\n", (1640, 1668), False, 'from unittest.mock import patch, MagicMock\n'), ((1938, 1955), 'pyzeebe.ZeebeTaskRouter', 'ZeebeTaskRouter', ([], {}), '()\n', (1953, 1955), False, 'from pyzeebe import ZeebeClient, ZeebeWorker, ZeebeTaskRouter, Job\n'), ((741, 748), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (746, 748), False, 'from uuid import uuid4\n'), ((1974, 1989), 'random.randint', 'randint', (['(2)', '(100)'], {}), '(2, 100)\n', (1981, 1989), False, 'from random import randint\n')]
|
import tensorflow as tf
import numpy as np
# training set. Contains a row of size 5 per training example. Each row is a sentence, with every word replaced
# by its unique index. The dataset below contains 6 unique words numbered 0-5. Ideally, the word vectors for the
# words indexed 4 and 5 should end up the same, since they appear in identical contexts.
X_train = np.array([[0,1,4,2,3],[0,1,5,2,3]])
# output dummy for testing purpose
y_train = np.array([0,1])
# Create the embeddings
with tf.name_scope("embeddings"):
    # Initialize the embedding matrix with uniformly distributed random weights.
    embedding = tf.Variable(tf.random_uniform((6, 3), -1, 1))
# create the embedding layer
embed = tf.nn.embedding_lookup(embedding, X_train)
    # Expand dims so that we can apply 2-D convolution operations on top of the single-channel embedded vectors
embedded_chars_expanded = tf.expand_dims(embed, -1)
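    # Shape note (added for clarity): embed has shape (batch, sentence_len, embed_dim) = (2, 5, 3);
    # expand_dims(..., -1) appends a channel axis, giving (2, 5, 3, 1) as printed in the output below.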
with tf.Session() as sess:
sess.run(tf.global_variables_initializer());
result,result_expanded = sess.run([embed,embedded_chars_expanded]);
print(result_expanded.shape)
print(result)
print(result_expanded)
# OUTPUT
# result
# [[[ 0.89598155 0.4275496 0.00858593]
# [ 0.21602225 -0.44228792 -0.20533657]
# [ 0.9624436 -0.99176955 0.15964746]
# [-0.29004955 0.470721 0.00804782]
# [ 0.7497003 0.6044979 -0.5612638 ]]
#
# [[ 0.89598155 0.4275496 0.00858593]
# [ 0.21602225 -0.44228792 -0.20533657]
# [-0.48809385 -0.55618596 -0.73995876]
# [-0.29004955 0.470721 0.00804782]
# [ 0.7497003 0.6044979 -0.5612638 ]]]
# result_expanded - has a dimension of (2,5,3,1)
# [[[[-0.45975637]
# [-0.5756638 ]
# [ 0.7002065 ]]
#
# [[ 0.2708087 ]
# [ 0.7985747 ]
# [ 0.57897186]]
#
# [[ 0.6642673 ]
# [ 0.6548476 ]
# [ 0.00760126]]
#
# [[-0.7074845 ]
# [ 0.5100081 ]
# [ 0.7232883 ]]
#
# [[ 0.19342017]
# [-0.46509933]
# [ 0.8361807 ]]]
#
#
# [[[-0.45975637]
# [-0.5756638 ]
# [ 0.7002065 ]]
#
# [[ 0.2708087 ]
# [ 0.7985747 ]
# [ 0.57897186]]
#
# [[-0.90803576]
# [ 0.75451994]
# [ 0.8864901 ]]
#
# [[-0.7074845 ]
# [ 0.5100081 ]
# [ 0.7232883 ]]
#
# [[ 0.19342017]
# [-0.46509933]
# [ 0.8361807 ]]]]
|
[
"tensorflow.random_uniform",
"tensorflow.nn.embedding_lookup",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"numpy.array",
"tensorflow.name_scope",
"tensorflow.expand_dims"
] |
[((324, 368), 'numpy.array', 'np.array', (['[[0, 1, 4, 2, 3], [0, 1, 5, 2, 3]]'], {}), '([[0, 1, 4, 2, 3], [0, 1, 5, 2, 3]])\n', (332, 368), True, 'import numpy as np\n'), ((406, 422), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (414, 422), True, 'import numpy as np\n'), ((453, 480), 'tensorflow.name_scope', 'tf.name_scope', (['"""embeddings"""'], {}), "('embeddings')\n", (466, 480), True, 'import tensorflow as tf\n'), ((695, 737), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['embedding', 'X_train'], {}), '(embedding, X_train)\n', (717, 737), True, 'import tensorflow as tf\n'), ((876, 901), 'tensorflow.expand_dims', 'tf.expand_dims', (['embed', '(-1)'], {}), '(embed, -1)\n', (890, 901), True, 'import tensorflow as tf\n'), ((908, 920), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (918, 920), True, 'import tensorflow as tf\n'), ((586, 618), 'tensorflow.random_uniform', 'tf.random_uniform', (['(6, 3)', '(-1)', '(1)'], {}), '((6, 3), -1, 1)\n', (603, 618), True, 'import tensorflow as tf\n'), ((943, 976), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (974, 976), True, 'import tensorflow as tf\n')]
|
import matplotlib.pyplot as plt
import yfinance as yf #To access the financial data available on Yahoo Finance
import numpy as np
def get_stock_data(tickerSymbol, start_date, end_date):
tickerData = yf.Ticker(tickerSymbol)
df_ticker = tickerData.history(period='1d', start=start_date, end=end_date)
return df_ticker
def prepare_data(s):
ymax = 1000
s = s * ymax / s.max() # scale y range
    s = 1450 - s  # The image's top-left is (0, 0) and y grows downward; the horizon line sits around y=1450, so our plot should stay above it
    # smooth the curve with a rolling mean
window_size = len(s) // 150
s = s.rolling(window_size, min_periods=1).mean()
return s
def make_picture(stock_prices, img, x_width_image, horizon_height):
"""x_width_image: dedicated arg for more control, instead of taking image dim"""
fig, ax = plt.subplots()
ax.imshow(img)
x = np.linspace(0, x_width_image, len(stock_prices))
ax.fill_between(x, stock_prices, horizon_height, color='#081A1C')
plt.axis('off')
plt.tight_layout()
return fig
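# Example usage (illustrative sketch; the ticker, dates, and background image are
# placeholders, not part of the original script):
#
#     df = get_stock_data("MSFT", "2020-01-01", "2021-01-01")
#     s = prepare_data(df["Close"])
#     img = plt.imread("skyline.jpg")  # hypothetical background image
#     fig = make_picture(s, img, x_width_image=img.shape[1], horizon_height=1450)
#     fig.savefig("skyline_chart.png")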
|
[
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.axis",
"yfinance.Ticker"
] |
[((206, 229), 'yfinance.Ticker', 'yf.Ticker', (['tickerSymbol'], {}), '(tickerSymbol)\n', (215, 229), True, 'import yfinance as yf\n'), ((824, 838), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (836, 838), True, 'import matplotlib.pyplot as plt\n'), ((992, 1007), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1000, 1007), True, 'import matplotlib.pyplot as plt\n'), ((1012, 1030), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1028, 1030), True, 'import matplotlib.pyplot as plt\n')]
|
import cv2
image=cv2.imread(r'md.jpg',flags=1)
print(image[0:100,0:100])
#Changes pixel value in the original images
#image[0:100,0:100]=255#fully white
image[0:100, 0:100] = [165, 42, 42]  # note: OpenCV stores pixels in BGR order, not RGB
cv2.imshow('New Image',image)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
[
"cv2.waitKey",
"cv2.imread",
"cv2.imshow",
"cv2.destroyAllWindows"
] |
[((17, 46), 'cv2.imread', 'cv2.imread', (['"""md.jpg"""'], {'flags': '(1)'}), "('md.jpg', flags=1)\n", (27, 46), False, 'import cv2\n'), ((195, 225), 'cv2.imshow', 'cv2.imshow', (['"""New Image"""', 'image'], {}), "('New Image', image)\n", (205, 225), False, 'import cv2\n'), ((225, 239), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (236, 239), False, 'import cv2\n'), ((240, 263), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (261, 263), False, 'import cv2\n')]
|
# Verified on https://leetcode.com/problems/implement-strstr
import string
from functools import lru_cache
@lru_cache(maxsize=None)
def get_char_code(ch, char_set=string.ascii_letters + string.digits):
# We could have also used:
# return ord(ch) - ord('0')
return char_set.index(ch)
def rabin_karp(haystack, needle):
# CAREFUL: Beware of these corner cases!
if needle == "":
return 0
if len(needle) == 0 or len(needle) > len(haystack):
return -1
HASH_MOD = 1000000007
    # We can use any number as the base, but it's better to
# take the alphabet size to minimize collisions
BASE = 26
needle_hash = 0
haystack_hash = 0
    for i in range(len(needle)):
needle_char = get_char_code(needle[i])
haystack_char = get_char_code(haystack[i])
needle_hash = (BASE * needle_hash + needle_char) % HASH_MOD
haystack_hash = (BASE * haystack_hash + haystack_char) % HASH_MOD
if haystack_hash == needle_hash and needle == haystack[0 : len(needle)]:
return 0
# Now compute hashes on a rolling basis
base_power_up = pow(BASE, (len(needle) - 1), HASH_MOD)
for i in range(len(needle), len(haystack)):
haystack_start_pos = i + 1 - len(needle)
haystack_end_pos = i + 1
old_char = get_char_code(haystack[haystack_start_pos - 1])
ch = get_char_code(haystack[i])
haystack_hash = (
(haystack_hash - base_power_up * old_char) * BASE + ch
) % HASH_MOD
if (
haystack_hash == needle_hash
and needle == haystack[haystack_start_pos:haystack_end_pos]
):
return haystack_start_pos
return -1
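# Quick check (illustrative doctest, not part of the original file):
#
#     >>> rabin_karp("hello", "ll")
#     2
#     >>> rabin_karp("aaaaa", "bba")
#     -1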
def main():
haystack = "abcs"
needle = ""
print(rabin_karp(haystack, needle))
if __name__ == "__main__":
main()
|
[
"functools.lru_cache"
] |
[((110, 133), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': 'None'}), '(maxsize=None)\n', (119, 133), False, 'from functools import lru_cache\n')]
|
from bs4 import BeautifulSoup
from black_list.items import BLICEFugitivesListItem
from scrapy import Spider, Request
import os
class BlIcefugitiveslistSpider(Spider):
name = 'BL_ICEFugitivesList'
allowed_domains = ['www.ice.gov']
start_urls = ['https://www.ice.gov/most-wanted']
header = 'Name|Status|Offense|AKA|Sex|DOB|POB|Complexion|Reward|Height|Weight|Eyes|Haia|Case Number|Scars|Address|Synopsis|Warning'
def parse(self, response):
with open(os.path.abspath('results/BL_ICEFugitivesList.txt'), 'a', encoding='utf-8') as f:
f.write(self.header + '\n')
soup = BeautifulSoup(response.body, 'lxml')
tables = soup.select('.field-item')
tables.pop(0)
for table in tables:
links = table.find_all(text='READ MORE')
for link in links:
yield Request(url='https://www.ice.gov%s' % (link.parent['href']), callback=self.get_info)
def get_info(self, response):
soup = BeautifulSoup(response.body, 'lxml')
item = BLICEFugitivesListItem()
item['name'] = ''
item['offense'] = ''
item['aka'] = ''
item['sex'] = ''
item['dob'] = ''
item['pob'] = ''
item['complexion'] = ''
item['reward'] = ''
item['height'] = ''
item['weight'] = ''
item['eyes'] = ''
item['haia'] = ''
item['scars'] = ''
item['address'] = ''
item['synopsis'] = ''
item['warning'] = ''
if soup.find(text='Name') is not None:
item['name'] = soup.find(text='Name').parent.next_sibling.next_sibling.text
if soup.select('div.wanted-for') is not None:
item['offense'] = soup.select('div.wanted-for')[0].text
if soup.find(text='Alias') is not None:
item['aka'] = soup.find(text='Alias').parent.next_sibling.next_sibling.text
if soup.find(text='Gender') is not None:
item['sex'] = soup.find(text='Gender').parent.next_sibling.next_sibling.text
if soup.find(text='Date of Birth') is not None:
item['dob'] = soup.find(text='Date of Birth').parent.next_sibling.next_sibling.text
if soup.find(text='Place of Birth') is not None:
item['pob'] = soup.find(text='Place of Birth').parent.next_sibling.next_sibling.text
if soup.find(text='Skin Tone') is not None:
item['complexion'] = soup.find(text='Skin Tone').parent.next_sibling.next_sibling.text
if soup.find(text='Reward') is not None:
item['reward'] = soup.find(text='Reward').parent.next_sibling.next_sibling.text
if soup.find(text='Height') is not None:
item['height'] = soup.find(text='Height').parent.next_sibling.next_sibling.text
if soup.find(text='Weight') is not None:
item['weight'] = soup.find(text='Weight').parent.next_sibling.next_sibling.text
if soup.find(text='Eyes') is not None:
item['eyes'] = soup.find(text='Eyes').parent.next_sibling.next_sibling.text
if soup.find(text='Hair') is not None:
item['haia'] = soup.find(text='Hair').parent.next_sibling.next_sibling.text
if soup.find(text='Scars/Marks') is not None:
item['scars'] = soup.find(text='Scars/Marks').parent.next_sibling.next_sibling.text
if soup.find(text='Last Known Location') is not None:
item['address'] = soup.find(text='Last Known Location').parent.next_sibling.next_sibling.text
values = soup.select('div[class="field-label"]')
if values:
for i in values:
if "Summary:" in i.text:
item['synopsis'] = i.next_sibling.text
if "Warning:" in i.text:
item['warning'] = i.next_sibling.text
yield item
|
[
"bs4.BeautifulSoup",
"os.path.abspath",
"black_list.items.BLICEFugitivesListItem",
"scrapy.Request"
] |
[((615, 651), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.body', '"""lxml"""'], {}), "(response.body, 'lxml')\n", (628, 651), False, 'from bs4 import BeautifulSoup\n'), ((988, 1024), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.body', '"""lxml"""'], {}), "(response.body, 'lxml')\n", (1001, 1024), False, 'from bs4 import BeautifulSoup\n'), ((1040, 1064), 'black_list.items.BLICEFugitivesListItem', 'BLICEFugitivesListItem', ([], {}), '()\n', (1062, 1064), False, 'from black_list.items import BLICEFugitivesListItem\n'), ((479, 529), 'os.path.abspath', 'os.path.abspath', (['"""results/BL_ICEFugitivesList.txt"""'], {}), "('results/BL_ICEFugitivesList.txt')\n", (494, 529), False, 'import os\n'), ((853, 940), 'scrapy.Request', 'Request', ([], {'url': "('https://www.ice.gov%s' % link.parent['href'])", 'callback': 'self.get_info'}), "(url='https://www.ice.gov%s' % link.parent['href'], callback=self.\n get_info)\n", (860, 940), False, 'from scrapy import Spider, Request\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) <NAME> [~º/~] and Contributors
# All rights reserved.
#
# This is free software; you can do what the LICENCE file allows you to.
#
'''The query language core.
'''
import ast
import types
from xoutil.symbols import Unset
from xoutil.objects import memoized_property
from collections import MappingView, Mapping
from xoutil.decorator.meta import decorator
from xotl.ql import interfaces
class Universe:
'''The class of the `this`:obj: object.
The `this` object is simply a name from which objects can be drawn in a
query.
'''
def __new__(cls):
res = getattr(cls, 'instance', None)
if not res:
res = super().__new__(cls)
cls.instance = res
return res
def __getitem__(self, key):
return self
def __getattr__(self, name):
return self
def __iter__(self):
return self
def next(self):
raise StopIteration
__next__ = next
this = Universe()
RESERVED_ARGUMENTS = (
'limit', 'offset', 'groups', 'order', 'get_value', 'qst', '_frame'
)
class QueryObject:
frame_type = 'xotl.ql.core.Frame'
def __init__(self, qst, _frame, **kwargs):
self.qst = qst
self._frame = _frame
if any(name in RESERVED_ARGUMENTS for name in kwargs):
raise TypeError('Invalid keyword argument')
self.expression = kwargs.pop('expression', None)
for attr, val in kwargs.items():
setattr(self, attr, val)
def get_value(self, name, only_globals=False):
if not only_globals:
res = self._frame.f_locals.get(name, Unset)
else:
res = Unset
if res is Unset:
res = self._frame.f_globals.get(name, Unset)
if res is not Unset:
return res
else:
raise NameError(name)
@memoized_property
def locals(self):
return self._frame.f_locals
@memoized_property
def globals(self):
return self._frame.f_globals
@memoized_property
def source(self):
builder = SourceBuilder()
return builder.get_source(self.qst)
def get_query_object(generator,
query_type='xotl.ql.core.QueryObject',
frame_type=None,
**kwargs):
'''Get the query object from a query expression.
'''
from xoutil.objects import import_object
from xotl.ql.revenge import Uncompyled
uncompiled = Uncompyled(generator)
gi_frame = generator.gi_frame
QueryObjectType = import_object(query_type)
FrameType = import_object(frame_type or QueryObjectType.frame_type)
return QueryObjectType(
uncompiled.qst,
FrameType(gi_frame.f_locals, gi_frame.f_globals),
expression=generator,
**kwargs
)
# Alias to the old API.
these = get_query_object
def get_predicate_object(func, predicate_type='xotl.ql.core.QueryObject',
frame_type=None, **kwargs):
'''Get a predicate object from a predicate expression.
'''
from xoutil.objects import import_object
from .revenge import Uncompyled
uncompiled = Uncompyled(func)
PredicateClass = import_object(predicate_type)
FrameClass = import_object(frame_type or PredicateClass.frame_type)
return PredicateClass(
uncompiled.qst,
FrameClass(_get_closure(func), func.__globals__),
predicate=func,
**kwargs
)
def normalize_query(which, **kwargs):
'''Ensure a query object.
If `which` is a query expression (more precisely a generator object) it is
passed to `get_query_object`:func: along with all keyword arguments.
If `which` is not a query expression it must be a `query object`:term:,
other types are a TypeError.
'''
from types import GeneratorType
if isinstance(which, GeneratorType):
return get_query_object(which, **kwargs)
else:
if not isinstance(which, interfaces.QueryObject):
raise TypeError('Query object expected, but object provided '
'is not: %r' % type(which))
return which
@decorator
def thesefy(target, make_subquery=True):
'''Allow an object to participate in queries.
Example as a wrapper::
class People:
# ...
pass
query = (who for who in thesefy(People))
Example as a decorator::
@thesefy
class People:
pass
query = (who for who in People)
If `target` already support the iterable protocol (i.e implement
``__iter__``), return it unchanged.
If `make_subquery` is True, then the query shown above will be equivalent
to::
query = (who for who in (x for x in this if isinstance(x, People)))
If `make_subquery` is False, `thesefy` injects an ``__iter__()`` that
simply returns the same object and a ``next()`` method that immediately
stops the iteration.
Notice that in order to use `make_subquery` you call `thesefy`:func: as a
decorator-returning function::
class Person:
pass
query = (x for x in thesefy(make_subquery=False)(Person))
# or simply as a decorator
@thesefy(make_subquery=False)
class Person:
pass
'''
if getattr(target, '__iter__', None):
return target
class new_meta(type(target)):
if make_subquery:
def __iter__(self):
return (x for x in this if isinstance(x, self))
else:
def __iter__(self):
return self
def next(self):
raise StopIteration
__next__ = next
from xoutil.objects import copy_class
new_class = copy_class(target, meta=new_meta)
return new_class
class Frame:
def __init__(self, locals, globals, **kwargs):
self.auto_expand_subqueries = kwargs.pop('auto_expand_subqueries',
True)
self.f_locals = _FrameView(locals)
self.f_globals = _FrameView(globals)
self.f_locals.owner = self.f_globals.owner = self
class _FrameView(MappingView, Mapping):
def __contains__(self, key):
try:
self[key]
except KeyError:
return False
else:
return True
def __getitem__(self, key):
res = self._mapping[key]
if self.owner.auto_expand_subqueries and key == '.0':
return sub_query_or_value(res)
else:
return res
def get(self, key, default=None):
res = self._mapping.get(key, default)
if self.owner.auto_expand_subqueries and key == '.0':
return sub_query_or_value(res)
else:
return res
def __iter__(self):
return iter(self._mapping)
def _get_closure(obj):
assert isinstance(obj, types.FunctionType)
if obj.__closure__:
return {
name: cell.cell_contents
for name, cell in zip(obj.__code__.co_freevars, obj.__closure__)
}
else:
return {}
def sub_query_or_value(v):
if isinstance(v, types.GeneratorType) and v.gi_code.co_name == '<genexpr>':
return get_query_object(v)
else:
return v
class SourceBuilder(ast.NodeVisitor):
def get_source(self, node):
stack = self.stack = []
self.visit(node)
assert len(stack) == 1, 'Remaining items %r at %r' % (stack, node)
return stack.pop()
def visit_And(self, node):
self.stack.append(' and ')
def visit_Or(self, node):
self.stack.append(' or ')
def visit_Name(self, node):
self.stack.append(node.id)
def visit_BoolOp(self, node):
self.visit(node.op)
for val in node.values:
self.visit(val)
exprs = []
for _ in range(len(node.values)):
exprs.insert(0, self.stack.pop(-1))
op = self.stack.pop(-1)
self.stack.append('(%s)' % op.join(exprs))
def visit_BinOp(self, node):
stack = self.stack
self.visit(node.op)
self.visit(node.right)
self.visit(node.left)
left = stack.pop(-1)
right = stack.pop(-1)
op = stack.pop(-1)
stack.append('(%s%s%s)' % (left, op, right))
def visit_Add(self, node):
self.stack.append(' + ')
def visit_Sub(self, node):
self.stack.append(' - ')
def visit_Mult(self, node):
self.stack.append(' * ')
def visit_Div(self, node):
self.stack.append(' / ')
def visit_Mod(self, node):
self.stack.append(' % ')
def visit_Pow(self, node):
self.stack.append(' ** ')
def visit_LShift(self, node):
self.stack.append(' << ')
def visit_RShift(self, node):
self.stack.append(' >> ')
def visit_BitOr(self, node):
self.stack.append(' | ')
def visit_BitAnd(self, node):
self.stack.append(' & ')
def visit_BitXor(self, node):
self.stack.append(' ^ ')
def visit_FloorDiv(self, node):
self.stack.append(' // ')
def visit_Num(self, node):
self.stack.append('%s' % node.n)
def visit_UnaryOp(self, node):
stack = self.stack
self.visit(node.op)
self.visit(node.operand)
operand = stack.pop(-1)
op = stack.pop(-1)
stack.append('(%s%s)' % (op, operand))
def visit_Invert(self, node):
self.stack.append('~')
def visit_Not(self, node):
self.stack.append('not ')
def visit_UAdd(self, node):
self.stack.append('+')
def visit_USub(self, node):
self.stack.append('-')
def visit_IfExp(self, node):
self.visit(node.orelse)
self.visit(node.test)
self.visit(node.body)
body = self.stack.pop(-1)
test = self.stack.pop(-1)
orelse = self.stack.pop(-1)
self.stack.append('(%s if %s else %s)' % (body, test, orelse))
def visit_Lambda(self, node):
raise NotImplementedError()
def visit_Dict(self, node):
# order does not really matter but I'm picky
        for k, v in reversed(list(zip(node.keys, node.values))):
self.visit(v)
self.visit(k)
dictbody = ', '.join(
'%s: %s' % (self.stack.pop(-1), self.stack.pop(-1))
for _ in range(len(node.keys))
)
self.stack.append('{%s}' % dictbody)
def visit_Set(self, node):
for elt in reversed(node.elts):
self.visit(elt)
setbody = ', '.join(self.stack.pop(-1) for _ in range(len(node.elts)))
self.stack.append('{%s}' % setbody)
def visit_ListComp(self, node):
self._visit_comp(node)
self.stack.append('[%s]' % self.stack.pop(-1))
def visit_SetComp(self, node):
self._visit_comp(node)
self.stack.append('{%s}' % self.stack.pop(-1))
def visit_DictComp(self, node):
self.visit(node.value)
self.visit(node.key)
pop = lambda: self.stack.pop(-1)
lines = ['%s: %s' % (pop(), pop())]
self._visit_generators(node)
lines.append(pop())
self.stack.append('{%s}' % ' '.join(lines))
def visit_GeneratorExp(self, node):
self._visit_comp(node)
self.stack.append('(%s)' % self.stack.pop(-1))
def _visit_comp(self, node):
self.visit(node.elt)
pop = lambda: self.stack.pop(-1)
lines = [pop()]
self._visit_generators(node)
lines.append(pop())
self.stack.append(' '.join(lines))
def _visit_generators(self, node):
for comp in reversed(node.generators):
for if_ in reversed(comp.ifs):
self.visit(if_)
self.stack.append(len(comp.ifs)) # save the length of ifs [*]
self.visit(comp.iter)
self.visit(comp.target)
pop = lambda: self.stack.pop(-1)
lines = []
for _ in range(len(node.generators)):
lines.append('for %s in %s' % (pop(), pop()))
for if_ in range(pop()): # [*] pop the length of ifs
lines.append('if %s' % pop())
self.stack.append(' '.join(lines))
def visit_Yield(self, node):
raise TypeError('Invalid node Yield')
def visit_Eq(self, node):
self.stack.append(' == ')
def visit_NotEq(self, node):
self.stack.append(' != ')
def visit_Lt(self, node):
self.stack.append(' < ')
def visit_LtE(self, node):
self.stack.append(' <= ')
def visit_Gt(self, node):
self.stack.append(' > ')
def visit_GtE(self, node):
self.stack.append(' >= ')
def visit_Is(self, node):
self.stack.append(' is ')
def visit_IsNot(self, node):
self.stack.append(' is not ')
def visit_In(self, node):
self.stack.append(' in ')
def visit_NotIn(self, node):
self.stack.append(' not in ')
def visit_Compare(self, node):
self.visit(node.left)
        for op, expr in reversed(list(zip(node.ops, node.comparators))):
self.visit(expr)
self.visit(op)
right = ''.join(
# I assume each operator has spaces around it
'%s%s' % (self.stack.pop(-1), self.stack.pop(-1))
for _ in range(len(node.ops))
)
self.stack.append('%s%s' % (self.stack.pop(-1), right))
def visit_Call(self, node):
if node.kwargs:
self.visit(node.kwargs)
if node.starargs:
self.visit(node.starargs)
for kw in reversed(node.keywords):
self.visit(kw.value)
self.stack.append(kw.arg)
for arg in reversed(node.args):
self.visit(arg)
self.visit(node.func)
func = self.stack.pop(-1)
args = [self.stack.pop(-1) for _ in range(len(node.args))]
keywords = [
(self.stack.pop(-1), self.stack.pop(-1))
for _ in range(len(node.keywords))
]
starargs = self.stack.pop(-1) if node.starargs else ''
kwargs = self.stack.pop(-1) if node.kwargs else ''
call = ', '.join(args)
if keywords:
if call:
call += ', '
call += ', '.join('%s=%s' % (k, v) for k, v in keywords)
if starargs:
if call:
call += ', '
call += '*%s' % starargs
if kwargs:
if call:
call += ', '
call += '**%s' % kwargs
self.stack.append('%s(%s)' % (func, call))
def visit_Str(self, node):
self.stack.append('%r' % node.s)
visit_Bytes = visit_Str
def visit_Repr(self, node):
raise NotImplementedError
def visit_Attribute(self, node):
self.visit(node.value)
self.stack.append('%s.%s' % (self.stack.pop(-1), node.attr))
def visit_Subscript(self, node):
self.visit(node.slice)
self.visit(node.value)
self.stack.append('%s[%s]' % (self.stack.pop(-1), self.stack.pop(-1)))
def visit_Ellipsis(self, node):
self.stack.append('...')
def visit_Slice(self, node):
if node.step:
self.visit(node.step)
step = self.stack.pop(-1)
else:
step = None
if node.upper:
self.visit(node.upper)
upper = self.stack.pop(-1)
else:
upper = None
if node.lower:
self.visit(node.lower)
lower = self.stack.pop(-1)
else:
lower = None
if lower:
res = '%s:' % lower
else:
res = ':'
if upper:
res += '%s' % upper
if step:
res += ':%s' % step
self.stack.append(res)
def visit_List(self, node):
for elt in reversed(node.elts):
self.visit(elt)
self.stack.append(
'[%s]' % ', '.join(
self.stack.pop(-1) for _ in range(len(node.elts))
)
)
def visit_Tuple(self, node):
for elt in reversed(node.elts):
self.visit(elt)
result = (
'(%s' % ', '.join(
self.stack.pop(-1) for _ in range(len(node.elts))
)
)
if len(node.elts) == 1:
result += ', )'
else:
result += ')'
self.stack.append(result)
del decorator
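# Illustrative sketch (added for clarity; not part of the original module): SourceBuilder
# rebuilds a textual expression from a query syntax tree. The qst nodes mirror the standard
# ast module, so the behaviour can be previewed with plain ast nodes (an assumption made
# here purely for illustration):
#
#     >>> import ast
#     >>> SourceBuilder().get_source(ast.parse('a + b * c', mode='eval').body)
#     '(a + (b * c))'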
|
[
"xotl.ql.revenge.Uncompyled",
"xoutil.objects.import_object",
"xoutil.objects.copy_class"
] |
[((2590, 2611), 'xotl.ql.revenge.Uncompyled', 'Uncompyled', (['generator'], {}), '(generator)\n', (2600, 2611), False, 'from xotl.ql.revenge import Uncompyled\n'), ((2668, 2693), 'xoutil.objects.import_object', 'import_object', (['query_type'], {}), '(query_type)\n', (2681, 2693), False, 'from xoutil.objects import import_object\n'), ((2710, 2765), 'xoutil.objects.import_object', 'import_object', (['(frame_type or QueryObjectType.frame_type)'], {}), '(frame_type or QueryObjectType.frame_type)\n', (2723, 2765), False, 'from xoutil.objects import import_object\n'), ((3275, 3291), 'xotl.ql.revenge.Uncompyled', 'Uncompyled', (['func'], {}), '(func)\n', (3285, 3291), False, 'from xotl.ql.revenge import Uncompyled\n'), ((3313, 3342), 'xoutil.objects.import_object', 'import_object', (['predicate_type'], {}), '(predicate_type)\n', (3326, 3342), False, 'from xoutil.objects import import_object\n'), ((3360, 3414), 'xoutil.objects.import_object', 'import_object', (['(frame_type or PredicateClass.frame_type)'], {}), '(frame_type or PredicateClass.frame_type)\n', (3373, 3414), False, 'from xoutil.objects import import_object\n'), ((5868, 5901), 'xoutil.objects.copy_class', 'copy_class', (['target'], {'meta': 'new_meta'}), '(target, meta=new_meta)\n', (5878, 5901), False, 'from xoutil.objects import copy_class\n')]
|
# copyright (c) 2018 Larz60+
import ScraperPaths
import GetPage
import CIA_ScanTools
from lxml import html
from lxml.cssselect import CSSSelector
from lxml import etree
from lxml.etree import XPath
import re
import os
import sys
class CIA_InternationalOrgnizationsAndGroups:
def __init__(self):
self.spath = ScraperPaths.ScraperPaths()
self.gp = GetPage.GetPage()
self.getpage = self.gp.get_page
self.get_filename = self.gp.get_filename
self.cst = CIA_ScanTools.CIA_Scan_Tools()
self.fact_links = self.cst.fact_links
self.mainurl = 'https://www.cia.gov/library/publications/resources/the-world-factbook/appendix/appendix-b.html'
self.filename = self.get_filename(self.mainurl)
self.mainpage = self.getpage(self.mainurl, self.filename)
self.scrape_text()
self.cst.save_fact_links()
def remove_fluff(self, item):
if '\r\n' in item or '\n' in item:
nitem = ''
parts = item.split('\n')
for part in parts:
nitem = f'{nitem.strip()} {part.strip()}'
return nitem
else:
return item
def scrape_text(self):
tree = html.fromstring(self.mainpage)
# html.open_in_browser(tree)
c1 = self.fact_links['InternationalOrginizationsAndGroups'] = {}
childno = 1
while True:
xx = tree.cssselect(f'#GetAppendix_B > li:nth-child({childno}) > span:nth-child(1)')
# print(xx[0].text)
if len(xx) == 0:
break
title = self.remove_fluff(xx[0].text.strip())
# print(f'Title: {title}')
c2 = c1[title] = {}
# yy = tree.cssselect(f'li.ln-a:nth-child({childno}) > div:nth-child(2) > table:nth-child(1) > tbody:nth-child(1) > tr:nth-child(1) > td:nth-child(1)')
yy = tree.cssselect(f'#GetAppendix_B > li:nth-child({childno}) > div:nth-child(2) > table:nth-child(1) > tbody:nth-child(1) > tr:nth-child(1) > td:nth-child(1)')
if len(yy[0]) > 1:
# print(f'\n...length yy: {len(yy[0])}')
c3 = c2['Description'] = []
# print(f'{html.tostring(yy[0])}')
for n, element in enumerate(yy[0]):
if n % 2 == 0:
desc = self.cst.fluffinutter(html.tostring(element).decode('utf-8'))
c3.append(desc)
else:
c3 = c2['Description'] = []
description = self.remove_fluff(yy[0].text.strip())
c3.append(description)
# print(f'Description: {description}')
childno += 1
if __name__ == '__main__':
CIA_InternationalOrgnizationsAndGroups()
|
[
"lxml.html.tostring",
"lxml.html.fromstring",
"GetPage.GetPage",
"ScraperPaths.ScraperPaths",
"CIA_ScanTools.CIA_Scan_Tools"
] |
[((323, 350), 'ScraperPaths.ScraperPaths', 'ScraperPaths.ScraperPaths', ([], {}), '()\n', (348, 350), False, 'import ScraperPaths\n'), ((369, 386), 'GetPage.GetPage', 'GetPage.GetPage', ([], {}), '()\n', (384, 386), False, 'import GetPage\n'), ((495, 525), 'CIA_ScanTools.CIA_Scan_Tools', 'CIA_ScanTools.CIA_Scan_Tools', ([], {}), '()\n', (523, 525), False, 'import CIA_ScanTools\n'), ((1213, 1243), 'lxml.html.fromstring', 'html.fromstring', (['self.mainpage'], {}), '(self.mainpage)\n', (1228, 1243), False, 'from lxml import html\n'), ((2377, 2399), 'lxml.html.tostring', 'html.tostring', (['element'], {}), '(element)\n', (2390, 2399), False, 'from lxml import html\n')]
|
# Copyright 2017 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from tempest.lib.api_schema.response.compute.v2_1 import parameter_types
from tempest.lib.api_schema.response.compute.v2_47 import servers as servers247
show_server_diagnostics = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'state': {
'type': 'string', 'enum': [
'pending', 'running', 'paused', 'shutdown', 'crashed',
'suspended']
},
'driver': {
'type': 'string', 'enum': [
'libvirt', 'xenapi', 'vmwareapi', 'ironic', 'hyperv']
},
'hypervisor': {'type': ['string', 'null']},
'hypervisor_os': {'type': ['string', 'null']},
'uptime': {'type': ['integer', 'null']},
'config_drive': {'type': 'boolean'},
'num_cpus': {'type': 'integer'},
'num_nics': {'type': 'integer'},
'num_disks': {'type': 'integer'},
'memory_details': {
'type': 'object',
'properties': {
'maximum': {'type': ['integer', 'null']},
'used': {'type': ['integer', 'null']}
},
'additionalProperties': False,
'required': ['maximum', 'used']
},
'cpu_details': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'id': {'type': ['integer', 'null']},
'time': {'type': ['integer', 'null']},
'utilisation': {'type': ['integer', 'null']}
},
'additionalProperties': False,
'required': ['id', 'time', 'utilisation']
}
},
'nic_details': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'mac_address': {'oneOf': [parameter_types.mac_address,
{'type': 'null'}]},
'rx_octets': {'type': ['integer', 'null']},
'rx_errors': {'type': ['integer', 'null']},
'rx_drop': {'type': ['integer', 'null']},
'rx_packets': {'type': ['integer', 'null']},
'rx_rate': {'type': ['integer', 'null']},
'tx_octets': {'type': ['integer', 'null']},
'tx_errors': {'type': ['integer', 'null']},
'tx_drop': {'type': ['integer', 'null']},
'tx_packets': {'type': ['integer', 'null']},
'tx_rate': {'type': ['integer', 'null']}
},
'additionalProperties': False,
'required': ['mac_address', 'rx_octets', 'rx_errors',
'rx_drop',
'rx_packets', 'rx_rate', 'tx_octets',
'tx_errors',
'tx_drop', 'tx_packets', 'tx_rate']
}
},
'disk_details': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'read_bytes': {'type': ['integer', 'null']},
'read_requests': {'type': ['integer', 'null']},
'write_bytes': {'type': ['integer', 'null']},
'write_requests': {'type': ['integer', 'null']},
'errors_count': {'type': ['integer', 'null']}
},
'additionalProperties': False,
'required': ['read_bytes', 'read_requests', 'write_bytes',
'write_requests', 'errors_count']
}
}
},
'additionalProperties': False,
'required': [
'state', 'driver', 'hypervisor', 'hypervisor_os', 'uptime',
'config_drive', 'num_cpus', 'num_nics', 'num_disks',
'memory_details', 'cpu_details', 'nic_details', 'disk_details'],
}
}
get_server = copy.deepcopy(servers247.get_server)
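# Note (added for clarity): deepcopy keeps the imported v2.47 schema intact, so this
# microversion reuses it unchanged while overriding only show_server_diagnostics above.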
|
[
"copy.deepcopy"
] |
[((4947, 4983), 'copy.deepcopy', 'copy.deepcopy', (['servers247.get_server'], {}), '(servers247.get_server)\n', (4960, 4983), False, 'import copy\n')]
|
# Generated by Django 2.1.2 on 2018-12-03 10:19
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('oferty', '0011_ofertyest_kto_prowadzi'),
]
operations = [
migrations.AlterField(
model_name='ofertyest',
name='kto_prowadzi',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='oferty.OfertyUsers'),
),
]
|
[
"django.db.models.ForeignKey"
] |
[((381, 484), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'to': '"""oferty.OfertyUsers"""'}), "(null=True, on_delete=django.db.models.deletion.SET_NULL,\n to='oferty.OfertyUsers')\n", (398, 484), False, 'from django.db import migrations, models\n')]
|
from compositecore import Leaf
import menu
import state
__author__ = 'co'
def start_accept_reject_prompt(state_stack, game_state, message):
prompt = menu.AcceptRejectPrompt(state_stack, message)
game_state.start_prompt(state.UIState(prompt))
return prompt.result
class PromptPlayer(Leaf):
def __init__(self, message):
super(PromptPlayer, self).__init__()
self.tags = ["prompt_player"]
self.text = message
def prompt_player(self, **kwargs):
target_entity = kwargs["target_entity"]
return start_accept_reject_prompt(target_entity.game_state.value.menu_prompt_stack,
target_entity.game_state.value, self.text)
|
[
"state.UIState",
"menu.AcceptRejectPrompt"
] |
[((156, 201), 'menu.AcceptRejectPrompt', 'menu.AcceptRejectPrompt', (['state_stack', 'message'], {}), '(state_stack, message)\n', (179, 201), False, 'import menu\n'), ((230, 251), 'state.UIState', 'state.UIState', (['prompt'], {}), '(prompt)\n', (243, 251), False, 'import state\n')]
|
import pytest
from insights.core.dr import SkipComponent
from insights.parsers.ntp_sources import ChronycSources, NtpqPn, NtpqLeap
from insights.tests import context_wrap
chrony_output = """
210 Number of sources = 3
MS Name/IP address Stratum Poll Reach LastRx Last sample
===============================================================================
#* GPS0 0 4 377 11 -479ns[ -621ns] +/- 134ns
^? a.b.c 2 6 377 23 -923us[ -924us] +/- 43ms
^+ d.e.f 1 6 377 21 -2629us[-2619us] +/- 86ms
""".strip()
ntpq_leap_output = """
leap=00
""".strip()
ntpq_leap_output_2 = """
assID=0 status=06f4 leap_none, sync_ntp, 15 events, event_peer/strat_chg,
leap=00
""".strip()
ntpd_output = """
remote refid st t when poll reach delay offset jitter
==============================================================================
*ntp103.cm4.tbsi 10.225.208.100 2 u 225 256 377 0.464 0.149 0.019
+ntp104.cm4.tbsi 10.228.209.150 2 u 163 256 377 0.459 -0.234 0.05
""".strip()
ntpd_qn = """
remote refid st t when poll reach delay offset jitter
==============================================================================
172.16.58.3 .INIT. 16 u - 1024 0 0.000 0.000 0.000
"""
ntp_connection_issue = """
/usr/sbin/ntpq: read: Connection refused
""".strip()
def test_get_chrony_sources():
parser_result = ChronycSources(context_wrap(chrony_output))
assert parser_result.data[1].get("source") == "a.b.c"
assert parser_result.data[2].get("state") == "+"
assert parser_result.data[2].get("mode") == "^"
def test_get_ntpq_leap():
parser_result = NtpqLeap(context_wrap(ntpq_leap_output))
assert parser_result.leap == "00"
parser_result = NtpqLeap(context_wrap(ntpq_leap_output_2))
assert parser_result.leap == "00"
with pytest.raises(SkipComponent) as e:
NtpqLeap(context_wrap(ntp_connection_issue))
assert "NTP service is down" in str(e)
def test_get_ntpd_sources():
parser_result = NtpqPn(context_wrap(ntpd_output))
assert parser_result.data[0].get("source") == "ntp103.cm4.tbsi"
assert parser_result.data[1].get("flag") == "+"
assert parser_result.data[1].get("source") == "ntp104.cm4.tbsi"
parser_result2 = NtpqPn(context_wrap(ntpd_qn))
assert parser_result2.data[0].get("source") == "172.16.58.3"
assert parser_result2.data[0].get("flag") == " "
with pytest.raises(SkipComponent) as e:
NtpqPn(context_wrap(ntp_connection_issue))
assert "NTP service is down" in str(e)
|
[
"insights.tests.context_wrap",
"pytest.raises"
] |
[((1442, 1469), 'insights.tests.context_wrap', 'context_wrap', (['chrony_output'], {}), '(chrony_output)\n', (1454, 1469), False, 'from insights.tests import context_wrap\n'), ((1691, 1721), 'insights.tests.context_wrap', 'context_wrap', (['ntpq_leap_output'], {}), '(ntpq_leap_output)\n', (1703, 1721), False, 'from insights.tests import context_wrap\n'), ((1791, 1823), 'insights.tests.context_wrap', 'context_wrap', (['ntpq_leap_output_2'], {}), '(ntpq_leap_output_2)\n', (1803, 1823), False, 'from insights.tests import context_wrap\n'), ((1873, 1901), 'pytest.raises', 'pytest.raises', (['SkipComponent'], {}), '(SkipComponent)\n', (1886, 1901), False, 'import pytest\n'), ((2062, 2087), 'insights.tests.context_wrap', 'context_wrap', (['ntpd_output'], {}), '(ntpd_output)\n', (2074, 2087), False, 'from insights.tests import context_wrap\n'), ((2306, 2327), 'insights.tests.context_wrap', 'context_wrap', (['ntpd_qn'], {}), '(ntpd_qn)\n', (2318, 2327), False, 'from insights.tests import context_wrap\n'), ((2457, 2485), 'pytest.raises', 'pytest.raises', (['SkipComponent'], {}), '(SkipComponent)\n', (2470, 2485), False, 'import pytest\n'), ((1925, 1959), 'insights.tests.context_wrap', 'context_wrap', (['ntp_connection_issue'], {}), '(ntp_connection_issue)\n', (1937, 1959), False, 'from insights.tests import context_wrap\n'), ((2507, 2541), 'insights.tests.context_wrap', 'context_wrap', (['ntp_connection_issue'], {}), '(ntp_connection_issue)\n', (2519, 2541), False, 'from insights.tests import context_wrap\n')]
|
#! /usr/bin/env python -*- coding: utf-8 -*-
"""
Name:
exif_view.py
Description:
Display any EXIF data attached to the image.
Version:
1 - Initial release
Author:
J.MacGrillen <<EMAIL>>
Copyright:
Copyright (c) <NAME>. All rights reserved.
"""
import logging
from PyQt5.QtWidgets import QDockWidget, QVBoxLayout
from src.tools.exif_data import EXIFData
from PyQt5 import QtCore
from PyQt5.QtCore import Qt
self.logger.debug("Trying to extract EXIF data...")
self.exif_data = EXIFData(self.pil_image)
class EXIFView(QDockWidget):
"""
EXIF viewer
"""
v_layout: QVBoxLayout
def __init_subclass__(cls) -> None:
return super().__init_subclass__()
if __name__ == "__main__":
pass
|
[
"src.tools.exif_data.EXIFData"
] |
[((538, 562), 'src.tools.exif_data.EXIFData', 'EXIFData', (['self.pil_image'], {}), '(self.pil_image)\n', (546, 562), False, 'from src.tools.exif_data import EXIFData\n')]
|
from torchvision.datasets import CIFAR100 as C100
import torchvision.transforms as T
from .transforms import MultiSample, aug_transform
from .base import BaseDataset
def base_transform():
return T.Compose(
[T.ToTensor(), T.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761))]
)
class CIFAR100(BaseDataset):
def ds_train(self):
t = MultiSample(
aug_transform(32, base_transform, self.aug_cfg), n=self.aug_cfg.num_samples
)
return C100(root="./data", train=True, download=True, transform=t,)
def ds_clf(self):
t = base_transform()
return C100(root="./data", train=True, download=True, transform=t)
def ds_test(self):
t = base_transform()
return C100(root="./data", train=False, download=True, transform=t)
|
[
"torchvision.transforms.Normalize",
"torchvision.datasets.CIFAR100",
"torchvision.transforms.ToTensor"
] |
[((499, 558), 'torchvision.datasets.CIFAR100', 'C100', ([], {'root': '"""./data"""', 'train': '(True)', 'download': '(True)', 'transform': 't'}), "(root='./data', train=True, download=True, transform=t)\n", (503, 558), True, 'from torchvision.datasets import CIFAR100 as C100\n'), ((627, 686), 'torchvision.datasets.CIFAR100', 'C100', ([], {'root': '"""./data"""', 'train': '(True)', 'download': '(True)', 'transform': 't'}), "(root='./data', train=True, download=True, transform=t)\n", (631, 686), True, 'from torchvision.datasets import CIFAR100 as C100\n'), ((755, 815), 'torchvision.datasets.CIFAR100', 'C100', ([], {'root': '"""./data"""', 'train': '(False)', 'download': '(True)', 'transform': 't'}), "(root='./data', train=False, download=True, transform=t)\n", (759, 815), True, 'from torchvision.datasets import CIFAR100 as C100\n'), ((221, 233), 'torchvision.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (231, 233), True, 'import torchvision.transforms as T\n'), ((235, 298), 'torchvision.transforms.Normalize', 'T.Normalize', (['(0.5071, 0.4867, 0.4408)', '(0.2675, 0.2565, 0.2761)'], {}), '((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761))\n', (246, 298), True, 'import torchvision.transforms as T\n')]
|
import time
import aws_scatter_gather.util.logger as logger
def trace(message, *args):
return Trace(message, *args)
def traced(f):
def wrapper(*args, **kwargs):
with trace("{} args={}, kwargs={}", f.__name__, [*args], {**kwargs}):
return f(*args, **kwargs)
return wrapper
class Trace(object):
def __init__(self, message, *args):
self.message = message.format(*args)
def __enter__(self):
self.start = time.time_ns()
logger.info("START \"%s\"...", str(self.message))
return self
def __exit__(self, exc_type, exc_value, tb):
self.end = time.time_ns()
self.duration_milis = int((self.end - self.start) / 1000 / 1000)
if exc_type is None:
logger.info("SUCCESS of \"%s\". Duration %d millis.", str(self.message), self.duration_milis)
else:
logger.info("FAILURE of \"%s\". Duration %d millis.", str(self.message), self.duration_milis,
exc_info=True)
async def __aenter__(self):
self.__enter__()
return self
async def __aexit__(self, exc_type, exc_value, tb):
self.__exit__(exc_type, exc_value, tb)
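# Example usage (illustrative, not part of the original module):
#
#     @traced
#     def add(a, b):
#         return a + b
#
#     with trace("loading {} records", 42):
#         pass  # timed work goes here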
|
[
"time.time_ns"
] |
[((465, 479), 'time.time_ns', 'time.time_ns', ([], {}), '()\n', (477, 479), False, 'import time\n'), ((627, 641), 'time.time_ns', 'time.time_ns', ([], {}), '()\n', (639, 641), False, 'import time\n')]
|
import unittest
from slgnn.data_processing.pyg_datasets import JAK1Dude, JAK2Dude, JAK3Dude
from slgnn.config import FILTERED_PUBCHEM_FP_LEN
class TestDudeDatasets(unittest.TestCase):
def test_jak1_jak2_jak3(self):
jak = JAK1Dude()
data = jak[0]
self.assertEqual(data.x.size()[1], 6)
self.assertEqual(data.y.size(), (1, FILTERED_PUBCHEM_FP_LEN))
self.assertEqual(data.edge_index.size()[0], 2)
jak = JAK3Dude()
data = jak[0]
self.assertEqual(data.x.size()[1], 6)
self.assertEqual(data.y.size(), (1, FILTERED_PUBCHEM_FP_LEN))
self.assertEqual(data.edge_index.size()[0], 2)
jak = JAK2Dude()
data = jak[0]
self.assertEqual(data.x.size()[1], 6)
self.assertEqual(data.y.size(), (1, FILTERED_PUBCHEM_FP_LEN))
self.assertEqual(data.edge_index.size()[0], 2)
|
[
"slgnn.data_processing.pyg_datasets.JAK2Dude",
"slgnn.data_processing.pyg_datasets.JAK1Dude",
"slgnn.data_processing.pyg_datasets.JAK3Dude"
] |
[((236, 246), 'slgnn.data_processing.pyg_datasets.JAK1Dude', 'JAK1Dude', ([], {}), '()\n', (244, 246), False, 'from slgnn.data_processing.pyg_datasets import JAK1Dude, JAK2Dude, JAK3Dude\n'), ((455, 465), 'slgnn.data_processing.pyg_datasets.JAK3Dude', 'JAK3Dude', ([], {}), '()\n', (463, 465), False, 'from slgnn.data_processing.pyg_datasets import JAK1Dude, JAK2Dude, JAK3Dude\n'), ((674, 684), 'slgnn.data_processing.pyg_datasets.JAK2Dude', 'JAK2Dude', ([], {}), '()\n', (682, 684), False, 'from slgnn.data_processing.pyg_datasets import JAK1Dude, JAK2Dude, JAK3Dude\n')]
|
import unittest
from bobocep.rules.actions.no_action import NoAction
from bobocep.rules.nfas.patterns.bobo_pattern import BoboPattern
from bobocep.setup.bobo_complex_event import \
BoboComplexEvent
class TestBoboComplexEvent(unittest.TestCase):
def test_constructor(self):
name = "evdef_name"
pattern = BoboPattern()
action = NoAction()
evdef = BoboComplexEvent(name=name,
pattern=pattern,
action=action)
self.assertEqual(name, evdef.name)
self.assertEqual(pattern, evdef.pattern)
self.assertEqual(action, evdef.action)
def test_constructor_actions_is_none(self):
name = "evdef_name"
pattern = BoboPattern()
action = None
evdef = BoboComplexEvent(name=name,
pattern=pattern,
action=action)
self.assertEqual(name, evdef.name)
self.assertEqual(pattern, evdef.pattern)
self.assertIsNone(evdef.action)
|
[
"bobocep.rules.nfas.patterns.bobo_pattern.BoboPattern",
"bobocep.setup.bobo_complex_event.BoboComplexEvent",
"bobocep.rules.actions.no_action.NoAction"
] |
[((331, 344), 'bobocep.rules.nfas.patterns.bobo_pattern.BoboPattern', 'BoboPattern', ([], {}), '()\n', (342, 344), False, 'from bobocep.rules.nfas.patterns.bobo_pattern import BoboPattern\n'), ((362, 372), 'bobocep.rules.actions.no_action.NoAction', 'NoAction', ([], {}), '()\n', (370, 372), False, 'from bobocep.rules.actions.no_action import NoAction\n'), ((390, 449), 'bobocep.setup.bobo_complex_event.BoboComplexEvent', 'BoboComplexEvent', ([], {'name': 'name', 'pattern': 'pattern', 'action': 'action'}), '(name=name, pattern=pattern, action=action)\n', (406, 449), False, 'from bobocep.setup.bobo_complex_event import BoboComplexEvent\n'), ((751, 764), 'bobocep.rules.nfas.patterns.bobo_pattern.BoboPattern', 'BoboPattern', ([], {}), '()\n', (762, 764), False, 'from bobocep.rules.nfas.patterns.bobo_pattern import BoboPattern\n'), ((804, 863), 'bobocep.setup.bobo_complex_event.BoboComplexEvent', 'BoboComplexEvent', ([], {'name': 'name', 'pattern': 'pattern', 'action': 'action'}), '(name=name, pattern=pattern, action=action)\n', (820, 863), False, 'from bobocep.setup.bobo_complex_event import BoboComplexEvent\n')]
|
import cav as cav
import model as model
import tcav as tcav
import utils as utils
import utils_plot as utils_plot # utils_plot requires matplotlib
import os
import torch
import activation_generator as act_gen
import tensorflow as tf
working_dir = './tcav_class_test'
activation_dir = working_dir + '/activations/'
cav_dir = working_dir + '/cavs/'
source_dir = "./data/"
bottlenecks = ['conv2']
utils.make_dir_if_not_exists(activation_dir)
utils.make_dir_if_not_exists(working_dir)
utils.make_dir_if_not_exists(cav_dir)
# this is a regularizer penalty parameter for linear classifier to get CAVs.
alphas = [0.1]
target = 'cat'
concepts = ["dotted", "striped", "zigzagged"]
random_counterpart = 'random500_1'
LABEL_PATH = './data/imagenet_comp_graph_label_strings.txt'
mymodel = model.CNNWrapper(LABEL_PATH)
act_generator = act_gen.ImageActivationGenerator(mymodel, source_dir, activation_dir, max_examples=100)
tf.compat.v1.logging.set_verbosity(0)
num_random_exp = 30 # folders (random500_0, random500_1)
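# learn a CAV for each concept (against the random counterparts) and score its effect on the target class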
mytcav = tcav.TCAV(target,
concepts,
bottlenecks,
act_generator,
alphas,
cav_dir=cav_dir,
num_random_exp=num_random_exp)
results = mytcav.run()
utils_plot.plot_results(results, num_random_exp=num_random_exp)
|
[
"tcav.TCAV",
"utils.make_dir_if_not_exists",
"activation_generator.ImageActivationGenerator",
"tensorflow.compat.v1.logging.set_verbosity",
"model.CNNWrapper",
"utils_plot.plot_results"
] |
[((397, 441), 'utils.make_dir_if_not_exists', 'utils.make_dir_if_not_exists', (['activation_dir'], {}), '(activation_dir)\n', (425, 441), True, 'import utils as utils\n'), ((442, 483), 'utils.make_dir_if_not_exists', 'utils.make_dir_if_not_exists', (['working_dir'], {}), '(working_dir)\n', (470, 483), True, 'import utils as utils\n'), ((484, 521), 'utils.make_dir_if_not_exists', 'utils.make_dir_if_not_exists', (['cav_dir'], {}), '(cav_dir)\n', (512, 521), True, 'import utils as utils\n'), ((784, 812), 'model.CNNWrapper', 'model.CNNWrapper', (['LABEL_PATH'], {}), '(LABEL_PATH)\n', (800, 812), True, 'import model as model\n'), ((830, 921), 'activation_generator.ImageActivationGenerator', 'act_gen.ImageActivationGenerator', (['mymodel', 'source_dir', 'activation_dir'], {'max_examples': '(100)'}), '(mymodel, source_dir, activation_dir,\n max_examples=100)\n', (862, 921), True, 'import activation_generator as act_gen\n'), ((919, 956), 'tensorflow.compat.v1.logging.set_verbosity', 'tf.compat.v1.logging.set_verbosity', (['(0)'], {}), '(0)\n', (953, 956), True, 'import tensorflow as tf\n'), ((1026, 1142), 'tcav.TCAV', 'tcav.TCAV', (['target', 'concepts', 'bottlenecks', 'act_generator', 'alphas'], {'cav_dir': 'cav_dir', 'num_random_exp': 'num_random_exp'}), '(target, concepts, bottlenecks, act_generator, alphas, cav_dir=\n cav_dir, num_random_exp=num_random_exp)\n', (1035, 1142), True, 'import tcav as tcav\n'), ((1277, 1340), 'utils_plot.plot_results', 'utils_plot.plot_results', (['results'], {'num_random_exp': 'num_random_exp'}), '(results, num_random_exp=num_random_exp)\n', (1300, 1340), True, 'import utils_plot as utils_plot\n')]
|
from subprocess import check_output, CalledProcessError, STDOUT
import sys
import re
import json
import logging
from .common import convert_external_variables
def scan(signature_path, file_path, external_variables={}, recursive=False):
'''
Scan files and return matches
:param signature_path: path to signature file
:type signature_path: string
:param file_path: files to scan
:type file_path: string
:return: dict
'''
variables = convert_external_variables(external_variables)
recursive = '-r' if recursive else ''
try:
scan_result = check_output("yara {} {} --print-meta --print-strings {} {}".format(variables, recursive, signature_path, file_path), shell=True, stderr=STDOUT)
except CalledProcessError as e:
logging.error("There seems to be an error in the rule file:\n{}".format(e.output.decode()))
return {}
try:
return _parse_yara_output(scan_result.decode())
except Exception as e:
logging.error('Could not parse yara result: {} {}'.format(sys.exc_info()[0].__name__, e))
return {}
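# parse raw yara stdout into a dict keyed by rule name, holding matched strings, offsets and meta data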
def _parse_yara_output(output):
resulting_matches = dict()
match_blocks, rules = _split_output_in_rules_and_matches(output)
matches_regex = re.compile(r'((0x[a-f0-9]*):(\S+):\s(.+))+')
for index, rule in enumerate(rules):
for match in matches_regex.findall(match_blocks[index]):
_append_match_to_result(match, resulting_matches, rule)
return resulting_matches
def _split_output_in_rules_and_matches(output):
split_regex = re.compile(r'\n*.*\[.*\]\s\/.+\n*')
match_blocks = split_regex.split(output)
while '' in match_blocks:
match_blocks.remove('')
rule_regex = re.compile(r'(.*)\s\[(.*)\]\s([\.\.\/]|[\/]|[\.\/])(.+)')
rules = rule_regex.findall(output)
assert len(match_blocks) == len(rules)
return match_blocks, rules
def _append_match_to_result(match, resulting_matches, rule):
assert len(rule) == 4
rule_name, meta_string, _, _ = rule
assert len(match) == 4
_, offset, matched_tag, matched_string = match
meta_dict = _parse_meta_data(meta_string)
this_match = resulting_matches[rule_name] if rule_name in resulting_matches else dict(rule=rule_name, matches=True, strings=list(), meta=meta_dict)
this_match['strings'].append((int(offset, 16), matched_tag, matched_string.encode()))
resulting_matches[rule_name] = this_match
def _parse_meta_data(meta_data_string):
'''
Will be of form 'item0=lowercaseboolean0,item1="value1",item2=value2,..'
'''
meta_data = dict()
for item in meta_data_string.split(','):
if '=' in item:
key, value = item.split('=', maxsplit=1)
value = json.loads(value) if value in ['true', 'false'] else value.strip('\"')
meta_data[key] = value
else:
logging.warning('Malformed meta string \'{}\''.format(meta_data_string))
return meta_data
|
[
"json.loads",
"sys.exc_info",
"re.compile"
] |
[((1254, 1299), 're.compile', 're.compile', (['"""((0x[a-f0-9]*):(\\\\S+):\\\\s(.+))+"""'], {}), "('((0x[a-f0-9]*):(\\\\S+):\\\\s(.+))+')\n", (1264, 1299), False, 'import re\n'), ((1571, 1611), 're.compile', 're.compile', (['"""\\\\n*.*\\\\[.*\\\\]\\\\s\\\\/.+\\\\n*"""'], {}), "('\\\\n*.*\\\\[.*\\\\]\\\\s\\\\/.+\\\\n*')\n", (1581, 1611), False, 'import re\n'), ((1732, 1798), 're.compile', 're.compile', (['"""(.*)\\\\s\\\\[(.*)\\\\]\\\\s([\\\\.\\\\.\\\\/]|[\\\\/]|[\\\\.\\\\/])(.+)"""'], {}), "('(.*)\\\\s\\\\[(.*)\\\\]\\\\s([\\\\.\\\\.\\\\/]|[\\\\/]|[\\\\.\\\\/])(.+)')\n", (1742, 1798), False, 'import re\n'), ((2748, 2765), 'json.loads', 'json.loads', (['value'], {}), '(value)\n', (2758, 2765), False, 'import json\n'), ((1048, 1062), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (1060, 1062), False, 'import sys\n')]
|
from astropy.io import ascii, fits
from astropy.table import QTable, Table
import arviz as az
from astropy.coordinates import SkyCoord
from astropy import units as u
import os
import pymoc
from astropy import wcs
from astropy.table import vstack, hstack
import numpy as np
import xidplus
# # Applying XID+CIGALE to Extreme Starbursts
# In this notebook, we read in the data files and prepare them for fitting with XID+CIGALE, the SED prior model extension to XID+. Here we focus on sources in [Rowan-Robinson et al. 2018](https://arxiv.org/abs/1704.07783) and claimed to have a star formation rate of $> 10^{3}\mathrm{M_{\odot}yr^{-1}}$
# In[2]:
def process_prior(c,new_Table=None,
path_to_data=['../../../data/'],
field=['Lockman-SWIRE'],
path_to_SPIRE=['/Volumes/pdh_storage/dmu_products/dmu19/dmu19_HELP-SPIRE-maps/data/'],
redshift_file=["/Volumes/pdh_storage/dmu_products/dmu24/dmu24_Lockman-SWIRE/data/master_catalogue_Lockman-SWIRE_20170710_photoz_20170802_r_and_irac1_optimised_UPDATED_IDs_20180219.fits"],
redshift_prior=[0.1,2.0],
radius=6.0,
alt_model=False):
# Import required modules
# In[3]:
# In[4]:
# Set image and catalogue filenames
# In[5]:
#Folder containing maps
pswfits=path_to_SPIRE[0]+'{}_SPIRE250_v1.0.fits'.format(field[0])#SPIRE 250 map
pmwfits=path_to_SPIRE[0]+'{}_SPIRE350_v1.0.fits'.format(field[0])#SPIRE 350 map
plwfits=path_to_SPIRE[0]+'{}_SPIRE500_v1.0.fits'.format(field[0])#SPIRE 500 map
#output folder
output_folder='./'
# Load in images, noise maps, header info and WCS information
# In[6]:
#-----250-------------
hdulist = fits.open(pswfits)
im250phdu=hdulist[0].header
im250hdu=hdulist[1].header
im250=hdulist[1].data*1.0E3 #convert to mJy
nim250=hdulist[3].data*1.0E3 #convert to mJy
w_250 = wcs.WCS(hdulist[1].header)
pixsize250=np.abs(3600.0*w_250.wcs.cdelt[0]) #pixel size (in arcseconds)
hdulist.close()
#-----350-------------
hdulist = fits.open(pmwfits)
im350phdu=hdulist[0].header
im350hdu=hdulist[1].header
im350=hdulist[1].data*1.0E3 #convert to mJy
nim350=hdulist[3].data*1.0E3 #convert to mJy
w_350 = wcs.WCS(hdulist[1].header)
pixsize350=np.abs(3600.0*w_350.wcs.cdelt[0]) #pixel size (in arcseconds)
hdulist.close()
#-----500-------------
hdulist = fits.open(plwfits)
im500phdu=hdulist[0].header
im500hdu=hdulist[1].header
im500=hdulist[1].data*1.0E3 #convert to mJy
nim500=hdulist[3].data*1.0E3 #convert to mJy
w_500 = wcs.WCS(hdulist[1].header)
pixsize500=np.abs(3600.0*w_500.wcs.cdelt[0]) #pixel size (in arcseconds)
hdulist.close()
    # XID+ uses Multi Order Coverage (MOC) maps for cutting down maps and catalogues so they cover the same area. It can also take in MOCs as selection functions to carry out additional cuts. Let's use the Python module [pymoc](http://pymoc.readthedocs.io/en/latest/) to create a MOC, centered on a specific position we are interested in. We will use a HEALPix order of 15 (the resolution: higher order means higher resolution)
moc=pymoc.util.catalog.catalog_to_moc(c,100,15)
# Load in catalogue you want to fit (and make any cuts). Here we use HELP's VO database and directly call it using PyVO
# In[10]:
import pyvo as vo
service = vo.dal.TAPService("https://herschel-vos.phys.sussex.ac.uk/__system__/tap/run/tap")
# In[11]:
resultset = service.search("SELECT TOP 10000 * FROM herschelhelp.main WHERE 1=CONTAINS(POINT('ICRS', ra, dec),CIRCLE('ICRS',"+str(c.ra.deg[0])+", "+str(c.dec.deg[0])+", 0.028 ))")
# In[12]:
masterlist=resultset.table
def construct_prior(Table=None):
from astropy.coordinates import SkyCoord
#first use standard cut (i.e. not star and is detected in at least 3 opt/nir bands)
prior_list=masterlist[(masterlist['flag_gaia']!=3) & (masterlist['flag_optnir_det']>=3)]
#make skycoord from masterlist
catalog=SkyCoord(ra=masterlist['ra'],dec=masterlist['dec'])
#make skycoord from input table
c = SkyCoord(ra=Table['ra'], dec=Table['dec'])
#search around all of the new sources
idxc, idxcatalog, d2d, d3d=catalog.search_around_sky(c,radius*u.arcsec)
#for every new sources
for src in range(0,len(Table)):
#limit to matches around interested sources
ind = idxc == src
#if there are matches
if ind.sum() >0:
                #choose the closest and check if it's already in the prior list
in_prior=prior_list['help_id']==masterlist[idxcatalog][ind][np.argmin(d2d[ind])]['help_id']
                #if it's not in the prior list
if in_prior.sum() <1:
print(in_prior.sum())
#add to appended sources
prior_list=vstack([prior_list,masterlist[idxcatalog][ind][np.argmin(d2d[ind])]])
return prior_list
# In[64]:
import astropy.units as u
#create table of candidate source
t = QTable([c.ra, c.dec], names=('ra', 'dec'))
#add candidate source to new sources table, create prior list
if new_Table is not None:
prior_list=construct_prior(vstack([t,new_Table]))
else:
prior_list = construct_prior(t)
if alt_model==True:
sep = 18
separation = c.separation(SkyCoord(prior_list['ra'], prior_list['dec'])).arcsec
remove_ind = (separation > np.min(separation)) & (separation < sep)
prior_list.remove_rows(remove_ind)
    # ## Get Redshift and Uncertainty
#
    # <NAME> defines a median and a hierarchical Bayes combination redshift. We need uncertainty so let's match via `help_id`
# In[26]:
photoz=Table.read(redshift_file[0])
# In[27]:
#help_id=np.empty((len(photoz)),dtype=np.dtype('U27'))
for i in range(0,len(photoz)):
photoz['help_id'][i]=str(photoz['help_id'][i].strip()).encode('utf-8')
#photoz['help_id']=help_id
# In[28]:
from astropy.table import Column, MaskedColumn
prior_list['redshift']=MaskedColumn(np.full((len(prior_list)),fill_value=redshift_prior[0]),mask=[False]*len(prior_list))
prior_list.add_column(MaskedColumn(np.full((len(prior_list)),fill_value=redshift_prior[1]),mask=[False]*len(prior_list),name='redshift_unc'))
# In[29]:
photoz
# In[30]:
ii=0
for i in range(0,len(prior_list)):
ind=photoz['help_id'] == prior_list['help_id'][i]
try:
if photoz['z1_median'][ind]>0.0:
prior_list['redshift'][i]=photoz['z1_median'][ind]
prior_list['redshift_unc'][i]=np.max(np.array([np.abs(photoz['z1_median'][ind]-photoz['z1_min'][ind]),np.abs(photoz['z1_max'][ind]-photoz['z1_median'][ind])]))
#prior_list['redshift_unc'].mask[i]=False
#prior_list['redshift'].mask[i]=False
except ValueError:
None
# In[33]:
dist_matrix=np.zeros((len(prior_list),len(prior_list)))
from astropy.coordinates import SkyCoord
from astropy import units as u
for i in range(0,len(prior_list)):
for j in range(0,len(prior_list)):
if i>j:
coord1 = SkyCoord(ra=prior_list['ra'][i]*u.deg,dec=prior_list['dec'][i]*u.deg,frame='icrs')
coord2=SkyCoord(ra=prior_list['ra'][j]*u.deg,dec=prior_list['dec'][j]*u.deg)
dist_matrix[i,j] = coord1.separation(coord2).value
# In[35]:
ind=(np.tril(dist_matrix)<1.0/3600.0) & (np.tril(dist_matrix)>0)
xx,yy=np.meshgrid(np.arange(0,len(prior_list)),np.arange(0,len(prior_list)))
yy[ind]
# In[36]:
prior_list[yy[ind]]
# In[37]:
prior_list['redshift'].mask[yy[ind]]=True
# In[38]:
prior_list=prior_list[prior_list['redshift'].mask == False]
# In[39]:
prior_list
    # XID+ is built around two Python classes: a prior and a posterior class. There should be a prior class for each map being fitted. It is initiated with a map, noise map, primary header and map header and can be set with a MOC. It also requires an input prior catalogue and point spread function.
#
# In[40]:
#---prior250--------
    prior250=xidplus.prior(im250,nim250,im250phdu,im250hdu, moc=moc)#Initialise with map, uncertainty map, wcs info and primary header
prior250.prior_cat(prior_list['ra'],prior_list['dec'],'photoz',ID=prior_list['help_id'])
prior250.prior_bkg(-5.0,5)#Set prior on background (assumes Gaussian pdf with mu and sigma)
#---prior350--------
prior350=xidplus.prior(im350,nim350,im350phdu,im350hdu, moc=moc)
prior350.prior_cat(prior_list['ra'],prior_list['dec'],'photoz',ID=prior_list['help_id'])
prior350.prior_bkg(-5.0,5)
#---prior500--------
prior500=xidplus.prior(im500,nim500,im500phdu,im500hdu, moc=moc)
prior500.prior_cat(prior_list['ra'],prior_list['dec'],'photoz',ID=prior_list['help_id'])
prior500.prior_bkg(-5.0,5)
    # Set PSF. For SPIRE, the PSF can be assumed to be Gaussian with a FWHM of 18.15, 25.15, 36.3 '' for 250, 350 and 500 $\mathrm{\mu m}$ respectively. Let's use the astropy module to construct a Gaussian PSF and assign it to the three XID+ prior classes.
# In[41]:
#pixsize array (size of pixels in arcseconds)
pixsize=np.array([pixsize250,pixsize350,pixsize500])
#point response function for the three bands
prfsize=np.array([18.15,25.15,36.3])
#use Gaussian2DKernel to create prf (requires stddev rather than fwhm hence pfwhm/2.355)
from astropy.convolution import Gaussian2DKernel
##---------fit using Gaussian beam-----------------------
prf250=Gaussian2DKernel(prfsize[0]/2.355,x_size=101,y_size=101)
prf250.normalize(mode='peak')
prf350=Gaussian2DKernel(prfsize[1]/2.355,x_size=101,y_size=101)
prf350.normalize(mode='peak')
prf500=Gaussian2DKernel(prfsize[2]/2.355,x_size=101,y_size=101)
prf500.normalize(mode='peak')
pind250=np.arange(0,101,1)*1.0/pixsize[0] #get 250 scale in terms of pixel scale of map
pind350=np.arange(0,101,1)*1.0/pixsize[1] #get 350 scale in terms of pixel scale of map
pind500=np.arange(0,101,1)*1.0/pixsize[2] #get 500 scale in terms of pixel scale of map
prior250.set_prf(prf250.array,pind250,pind250)#requires psf as 2d grid, and x and y bins for grid (in pixel scale)
prior350.set_prf(prf350.array,pind350,pind350)
prior500.set_prf(prf500.array,pind500,pind500)
print('fitting '+ str(prior250.nsrc)+' sources \n')
print('using ' + str(prior250.snpix)+', '+ str(prior350.snpix)+' and '+ str(prior500.snpix)+' pixels')
print('source density = {}'.format(prior250.nsrc/moc.area_sq_deg))
    # Before fitting, the prior classes need to take the PSF and calculate how much each source contributes to each pixel. This process provides what we call a pointing matrix. Let's calculate the pointing matrix for each prior class
# In[43]:
prior250.get_pointing_matrix()
prior350.get_pointing_matrix()
prior500.get_pointing_matrix()
# In[44]:
return [prior250,prior350,prior500],prior_list
def getSEDs(data, src, nsamp=30,category='posterior'):
import subprocess
if category=='posterior':
d=data.posterior
else:
d=data.prior
subsample = np.random.choice(d.chain.size * d.draw.size, size=nsamp,replace=False)
agn = d.agn.values.reshape(d.chain.size * d.draw.size,
d.src.size)[subsample, :]
z = d.redshift.values.reshape(d.chain.size * d.draw.size,
d.src.size)[subsample, :]
sfr = d.sfr.values.reshape(d.chain.size * d.draw.size,
d.src.size)[subsample, :]
fin = open("/Volumes/pdh_storage/cigale/pcigale_orig.ini")
fout = open("/Volumes/pdh_storage/cigale/pcigale.ini", "wt")
for line in fin:
if 'redshift =' in line:
fout.write(' redshift = ' + ', '.join(['{:.13f}'.format(i) for i in z[:, src]]) + ' \n')
elif 'fracAGN =' in line:
fout.write(' fracAGN = ' + ', '.join(['{:.13f}'.format(i) for i in agn[:, src]]) + ' \n')
else:
fout.write(line)
fin.close()
fout.close()
p = subprocess.Popen(['pcigale', 'run'], cwd='/Volumes/pdh_storage/cigale/')
p.wait()
SEDs = Table.read('/Volumes/pdh_storage/cigale/out//models-block-0.fits')
# set more appropriate units for dust
from astropy.constants import L_sun, M_sun
SEDs['dust.luminosity'] = SEDs['dust.luminosity'] / L_sun.value
SEDs['dust.mass'] = SEDs['dust.mass'] / M_sun.value
wavelengths = []
fluxes = []
for i in range(0, nsamp):
sed_plot = Table.read('/Volumes/pdh_storage/cigale/out/{}_best_model.fits'.format(+SEDs[i * nsamp + (i)]['id']))
wavelengths.append(sed_plot['wavelength'] / 1E3)
fluxes.append(((10.0 ** sfr[i, src]) / SEDs[i * nsamp + (i)]['sfh.sfr']) * sed_plot['Fnu'])
from astropy.table import vstack, hstack
return hstack(wavelengths), hstack(fluxes)
|
[
"astropy.convolution.Gaussian2DKernel",
"numpy.abs",
"numpy.argmin",
"numpy.arange",
"astropy.table.hstack",
"pymoc.util.catalog.catalog_to_moc",
"numpy.random.choice",
"subprocess.Popen",
"astropy.table.QTable",
"numpy.min",
"astropy.io.fits.open",
"pyvo.dal.TAPService",
"astropy.table.Table.read",
"numpy.tril",
"astropy.wcs.WCS",
"astropy.table.vstack",
"numpy.array",
"xidplus.prior",
"astropy.coordinates.SkyCoord"
] |
[((1773, 1791), 'astropy.io.fits.open', 'fits.open', (['pswfits'], {}), '(pswfits)\n', (1782, 1791), False, 'from astropy.io import ascii, fits\n'), ((1965, 1991), 'astropy.wcs.WCS', 'wcs.WCS', (['hdulist[1].header'], {}), '(hdulist[1].header)\n', (1972, 1991), False, 'from astropy import wcs\n'), ((2007, 2042), 'numpy.abs', 'np.abs', (['(3600.0 * w_250.wcs.cdelt[0])'], {}), '(3600.0 * w_250.wcs.cdelt[0])\n', (2013, 2042), True, 'import numpy as np\n'), ((2130, 2148), 'astropy.io.fits.open', 'fits.open', (['pmwfits'], {}), '(pmwfits)\n', (2139, 2148), False, 'from astropy.io import ascii, fits\n'), ((2322, 2348), 'astropy.wcs.WCS', 'wcs.WCS', (['hdulist[1].header'], {}), '(hdulist[1].header)\n', (2329, 2348), False, 'from astropy import wcs\n'), ((2364, 2399), 'numpy.abs', 'np.abs', (['(3600.0 * w_350.wcs.cdelt[0])'], {}), '(3600.0 * w_350.wcs.cdelt[0])\n', (2370, 2399), True, 'import numpy as np\n'), ((2487, 2505), 'astropy.io.fits.open', 'fits.open', (['plwfits'], {}), '(plwfits)\n', (2496, 2505), False, 'from astropy.io import ascii, fits\n'), ((2678, 2704), 'astropy.wcs.WCS', 'wcs.WCS', (['hdulist[1].header'], {}), '(hdulist[1].header)\n', (2685, 2704), False, 'from astropy import wcs\n'), ((2720, 2755), 'numpy.abs', 'np.abs', (['(3600.0 * w_500.wcs.cdelt[0])'], {}), '(3600.0 * w_500.wcs.cdelt[0])\n', (2726, 2755), True, 'import numpy as np\n'), ((3243, 3288), 'pymoc.util.catalog.catalog_to_moc', 'pymoc.util.catalog.catalog_to_moc', (['c', '(100)', '(15)'], {}), '(c, 100, 15)\n', (3276, 3288), False, 'import pymoc\n'), ((3465, 3552), 'pyvo.dal.TAPService', 'vo.dal.TAPService', (['"""https://herschel-vos.phys.sussex.ac.uk/__system__/tap/run/tap"""'], {}), "(\n 'https://herschel-vos.phys.sussex.ac.uk/__system__/tap/run/tap')\n", (3482, 3552), True, 'import pyvo as vo\n'), ((5204, 5246), 'astropy.table.QTable', 'QTable', (['[c.ra, c.dec]'], {'names': "('ra', 'dec')"}), "([c.ra, c.dec], names=('ra', 'dec'))\n", (5210, 5246), False, 'from astropy.table import QTable, Table\n'), ((5898, 5926), 'astropy.table.Table.read', 'Table.read', (['redshift_file[0]'], {}), '(redshift_file[0])\n', (5908, 5926), False, 'from astropy.table import QTable, Table\n'), ((8375, 8433), 'xidplus.prior', 'xidplus.prior', (['im250', 'nim250', 'im250phdu', 'im250hdu'], {'moc': 'moc'}), '(im250, nim250, im250phdu, im250hdu, moc=moc)\n', (8388, 8433), False, 'import xidplus\n'), ((8724, 8782), 'xidplus.prior', 'xidplus.prior', (['im350', 'nim350', 'im350phdu', 'im350hdu'], {'moc': 'moc'}), '(im350, nim350, im350phdu, im350hdu, moc=moc)\n', (8737, 8782), False, 'import xidplus\n'), ((8942, 9000), 'xidplus.prior', 'xidplus.prior', (['im500', 'nim500', 'im500phdu', 'im500hdu'], {'moc': 'moc'}), '(im500, nim500, im500phdu, im500hdu, moc=moc)\n', (8955, 9000), False, 'import xidplus\n'), ((9458, 9504), 'numpy.array', 'np.array', (['[pixsize250, pixsize350, pixsize500]'], {}), '([pixsize250, pixsize350, pixsize500])\n', (9466, 9504), True, 'import numpy as np\n'), ((9564, 9594), 'numpy.array', 'np.array', (['[18.15, 25.15, 36.3]'], {}), '([18.15, 25.15, 36.3])\n', (9572, 9594), True, 'import numpy as np\n'), ((9813, 9873), 'astropy.convolution.Gaussian2DKernel', 'Gaussian2DKernel', (['(prfsize[0] / 2.355)'], {'x_size': '(101)', 'y_size': '(101)'}), '(prfsize[0] / 2.355, x_size=101, y_size=101)\n', (9829, 9873), False, 'from astropy.convolution import Gaussian2DKernel\n'), ((9915, 9975), 'astropy.convolution.Gaussian2DKernel', 'Gaussian2DKernel', (['(prfsize[1] / 2.355)'], {'x_size': '(101)', 'y_size': '(101)'}), 
'(prfsize[1] / 2.355, x_size=101, y_size=101)\n', (9931, 9975), False, 'from astropy.convolution import Gaussian2DKernel\n'), ((10017, 10077), 'astropy.convolution.Gaussian2DKernel', 'Gaussian2DKernel', (['(prfsize[2] / 2.355)'], {'x_size': '(101)', 'y_size': '(101)'}), '(prfsize[2] / 2.355, x_size=101, y_size=101)\n', (10033, 10077), False, 'from astropy.convolution import Gaussian2DKernel\n'), ((11452, 11523), 'numpy.random.choice', 'np.random.choice', (['(d.chain.size * d.draw.size)'], {'size': 'nsamp', 'replace': '(False)'}), '(d.chain.size * d.draw.size, size=nsamp, replace=False)\n', (11468, 11523), True, 'import numpy as np\n'), ((12428, 12500), 'subprocess.Popen', 'subprocess.Popen', (["['pcigale', 'run']"], {'cwd': '"""/Volumes/pdh_storage/cigale/"""'}), "(['pcigale', 'run'], cwd='/Volumes/pdh_storage/cigale/')\n", (12444, 12500), False, 'import subprocess\n'), ((12526, 12592), 'astropy.table.Table.read', 'Table.read', (['"""/Volumes/pdh_storage/cigale/out//models-block-0.fits"""'], {}), "('/Volumes/pdh_storage/cigale/out//models-block-0.fits')\n", (12536, 12592), False, 'from astropy.table import QTable, Table\n'), ((4130, 4182), 'astropy.coordinates.SkyCoord', 'SkyCoord', ([], {'ra': "masterlist['ra']", 'dec': "masterlist['dec']"}), "(ra=masterlist['ra'], dec=masterlist['dec'])\n", (4138, 4182), False, 'from astropy.coordinates import SkyCoord\n'), ((4234, 4276), 'astropy.coordinates.SkyCoord', 'SkyCoord', ([], {'ra': "Table['ra']", 'dec': "Table['dec']"}), "(ra=Table['ra'], dec=Table['dec'])\n", (4242, 4276), False, 'from astropy.coordinates import SkyCoord\n'), ((13209, 13228), 'astropy.table.hstack', 'hstack', (['wavelengths'], {}), '(wavelengths)\n', (13215, 13228), False, 'from astropy.table import vstack, hstack\n'), ((13230, 13244), 'astropy.table.hstack', 'hstack', (['fluxes'], {}), '(fluxes)\n', (13236, 13244), False, 'from astropy.table import vstack, hstack\n'), ((5378, 5400), 'astropy.table.vstack', 'vstack', (['[t, new_Table]'], {}), '([t, new_Table])\n', (5384, 5400), False, 'from astropy.table import vstack, hstack\n'), ((7644, 7664), 'numpy.tril', 'np.tril', (['dist_matrix'], {}), '(dist_matrix)\n', (7651, 7664), True, 'import numpy as np\n'), ((7680, 7700), 'numpy.tril', 'np.tril', (['dist_matrix'], {}), '(dist_matrix)\n', (7687, 7700), True, 'import numpy as np\n'), ((10121, 10141), 'numpy.arange', 'np.arange', (['(0)', '(101)', '(1)'], {}), '(0, 101, 1)\n', (10130, 10141), True, 'import numpy as np\n'), ((10213, 10233), 'numpy.arange', 'np.arange', (['(0)', '(101)', '(1)'], {}), '(0, 101, 1)\n', (10222, 10233), True, 'import numpy as np\n'), ((10305, 10325), 'numpy.arange', 'np.arange', (['(0)', '(101)', '(1)'], {}), '(0, 101, 1)\n', (10314, 10325), True, 'import numpy as np\n'), ((5527, 5572), 'astropy.coordinates.SkyCoord', 'SkyCoord', (["prior_list['ra']", "prior_list['dec']"], {}), "(prior_list['ra'], prior_list['dec'])\n", (5535, 5572), False, 'from astropy.coordinates import SkyCoord\n'), ((5616, 5634), 'numpy.min', 'np.min', (['separation'], {}), '(separation)\n', (5622, 5634), True, 'import numpy as np\n'), ((7374, 7466), 'astropy.coordinates.SkyCoord', 'SkyCoord', ([], {'ra': "(prior_list['ra'][i] * u.deg)", 'dec': "(prior_list['dec'][i] * u.deg)", 'frame': '"""icrs"""'}), "(ra=prior_list['ra'][i] * u.deg, dec=prior_list['dec'][i] * u.deg,\n frame='icrs')\n", (7382, 7466), False, 'from astropy.coordinates import SkyCoord\n'), ((7481, 7555), 'astropy.coordinates.SkyCoord', 'SkyCoord', ([], {'ra': "(prior_list['ra'][j] * u.deg)", 'dec': 
"(prior_list['dec'][j] * u.deg)"}), "(ra=prior_list['ra'][j] * u.deg, dec=prior_list['dec'][j] * u.deg)\n", (7489, 7555), False, 'from astropy.coordinates import SkyCoord\n'), ((4781, 4800), 'numpy.argmin', 'np.argmin', (['d2d[ind]'], {}), '(d2d[ind])\n', (4790, 4800), True, 'import numpy as np\n'), ((6827, 6883), 'numpy.abs', 'np.abs', (["(photoz['z1_median'][ind] - photoz['z1_min'][ind])"], {}), "(photoz['z1_median'][ind] - photoz['z1_min'][ind])\n", (6833, 6883), True, 'import numpy as np\n'), ((6882, 6938), 'numpy.abs', 'np.abs', (["(photoz['z1_max'][ind] - photoz['z1_median'][ind])"], {}), "(photoz['z1_max'][ind] - photoz['z1_median'][ind])\n", (6888, 6938), True, 'import numpy as np\n'), ((5059, 5078), 'numpy.argmin', 'np.argmin', (['d2d[ind]'], {}), '(d2d[ind])\n', (5068, 5078), True, 'import numpy as np\n')]
|
import math
import random
from ..algorithm_common import AlgorithmCommon as AC
from ..algorithm_common import IAlgorithm
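# harmony search: keep a pool of candidate solutions ("harmonies") and repeatedly build a new one from random picks, small pitch adjustments and fresh random values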
class Harmony(IAlgorithm):
def __init__(self,
harmony_max,
bandwidth=0.1,
enable_bandwidth_rate=False,
select_rate=0.8,
change_rate=0.3,
):
self.harmony_max = harmony_max
self.bandwidth = bandwidth
self.enable_bandwidth_rate = enable_bandwidth_rate
self.select_rate = select_rate
self.change_rate = change_rate
def init(self, problem):
self.problem = problem
self.count = 0
self.harmonys = []
for _ in range(self.harmony_max):
self.harmonys.append(problem.create())
def getMaxElement(self):
self.harmonys.sort(key=lambda x: x.getScore())
return self.harmonys[-1]
def getElements(self):
return self.harmonys
def step(self):
        # create a new harmony
arr = []
for i in range(self.problem.size):
if random.random() < self.select_rate:
                # generate a new pitch value
arr.append(self.problem.randomVal())
continue
            # pick one existing harmony
h_arr = self.harmonys[random.randint(0, self.harmony_max-1)].getArray()
if random.random() < self.change_rate:
                # adjust the pitch
if self.enable_bandwidth_rate:
                    # bandwidth given as a ratio of the value range
bandwidth = self.bandwidth * (self.problem.MAX_VAL - self.problem.MIN_VAL)
else:
bandwidth = self.bandwidth
n = h_arr[i] + bandwidth * (random.random()*2-1)
arr.append(n)
else:
                # copy the pitch unchanged
arr.append(h_arr[i])
harmony = self.problem.create(arr)
self.count += 1
        # replace the worst harmony if the new harmony scores higher
self.harmonys.sort(key=lambda x: x.getScore())
if self.harmonys[0].getScore() < harmony.getScore():
self.harmonys[0] = harmony
|
[
"random.random",
"random.randint"
] |
[((1040, 1055), 'random.random', 'random.random', ([], {}), '()\n', (1053, 1055), False, 'import random\n'), ((1309, 1324), 'random.random', 'random.random', ([], {}), '()\n', (1322, 1324), False, 'import random\n'), ((1243, 1282), 'random.randint', 'random.randint', (['(0)', '(self.harmony_max - 1)'], {}), '(0, self.harmony_max - 1)\n', (1257, 1282), False, 'import random\n'), ((1664, 1679), 'random.random', 'random.random', ([], {}), '()\n', (1677, 1679), False, 'import random\n')]
|
import datetime
import unittest
import autos.utils.date as date
class TestDateRange(unittest.TestCase):
def test_returns_today_date_as_default(self):
actual = list(date.date_range())
expected = [datetime.date.today()]
self.assertEqual(actual, expected)
def test_returns_correct_range(self):
actual = list(date.date_range(
since=(datetime.date.today() - datetime.timedelta(days=3)),
until=(datetime.date.today() - datetime.timedelta(days=1)),
))
expected = [
(datetime.date.today() - datetime.timedelta(days=1)),
(datetime.date.today() - datetime.timedelta(days=2)),
(datetime.date.today() - datetime.timedelta(days=3)),
]
self.assertEqual(actual, expected)
class TestGetPastDate(unittest.TestCase):
def test_returns_today_date_by_default(self):
actual = date.get_past_date()
expected = (datetime.date.today() - datetime.timedelta(days=0))
self.assertEqual(actual, expected)
def test_returns_past_3_days_ago_date(self):
actual = date.get_past_date(days=3)
expected = datetime.date.today() - datetime.timedelta(days=3)
self.assertEqual(actual, expected)
def test_returns_past_5_weeks_ago_date(self):
actual = date.get_past_date(weeks=5)
expected = datetime.date.today() - datetime.timedelta(weeks=5)
self.assertEqual(actual, expected)
def test_returns_past_3_days_and_2_weeks_ago_date(self):
actual = date.get_past_date(days=3, weeks=2)
expected = datetime.date.today() - datetime.timedelta(days=3, weeks=2)
self.assertEqual(actual, expected)
def test_returns_future_date_on_negative_input(self):
actual = date.get_past_date(days=-3, weeks=-2)
expected = datetime.date.today() + datetime.timedelta(days=3, weeks=2)
self.assertEqual(actual, expected)
|
[
"datetime.date.today",
"autos.utils.date.date_range",
"autos.utils.date.get_past_date",
"datetime.timedelta"
] |
[((906, 926), 'autos.utils.date.get_past_date', 'date.get_past_date', ([], {}), '()\n', (924, 926), True, 'import autos.utils.date as date\n'), ((1109, 1135), 'autos.utils.date.get_past_date', 'date.get_past_date', ([], {'days': '(3)'}), '(days=3)\n', (1127, 1135), True, 'import autos.utils.date as date\n'), ((1317, 1344), 'autos.utils.date.get_past_date', 'date.get_past_date', ([], {'weeks': '(5)'}), '(weeks=5)\n', (1335, 1344), True, 'import autos.utils.date as date\n'), ((1538, 1573), 'autos.utils.date.get_past_date', 'date.get_past_date', ([], {'days': '(3)', 'weeks': '(2)'}), '(days=3, weeks=2)\n', (1556, 1573), True, 'import autos.utils.date as date\n'), ((1772, 1809), 'autos.utils.date.get_past_date', 'date.get_past_date', ([], {'days': '(-3)', 'weeks': '(-2)'}), '(days=-3, weeks=-2)\n', (1790, 1809), True, 'import autos.utils.date as date\n'), ((179, 196), 'autos.utils.date.date_range', 'date.date_range', ([], {}), '()\n', (194, 196), True, 'import autos.utils.date as date\n'), ((218, 239), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (237, 239), False, 'import datetime\n'), ((947, 968), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (966, 968), False, 'import datetime\n'), ((971, 997), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(0)'}), '(days=0)\n', (989, 997), False, 'import datetime\n'), ((1155, 1176), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (1174, 1176), False, 'import datetime\n'), ((1179, 1205), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(3)'}), '(days=3)\n', (1197, 1205), False, 'import datetime\n'), ((1364, 1385), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (1383, 1385), False, 'import datetime\n'), ((1388, 1415), 'datetime.timedelta', 'datetime.timedelta', ([], {'weeks': '(5)'}), '(weeks=5)\n', (1406, 1415), False, 'import datetime\n'), ((1593, 1614), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (1612, 1614), False, 'import datetime\n'), ((1617, 1652), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(3)', 'weeks': '(2)'}), '(days=3, weeks=2)\n', (1635, 1652), False, 'import datetime\n'), ((1829, 1850), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (1848, 1850), False, 'import datetime\n'), ((1853, 1888), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(3)', 'weeks': '(2)'}), '(days=3, weeks=2)\n', (1871, 1888), False, 'import datetime\n'), ((556, 577), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (575, 577), False, 'import datetime\n'), ((580, 606), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (598, 606), False, 'import datetime\n'), ((622, 643), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (641, 643), False, 'import datetime\n'), ((646, 672), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(2)'}), '(days=2)\n', (664, 672), False, 'import datetime\n'), ((688, 709), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (707, 709), False, 'import datetime\n'), ((712, 738), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(3)'}), '(days=3)\n', (730, 738), False, 'import datetime\n'), ((385, 406), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (404, 406), False, 'import datetime\n'), ((409, 435), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(3)'}), '(days=3)\n', (427, 435), False, 'import datetime\n'), ((457, 478), 'datetime.date.today', 'datetime.date.today', ([], {}), 
'()\n', (476, 478), False, 'import datetime\n'), ((481, 507), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (499, 507), False, 'import datetime\n')]
|
# -*- coding: utf-8 -*-
from typing import Iterator, List, Optional, Tuple
from selenium.common.exceptions import (
StaleElementReferenceException,
TimeoutException
)
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.remote.webelement import WebElement
from selenium.webdriver.support.expected_conditions import (
presence_of_all_elements_located,
visibility_of_all_elements_located,
visibility_of_any_elements_located
)
from selenium.webdriver.support.wait import WebDriverWait
from ..config import SHAWL_CONFIG as CONFIG
from ..exceptions import NoSuchElementsException
from ..utils._waits import wait_until
class BaseCollection:
"""
    This class is the base for all PageElement collections.
    It is a wrapper around a list of WebElement.
    The `collection` property contains the
    list of WebElement and provides lazy loading of it.
    It will wait for any of the WebElements to be present on the DOM for
    `SHAWL_LAZY_LOAD_TIMEOUT` seconds.
    Also, you can work with this class instance as with a basic list.
For example::
base_collection = BaseCollection(driver, **{'css selector': 'div'})
for element in base_collection:
print(element.text)
first_element = base_collection[0]
assert len(base_collection) == 50
"""
def __init__(self,
driver: WebDriver,
repr_name: Optional[str] = None,
**locators):
self._driver: WebDriver = driver
self._selector: Tuple[str, str] = list(locators.items())[0]
self._collection: List[WebElement] = []
self._repr_name: str = repr_name or (f'{self.__class__.__name__}: '
f'{self._selector}')
def __str__(self) -> str:
return f'Selector: {self._selector}, Collection: {self._collection}'
def __repr__(self) -> str:
return self._repr_name
def __len__(self) -> int:
return len(self.collection)
def __iter__(self) -> Iterator[WebElement]:
return iter(self.collection)
def __getitem__(self, item) -> WebElement:
return self.collection[item]
def __bool__(self) -> bool:
return bool(self.collection)
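    # locate all matching elements, waiting up to CONFIG.lazy_load_timeout before giving up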
def _load(self):
try:
self._collection = WebDriverWait(
self._driver,
CONFIG.lazy_load_timeout
).until(presence_of_all_elements_located(self._selector))
except TimeoutException as t_exc:
raise NoSuchElementsException(
'no such elements: '
'Unable to locate elements: '
'{"method":"%s","selector":"%s"}' % self._selector) from t_exc
def _return_locator(self, selector_type: str) -> str:
if self._selector[0] == selector_type:
return self._selector[1]
return ''
@property
def selector(self) -> Tuple[str, str]:
return self._selector
@property
def id(self) -> str:
# pylint: disable=invalid-name
return self._return_locator('id')
@property
def xpath(self) -> str:
return self._return_locator('xpath')
@property
def link_text(self) -> str:
return self._return_locator('link text')
@property
def partial_link_text(self) -> str:
return self._return_locator('partial link text')
@property
def name(self) -> str:
return self._return_locator('name')
@property
def tag_name(self) -> str:
return self._return_locator('tag name')
@property
def class_name(self) -> str:
return self._return_locator('class name')
@property
def css_selector(self) -> str:
return self._return_locator('css selector')
@property
def collection(self) -> List[WebElement]:
if not self._collection or not isinstance(self._collection, list):
self._load()
try:
for e in self._collection:
isinstance(e.location, dict)
except StaleElementReferenceException:
self._load()
return self._collection
def any_is_visible(self, wait: int = CONFIG.wait_timeout) -> bool:
"""
Check that at least one element from collection is visible
on a web page during 'wait' seconds.
Returns True if at least one element from collection is visible,
False otherwise
"""
return wait_until(self._driver,
wait,
visibility_of_any_elements_located(self._selector))
def all_are_visible(self, wait: int = CONFIG.wait_timeout) -> bool:
"""
Check that all elements from collection are present on the DOM of
a page and visible during 'wait' seconds.
Returns True if all elements from collection are visible,
False otherwise
"""
return wait_until(self._driver,
wait,
visibility_of_all_elements_located(self._selector))
def any_is_present(self, wait: int = CONFIG.wait_timeout) -> bool:
"""
Check that at least one element from collection is present
on a web page during 'wait' seconds.
Returns True if at least one element from collection is present,
False otherwise
"""
return wait_until(self._driver,
wait,
presence_of_all_elements_located(self._selector))
__all__ = ['BaseCollection']
|
[
"selenium.webdriver.support.expected_conditions.visibility_of_all_elements_located",
"selenium.webdriver.support.expected_conditions.presence_of_all_elements_located",
"selenium.webdriver.support.expected_conditions.visibility_of_any_elements_located",
"selenium.webdriver.support.wait.WebDriverWait"
] |
[((4533, 4583), 'selenium.webdriver.support.expected_conditions.visibility_of_any_elements_located', 'visibility_of_any_elements_located', (['self._selector'], {}), '(self._selector)\n', (4567, 4583), False, 'from selenium.webdriver.support.expected_conditions import presence_of_all_elements_located, visibility_of_all_elements_located, visibility_of_any_elements_located\n'), ((4994, 5044), 'selenium.webdriver.support.expected_conditions.visibility_of_all_elements_located', 'visibility_of_all_elements_located', (['self._selector'], {}), '(self._selector)\n', (5028, 5044), False, 'from selenium.webdriver.support.expected_conditions import presence_of_all_elements_located, visibility_of_all_elements_located, visibility_of_any_elements_located\n'), ((5449, 5497), 'selenium.webdriver.support.expected_conditions.presence_of_all_elements_located', 'presence_of_all_elements_located', (['self._selector'], {}), '(self._selector)\n', (5481, 5497), False, 'from selenium.webdriver.support.expected_conditions import presence_of_all_elements_located, visibility_of_all_elements_located, visibility_of_any_elements_located\n'), ((2425, 2473), 'selenium.webdriver.support.expected_conditions.presence_of_all_elements_located', 'presence_of_all_elements_located', (['self._selector'], {}), '(self._selector)\n', (2457, 2473), False, 'from selenium.webdriver.support.expected_conditions import presence_of_all_elements_located, visibility_of_all_elements_located, visibility_of_any_elements_located\n'), ((2315, 2368), 'selenium.webdriver.support.wait.WebDriverWait', 'WebDriverWait', (['self._driver', 'CONFIG.lazy_load_timeout'], {}), '(self._driver, CONFIG.lazy_load_timeout)\n', (2328, 2368), False, 'from selenium.webdriver.support.wait import WebDriverWait\n')]
|
from .flasher_error import FlasherError
import time
import flask
import requests
import tempfile
import os
import re
from threading import Thread
class BaseFlasher:
def __init__(self, settings, printer, plugin, plugin_manager, identifier, logger):
self._settings = settings
self._printer = printer
self._plugin = plugin
self._plugin_manager = plugin_manager
self._identifier = identifier
self._logger = logger
self._firmware = None
self._firmware_version = None
self._firmware_author = None
self._firmware_upload_time = None
self._should_run_post_script = False
self._flash_status = None
def _background_run(self, target, args=None):
thread = Thread(target=target, args=args)
thread.start()
return thread
def _run_pre_flash_script(self):
pre_flash_script = self._settings.get_pre_flash_script()
if pre_flash_script:
self._logger.debug("Running pre-flash GCode script :")
self._logger.debug(pre_flash_script)
commands = [line.strip() for line in pre_flash_script.splitlines()]
self._printer.commands(commands)
else:
self._logger.debug("No pre-flash GCode script defined")
def _wait_pre_flash_delay(self):
self._logger.debug("Waiting pre-flash delay...")
time.sleep(self._settings.get_pre_flash_delay())
def _run_post_flash_script(self):
post_flash_script = self._settings.get_post_flash_script()
if post_flash_script:
self._logger.debug("Running post-flash script")
self._logger.debug(post_flash_script)
commands = [line.strip() for line in post_flash_script.splitlines()]
self._printer.commands(commands)
else:
self._logger.debug("No script defined")
def _wait_post_flash_delay(self):
self._logger.debug("Waiting post-flash delay...")
time.sleep(self._settings.get_post_flash_delay())
def _validate_firmware_file(self, file_path):
raise FlasherError("Unsupported function call.")
def handle_connected_event(self):
if self._should_run_post_script:
self._run_post_flash_script()
self._should_run_post_script = False
def check_setup_errors(self):
raise FlasherError("Unsupported function call.")
def upload(self):
self._logger.debug("Firmware uploaded by the user")
uploaded_file_path = flask.request.values["firmware_file." + self._settings.get_upload_path_suffix()]
errors = self._validate_firmware_file(uploaded_file_path)
if errors:
self._push_firmware_info()
return None, errors
result = self._handle_firmware_file(uploaded_file_path)
self._push_firmware_info()
return result
def download(self):
self._logger.debug("Downloading firmware...")
r = requests.get(flask.request.values["url"])
self._logger.debug("Saving downloaded firmware...")
with tempfile.NamedTemporaryFile(delete=False) as temp:
temp.write(r.content)
temp_path = temp.name
errors = self._validate_firmware_file(temp_path)
if errors:
self._push_firmware_info()
os.remove(temp_path)
return None, errors
result = self._handle_firmware_file(temp_path)
self._push_firmware_info()
self._logger.debug("Clearing downloaded firmware...")
os.remove(temp_path)
return result
def _handle_firmware_file(self, firmware_file_path):
raise FlasherError("Unsupported function call.")
def _find_firmware_info(self):
for root, dirs, files in os.walk(self._firmware):
for f in files:
if f == "Version.h":
self._logger.debug("Found Version.h, opening it...")
with open(os.path.join(root, f), "r") as version_file:
for line in version_file:
version = re.findall(r'#define +SHORT_BUILD_VERSION +"([^"]*)"', line)
if version:
self._firmware_version = version[0]
self._logger.debug("Found SHORT_BUILD_VERSION : %s" % self._firmware_version)
break
elif f == "Configuration.h":
self._logger.debug("Found Configuration.h, opening it...")
with open(os.path.join(root, f), "r") as configfile:
for line in configfile:
author = re.findall(r'#define +STRING_CONFIG_H_AUTHOR +"([^"]*)"', line)
if author:
self._firmware_author = author[0]
self._logger.debug("Found STRING_CONFIG_H_AUTHOR : %s" % self._firmware_author)
break
def _firmware_info_event_name(self):
raise FlasherError("Undefined function call")
def _push_firmware_info(self):
self._logger.debug("Sending firmware info through websocket")
self._plugin_manager.send_plugin_message(self._identifier, dict(
type=self._firmware_info_event_name(),
version=self._firmware_version,
author=self._firmware_author,
upload_time=self._firmware_upload_time.strftime("%d/%m/%Y, %H:%M:%S") if self._firmware_upload_time is not None else None,
firmware=self._firmware
))
def _push_flash_status(self, event_name):
if self._flash_status:
data = dict(
type=event_name
)
data.update(self._flash_status)
self._plugin_manager.send_plugin_message(self._identifier, data)
def send_initial_state(self):
self._push_firmware_info()
|
[
"threading.Thread",
"os.remove",
"tempfile.NamedTemporaryFile",
"os.walk",
"re.findall",
"requests.get",
"os.path.join"
] |
[((675, 707), 'threading.Thread', 'Thread', ([], {'target': 'target', 'args': 'args'}), '(target=target, args=args)\n', (681, 707), False, 'from threading import Thread\n'), ((2588, 2629), 'requests.get', 'requests.get', (["flask.request.values['url']"], {}), "(flask.request.values['url'])\n", (2600, 2629), False, 'import requests\n'), ((3069, 3089), 'os.remove', 'os.remove', (['temp_path'], {}), '(temp_path)\n', (3078, 3089), False, 'import os\n'), ((3272, 3295), 'os.walk', 'os.walk', (['self._firmware'], {}), '(self._firmware)\n', (3279, 3295), False, 'import os\n'), ((2691, 2732), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(False)'}), '(delete=False)\n', (2718, 2732), False, 'import tempfile\n'), ((2889, 2909), 'os.remove', 'os.remove', (['temp_path'], {}), '(temp_path)\n', (2898, 2909), False, 'import os\n'), ((3414, 3435), 'os.path.join', 'os.path.join', (['root', 'f'], {}), '(root, f)\n', (3426, 3435), False, 'import os\n'), ((3508, 3567), 're.findall', 're.findall', (['"""#define +SHORT_BUILD_VERSION +"([^"]*)\\""""', 'line'], {}), '(\'#define +SHORT_BUILD_VERSION +"([^"]*)"\', line)\n', (3518, 3567), False, 'import re\n'), ((3844, 3865), 'os.path.join', 'os.path.join', (['root', 'f'], {}), '(root, f)\n', (3856, 3865), False, 'import os\n'), ((3933, 3995), 're.findall', 're.findall', (['"""#define +STRING_CONFIG_H_AUTHOR +"([^"]*)\\""""', 'line'], {}), '(\'#define +STRING_CONFIG_H_AUTHOR +"([^"]*)"\', line)\n', (3943, 3995), False, 'import re\n')]
|
from OpenGLCffi.GLX import params
@params(api='glx', prms=['dpy', 'pbuffer', 'params', 'dmbuffer'])
def glXAssociateDMPbufferSGIX(dpy, pbuffer, params, dmbuffer):
pass
|
[
"OpenGLCffi.GLX.params"
] |
[((35, 99), 'OpenGLCffi.GLX.params', 'params', ([], {'api': '"""glx"""', 'prms': "['dpy', 'pbuffer', 'params', 'dmbuffer']"}), "(api='glx', prms=['dpy', 'pbuffer', 'params', 'dmbuffer'])\n", (41, 99), False, 'from OpenGLCffi.GLX import params\n')]
|
#
# Copyright (c) 2013-2022 Contributors to the Eclipse Foundation
#
# See the NOTICE file distributed with this work for additional information regarding copyright
# ownership. All rights reserved. This program and the accompanying materials are made available
# under the terms of the Apache License, Version 2.0 which accompanies this distribution and is
# available at http://www.apache.org/licenses/LICENSE-2.0.txt
# ===============================================================================================
from pygw.config import geowave_pkg
from ..statistic import FieldStatistic
from ..statistic_type import FieldStatisticType
from ...base.interval import IntervalTransformer
class TimeRangeStatistic(FieldStatistic):
"""
Tracks the time range of a temporal field.
"""
STATS_TYPE = FieldStatisticType(geowave_pkg.core.geotime.store.statistics.TimeRangeStatistic.STATS_TYPE)
def __init__(self, type_name=None, field_name=None, java_ref=None):
if java_ref is None:
if type_name is None and field_name is None:
java_ref = geowave_pkg.core.geotime.store.statistics.TimeRangeStatistic()
else:
java_ref = geowave_pkg.core.geotime.store.statistics.TimeRangeStatistic(type_name, field_name)
super().__init__(java_ref, IntervalTransformer())
|
[
"pygw.config.geowave_pkg.core.geotime.store.statistics.TimeRangeStatistic"
] |
[((1093, 1155), 'pygw.config.geowave_pkg.core.geotime.store.statistics.TimeRangeStatistic', 'geowave_pkg.core.geotime.store.statistics.TimeRangeStatistic', ([], {}), '()\n', (1153, 1155), False, 'from pygw.config import geowave_pkg\n'), ((1201, 1288), 'pygw.config.geowave_pkg.core.geotime.store.statistics.TimeRangeStatistic', 'geowave_pkg.core.geotime.store.statistics.TimeRangeStatistic', (['type_name', 'field_name'], {}), '(type_name,\n field_name)\n', (1261, 1288), False, 'from pygw.config import geowave_pkg\n')]
|
"""
Project: RadarBook
File: optimum_binary_example.py
Created by: <NAME>
On: 10/11/2018
Created with: PyCharm
Copyright (C) 2019 Artech House (<EMAIL>)
This file is part of Introduction to Radar Using Python and MATLAB
and can not be copied and/or distributed without the express permission of Artech House.
"""
import sys
from Chapter06.ui.OptimumBinary_ui import Ui_MainWindow
from numpy import arange, ceil
from PyQt5.QtWidgets import QApplication, QMainWindow
from matplotlib.backends.qt_compat import QtCore
from matplotlib.backends.backend_qt5agg import (FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
from matplotlib.figure import Figure
class OptimumBinary(QMainWindow, Ui_MainWindow):
def __init__(self):
super(self.__class__, self).__init__()
self.setupUi(self)
# Connect to the input boxes, when the user presses enter the form updates
self.number_of_pulses.returnPressed.connect(self._update_canvas)
self.target_type.currentIndexChanged.connect(self._update_canvas)
# Set up a figure for the plotting canvas
fig = Figure()
self.axes1 = fig.add_subplot(111)
self.my_canvas = FigureCanvas(fig)
# Add the canvas to the vertical layout
self.verticalLayout.addWidget(self.my_canvas)
self.addToolBar(QtCore.Qt.TopToolBarArea, NavigationToolbar(self.my_canvas, self))
# Update the canvas for the first display
self._update_canvas()
def _update_canvas(self):
"""
Update the figure when the user changes an input value.
:return:
"""
# Get the parameters from the form
number_of_pulses = int(self.number_of_pulses.text())
# Get the selected target type from the form
target_type = self.target_type.currentText()
if target_type == 'Swerling 0':
alpha = 0.8
beta = -0.02
elif target_type == 'Swerling 1':
alpha = 0.8
beta = -0.02
elif target_type == 'Swerling 2':
alpha = 0.91
beta = -0.38
elif target_type == 'Swerling 3':
alpha = 0.8
beta = -0.02
elif target_type == 'Swerling 4':
alpha = 0.873
beta = -0.27
# Calculate the optimum choice for M
np = arange(1, number_of_pulses+1)
m_optimum = [ceil(10.0 ** beta * n ** alpha) for n in np]
# Clear the axes for the updated plot
self.axes1.clear()
# Display the results
self.axes1.plot(np, m_optimum, '')
# Set the plot title and labels
self.axes1.set_title('Optimum M for Binary Integration', size=14)
self.axes1.set_xlabel('Number of Pulses', size=12)
self.axes1.set_ylabel('M', size=12)
# Set the tick label size
self.axes1.tick_params(labelsize=12)
# Turn on the grid
self.axes1.grid(linestyle=':', linewidth=0.5)
# Update the canvas
self.my_canvas.draw()
def start():
form = OptimumBinary() # Set the form
form.show() # Show the form
def main():
app = QApplication(sys.argv) # A new instance of QApplication
form = OptimumBinary() # Set the form
form.show() # Show the form
app.exec_() # Execute the app
if __name__ == '__main__':
main()
|
[
"numpy.ceil",
"matplotlib.backends.backend_qt5agg.FigureCanvas",
"matplotlib.figure.Figure",
"numpy.arange",
"matplotlib.backends.backend_qt5agg.NavigationToolbar2QT",
"PyQt5.QtWidgets.QApplication"
] |
[((3146, 3168), 'PyQt5.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (3158, 3168), False, 'from PyQt5.QtWidgets import QApplication, QMainWindow\n'), ((1104, 1112), 'matplotlib.figure.Figure', 'Figure', ([], {}), '()\n', (1110, 1112), False, 'from matplotlib.figure import Figure\n'), ((1181, 1198), 'matplotlib.backends.backend_qt5agg.FigureCanvas', 'FigureCanvas', (['fig'], {}), '(fig)\n', (1193, 1198), False, 'from matplotlib.backends.backend_qt5agg import FigureCanvas, NavigationToolbar2QT as NavigationToolbar\n'), ((2337, 2368), 'numpy.arange', 'arange', (['(1)', '(number_of_pulses + 1)'], {}), '(1, number_of_pulses + 1)\n', (2343, 2368), False, 'from numpy import arange, ceil\n'), ((1352, 1391), 'matplotlib.backends.backend_qt5agg.NavigationToolbar2QT', 'NavigationToolbar', (['self.my_canvas', 'self'], {}), '(self.my_canvas, self)\n', (1369, 1391), True, 'from matplotlib.backends.backend_qt5agg import FigureCanvas, NavigationToolbar2QT as NavigationToolbar\n'), ((2388, 2419), 'numpy.ceil', 'ceil', (['(10.0 ** beta * n ** alpha)'], {}), '(10.0 ** beta * n ** alpha)\n', (2392, 2419), False, 'from numpy import arange, ceil\n')]
|
from datetime import datetime as dt
from dash.dependencies import Input
from dash.dependencies import Output
from dash.dependencies import State
from flask_login import current_user
import pandas_datareader as pdr
def register_callbacks(dashapp):
@dashapp.callback(
Output('my-graph', 'figure'),
Input('my-dropdown', 'value'),
State('user-store', 'data'))
def update_graph(selected_dropdown_value, data):
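        # Fetch daily price history for the selected ticker from Yahoo Finance and plot its closing price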
df = pdr.get_data_yahoo(selected_dropdown_value, start=dt(2017, 1, 1), end=dt.now())
return {
'data': [{
'x': df.index,
'y': df.Close
}],
'layout': {'margin': {'l': 40, 'r': 0, 't': 20, 'b': 30}}
}
@dashapp.callback(
Output('user-store', 'data'),
Input('my-dropdown', 'value'),
State('user-store', 'data'))
def cur_user(args, data):
if current_user.is_authenticated:
return current_user.username
@dashapp.callback(Output('username', 'children'), Input('user-store', 'data'))
def username(data):
if data is None:
return ''
else:
return f'Hello {data}'
|
[
"dash.dependencies.State",
"datetime.datetime",
"dash.dependencies.Input",
"dash.dependencies.Output",
"datetime.datetime.now"
] |
[((281, 309), 'dash.dependencies.Output', 'Output', (['"""my-graph"""', '"""figure"""'], {}), "('my-graph', 'figure')\n", (287, 309), False, 'from dash.dependencies import Output\n'), ((319, 348), 'dash.dependencies.Input', 'Input', (['"""my-dropdown"""', '"""value"""'], {}), "('my-dropdown', 'value')\n", (324, 348), False, 'from dash.dependencies import Input\n'), ((358, 385), 'dash.dependencies.State', 'State', (['"""user-store"""', '"""data"""'], {}), "('user-store', 'data')\n", (363, 385), False, 'from dash.dependencies import State\n'), ((762, 790), 'dash.dependencies.Output', 'Output', (['"""user-store"""', '"""data"""'], {}), "('user-store', 'data')\n", (768, 790), False, 'from dash.dependencies import Output\n'), ((800, 829), 'dash.dependencies.Input', 'Input', (['"""my-dropdown"""', '"""value"""'], {}), "('my-dropdown', 'value')\n", (805, 829), False, 'from dash.dependencies import Input\n'), ((839, 866), 'dash.dependencies.State', 'State', (['"""user-store"""', '"""data"""'], {}), "('user-store', 'data')\n", (844, 866), False, 'from dash.dependencies import State\n'), ((1004, 1034), 'dash.dependencies.Output', 'Output', (['"""username"""', '"""children"""'], {}), "('username', 'children')\n", (1010, 1034), False, 'from dash.dependencies import Output\n'), ((1036, 1063), 'dash.dependencies.Input', 'Input', (['"""user-store"""', '"""data"""'], {}), "('user-store', 'data')\n", (1041, 1063), False, 'from dash.dependencies import Input\n'), ((503, 517), 'datetime.datetime', 'dt', (['(2017)', '(1)', '(1)'], {}), '(2017, 1, 1)\n', (505, 517), True, 'from datetime import datetime as dt\n'), ((523, 531), 'datetime.datetime.now', 'dt.now', ([], {}), '()\n', (529, 531), True, 'from datetime import datetime as dt\n')]
|
from collections import defaultdict
from decimal import Decimal
from datetime import datetime, timedelta
from enum import Enum
import math
import random
import re
import requests
import time
from vnpy.app.algo_trading import AlgoTemplate
from vnpy.trader.utility import round_to
from vnpy.trader.constant import Direction, Status, OrderType
from vnpy.trader.object import AccountData, OrderData, TradeData, TickData
from vnpy.trader.engine import BaseEngine
class LiquidMiningAlgo(AlgoTemplate):
""""""
display_name = "交易所 流动性挖坑"
default_setting = {
"vt_symbol": "",
"price_offset": 0.05,
"price_offset_max": 0.1,
"volume": 2,
"max_volume_ratio": 0,
"interval": 3,
"min_order_level": 1,
"min_order_volume": 0,
"sell_max_volume": 0,
"buy_max_volume": 0,
"auto_trade_volume": 310,
"sell_max_ratio": 1,
"buy_max_ratio": 1,
"reward_ratio": 0.01,
"min_pos": 50000,
"max_pos": 50000,
}
variables = [
"pos",
"timer_count",
"vt_ask_orderid",
"vt_bid_orderid"
]
def __init__(
self,
algo_engine: BaseEngine,
algo_name: str,
setting: dict
):
""""""
super().__init__(algo_engine, algo_name, setting)
# Parameters
self.vt_symbol = setting["vt_symbol"]
self.price_offset = setting["price_offset"]
self.price_offset_max = setting["price_offset_max"]
self.volume = setting["volume"]
self.max_volume_ratio = setting.get("max_volume_ratio", 0)
assert 0 <= self.max_volume_ratio <= 1
self.interval = setting["interval"]
self.min_order_level = setting["min_order_level"]
self.min_order_volume = setting["min_order_volume"]
self.sell_max_volume = setting["sell_max_volume"]
self.buy_max_volume = setting["buy_max_volume"]
self.auto_trade_volume = setting["auto_trade_volume"]
self.sell_max_ratio = setting["sell_max_ratio"]
self.buy_max_ratio = setting["buy_max_ratio"]
self.reward_ratio = setting["reward_ratio"]
self.min_pos = setting["min_pos"]
self.max_pos = setting["max_pos"]
self.enable_ioc = setting.get("enable_ioc", False)
self.ioc_intervel = setting.get("ioc_interval", self.interval)
# validate setting
assert self.price_offset <= self.price_offset_max
assert 0 <= self.min_order_level <= 5
# Variables
self.pos = 0
self.timer_count = 0
self.vt_ask_orderid = ""
self.vt_ask_price = 0.0
self.vt_bid_orderid = ""
self.vt_bid_price = 0.0
self.origin_ask_price = 0.00000002
self.origin_bid_price = 0.00000001
self.last_ask_price = 0.00000002
self.last_bid_price = 0.00000001
self.last_ask_volume = 0.0
self.last_bid_volume = 0.0
self.total_ask_volume = 0.0
self.total_bid_volume = 0.0
self.ask_order_level = 0
self.bid_order_level = 0
self.last_tick = None
self._init_market_accounts(self.vt_symbol)
self.subscribe(self.vt_symbol)
self.put_parameters_event()
self.put_variables_event()
def _init_market_accounts(self, active_vt_symbol):
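        # Parse the active vt_symbol into its base and quote tokens so the corresponding account balances can be tracked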
SYMBOL_SPLITTER = re.compile(r"^(\w+)[-:/]?(BTC|ETH|BNB|XRP|USDT|USDC|USDS|TUSD|PAX|DAI)$")
market_token_pair = active_vt_symbol.split('.')[0]
active_market = active_vt_symbol.split('.')[1]
if not market_token_pair or not active_market:
self.algo_engine.main_engine.write_log(f"ERROR: parse active_vt {active_vt_symbol} failed")
return False
token_pair_match = SYMBOL_SPLITTER.match(market_token_pair.upper())
if not token_pair_match:
self.algo_engine.main_engine.write_log(f"ERROR: parse symbol {market_token_pair} failed")
return False
self.market_vt_tokens = [
f"{active_market}.{token_pair_match.group(1)}",
f"{active_market}.{token_pair_match.group(2)}"
]
self.current_balance = {}
self._update_current_balance()
def _update_current_balance(self):
for vt_token in self.market_vt_tokens:
user_account = self.algo_engine.main_engine.get_account(vt_token)
if type(user_account) is not AccountData:
return False
self.current_balance[vt_token] = user_account.balance
return True
def on_start(self):
""""""
random.seed(time.time())
self.write_log(f"开始流动性挖矿: {self.price_offset}, {self.price_offset_max}, {self.volume}, {self.interval}, {self.min_order_level}, {self.min_order_volume}, {self.sell_max_volume}, {self.buy_max_volume}, {self.auto_trade_volume}")
self.pricetick = self.algo_engine.main_engine.get_contract(self.vt_symbol).pricetick
self.volumetick = self.algo_engine.main_engine.get_contract(self.vt_symbol).min_volume
assert self.pricetick > 0
def on_tick(self, tick: TickData):
""""""
self.last_tick = tick
market_price = (tick.ask_price_1 + tick.bid_price_1) / 2
if self.vt_ask_orderid != "":
self.ask_order_alive_tick += 1
# if time to kill
cancel_ask = False
if self.enable_ioc and self.ask_order_alive_tick > self.ioc_intervel:
self.write_log(f"卖单{self.vt_ask_orderid}有效时间{self.ask_order_alive_tick} ticks > {self.ioc_intervel},取消")
cancel_ask = True
if not cancel_ask:
total_ask_volume = 0
for num_level in range(1, 6):
ask_price = getattr(tick, f"ask_price_{num_level}")
if 0 < ask_price < self.last_ask_price:
total_ask_volume += getattr(tick, f"ask_volume_{num_level}")
# min_ask_price = getattr(tick, f"ask_price_{self.ask_order_level}") if self.ask_order_level > 0 else market_price
# vt_ask_price = round_to(min_ask_price + self.pricetick, self.pricetick)
vt_ask_price = getattr(tick, f"ask_price_1")
if self.vt_ask_price < vt_ask_price:
cancel_ask = True
self.write_log(f"当前卖单{self.vt_ask_price} 低于最新卖{self.ask_order_level}价 {vt_ask_price},取消")
elif self.vt_ask_price > vt_ask_price:
cancel_ask = True
self.write_log(f"当前卖单{self.vt_ask_price} 高于最新卖{self.ask_order_level}价 {vt_ask_price},取消")
elif abs(self.total_ask_volume - total_ask_volume) > (self.total_ask_volume / 2):
cancel_ask = True
self.write_log(f"---> 当前卖单{self.vt_ask_price} 取消,因为之前的订单量发生了变化")
if cancel_ask:
self.cancel_order(self.vt_ask_orderid)
# self.ask_order_alive_tick = 0
if self.vt_bid_orderid != "":
self.bid_order_alive_tick += 1
# if time to kill
cancel_bid = False
if self.enable_ioc and self.bid_order_alive_tick > self.ioc_intervel:
self.write_log(f"买单{self.vt_bid_orderid}有效时间{self.bid_order_alive_tick} ticks > {self.ioc_intervel},取消")
cancel_bid = True
if not cancel_bid:
total_bid_volume = 0
for num_level in range(1, 6):
bid_price = getattr(tick, f"bid_price_{num_level}")
if bid_price > self.last_bid_price:
total_bid_volume += getattr(tick, f"bid_volume_{num_level}")
# max_bid_price = getattr(tick, f"bid_price_{self.bid_order_level}") if self.bid_order_level > 0 else market_price
# vt_bid_price = round_to(max_bid_price - self.pricetick, self.pricetick)
vt_bid_price = getattr(tick, f"bid_price_1")
if self.vt_bid_price > vt_bid_price:
cancel_bid = True
self.write_log(f"当前买单{self.vt_bid_price} 高于最新买{self.bid_order_level}价 {vt_bid_price},取消")
elif self.vt_bid_price < vt_bid_price:
cancel_bid = True
self.write_log(f"当前买单{self.vt_bid_price} 低于最新买{self.bid_order_level}价 {vt_bid_price},取消")
elif abs(self.total_bid_volume - total_bid_volume) > (self.total_bid_volume / 2):
cancel_bid = True
self.write_log(f"---> 当前买单{self.vt_bid_price} 取消,因为之前的订单量发生了变化")
if cancel_bid:
self.cancel_order(self.vt_bid_orderid)
# self.bid_order_alive_tick = 0
def on_timer(self):
""""""
if not self.last_tick:
return
if self.pos < self.min_pos or self.pos > self.max_pos:
self.cancel_all()
self.write_log(f"当前持仓: {self.pos} 超出[{self.min_pos}, {self.max_pos}]范围,停止流动性挖矿")
return
self.timer_count += 1
if self.timer_count < self.interval:
self.put_variables_event()
return
self.timer_count = 0
self.write_log(f"当前余额 {self.current_balance}, 持仓 {self.pos}")
if not self._update_current_balance():
self.write_log(f"查询余额失败,上次余额: [{self.current_balance}]")
return
use_max_volume = self.max_volume_ratio > 0
max_volume_ratio = self.max_volume_ratio
market_price = (self.last_tick.ask_price_1 + self.last_tick.bid_price_1) / 2
if self.vt_ask_orderid == "":
self.ask_order_level = 0
for num_level in range(self.min_order_level, 0, -1):
ask_price = getattr(self.last_tick, f"ask_price_{num_level}")
if 0 < ask_price < market_price * (1 + self.reward_ratio * 0.99):
self.ask_order_level = num_level
break
if self.ask_order_level > 0:
total_ask_volume = 0
for num_level in range(1, self.ask_order_level + 1):
total_ask_volume += getattr(self.last_tick, f"ask_volume_{num_level}")
if total_ask_volume != self.last_ask_volume:
one_ask_price = getattr(self.last_tick, f"ask_price_1")
one_ask_volume = getattr(self.last_tick, f"ask_volume_1")
min_ask_price = getattr(self.last_tick, f"ask_price_{self.ask_order_level}") if self.ask_order_level > 0 else market_price
vt_ask_price = round_to(min_ask_price + self.pricetick, self.pricetick)
if self.origin_ask_price == 0.00000002:
self.origin_ask_price = vt_ask_price
ask_condition0 = self.last_ask_price == 0.00000002
ask_condition1 = (self.last_ask_price * (1 - self.price_offset)) < vt_ask_price < (self.last_ask_price * (1 + self.price_offset))
ask_condition2 = vt_ask_price > (self.origin_ask_price * (1 - self.price_offset_max))
ask_condition8 = one_ask_price < (self.origin_ask_price * (1 - self.price_offset_max * 2))
self.write_log(f"---> 流动性挖矿卖出condition1: {ask_condition1}, condition2: {ask_condition2}")
if ask_condition0 or (ask_condition1 and ask_condition2):
self.last_ask_price = vt_ask_price
self.vt_ask_price = one_ask_price
self.total_ask_volume = total_ask_volume
max_volume = self.current_balance[self.market_vt_tokens[0]] * self.sell_max_ratio
if 0 < self.sell_max_volume < max_volume:
max_volume = self.sell_max_volume
min_volume = self.volume * total_ask_volume
if self.min_order_volume > 0 and min_volume < self.min_order_volume:
min_volume = self.min_order_volume
volume = min_volume if not use_max_volume else max_volume * max_volume_ratio
if volume >= max_volume:
volume = max_volume
self.last_ask_volume = round_to(volume - self.volumetick, self.volumetick)
self.write_log(f"流动性挖矿卖出价格: {vt_ask_price}, 量: {self.last_ask_volume}")
self.vt_ask_orderid = self.sell(self.vt_symbol, vt_ask_price, self.last_ask_volume)
self.ask_order_alive_tick = 0
elif ask_condition8 and one_ask_volume < self.auto_trade_volume:
self.write_log(f"---> 流动性挖矿买入低价one_ask_price: {one_ask_price}, one_ask_volume: {one_ask_volume}")
self.buy(self.vt_symbol, one_ask_price, one_ask_volume)
else:
self.write_log(f"---> 流动性挖矿卖出下单失败,因为卖单总数量等于上一单数量")
else:
self.write_log(f"---> 流动性挖矿卖出下单失败,因为没有合适的下单位置")
if self.vt_bid_orderid == "":
self.bid_order_level = 0
for num_level in range(self.min_order_level, 0, -1):
bid_price = getattr(self.last_tick, f"bid_price_{num_level}")
if bid_price > market_price * (1 - self.reward_ratio * 0.99):
self.bid_order_level = num_level
break
if self.bid_order_level > 0:
total_bid_volume = 0
for num_level in range(1, self.bid_order_level + 1):
total_bid_volume += getattr(self.last_tick, f"bid_volume_{num_level}")
if total_bid_volume != self.last_bid_volume:
one_bid_price = getattr(self.last_tick, f"bid_price_1")
one_bid_volume = getattr(self.last_tick, f"bid_volume_1")
max_bid_price = getattr(self.last_tick, f"bid_price_{self.bid_order_level}") if self.bid_order_level > 0 else market_price
vt_bid_price = round_to(max_bid_price - self.pricetick, self.pricetick)
if self.origin_bid_price == 0.00000001:
self.origin_bid_price = vt_bid_price
bid_condition0 = self.last_bid_price == 0.00000001
bid_condition1 = (self.last_bid_price * (1 - self.price_offset)) < vt_bid_price < (self.last_bid_price * (1 + self.price_offset))
bid_condition2 = vt_bid_price < (self.origin_bid_price * (1 + self.price_offset_max))
bid_condition8 = one_bid_price > (self.origin_bid_price * (1 + self.price_offset_max * 2))
self.write_log(f"---> 流动性挖矿买入condition1: {bid_condition1}, condition2: {bid_condition2}")
if bid_condition0 or (bid_condition1 and bid_condition2):
self.last_bid_price = vt_bid_price
self.vt_bid_price = one_bid_price
self.total_bid_volume = total_bid_volume
max_volume = self.current_balance[self.market_vt_tokens[1]] * self.buy_max_ratio / vt_bid_price
if 0 < self.buy_max_volume < max_volume:
max_volume = self.buy_max_volume
min_volume = self.volume * total_bid_volume
if self.min_order_volume > 0 and min_volume < self.min_order_volume:
min_volume = self.min_order_volume
volume = min_volume if not use_max_volume else max_volume * max_volume_ratio
if volume >= max_volume:
volume = max_volume
self.last_bid_volume = round_to(volume - self.volumetick, self.volumetick)
self.write_log(f"流动性挖矿买入价格: {vt_bid_price}, 量: {self.last_bid_volume}")
self.vt_bid_orderid = self.buy(self.vt_symbol, vt_bid_price, self.last_bid_volume)
self.bid_order_alive_tick = 0
elif bid_condition8 and one_bid_volume < self.auto_trade_volume:
self.write_log(f"---> 流动性挖矿卖出高价one_bid_price: {one_bid_price}, one_bid_volume: {one_bid_volume}")
self.sell(self.vt_symbol, one_bid_price, one_bid_volume)
else:
self.write_log(f"---> 流动性挖矿买入下单失败,因为买单总数量等于上一单数量")
else:
self.write_log(f"---> 流动性挖矿买入下单失败,因为没有合适的下单位置")
self.put_variables_event()
def on_order(self, order: OrderData):
""""""
if order.vt_orderid == self.vt_ask_orderid:
if not order.is_active():
self.vt_ask_orderid = ""
self.vt_ask_price = 0.0
elif order.vt_orderid == self.vt_bid_orderid:
if not order.is_active():
self.vt_bid_orderid = ""
self.vt_bid_price = 0.0
self.put_variables_event()
def on_trade(self, trade: TradeData):
""""""
if trade.direction == Direction.SHORT:
self.write_log(f"流动性挖矿卖单{trade.vt_orderid}成交,价:{trade.price}, 量:{trade.volume}")
self.pos -= trade.volume
elif trade.direction == Direction.LONG:
self.write_log(f"流动性挖矿买单{trade.vt_orderid}成交,价:{trade.price}, 量:{trade.volume}")
self.pos += trade.volume
self.put_variables_event()
def on_stop(self):
""""""
self.write_log("停止 流动性挖矿")
# self.write_log(f"账户状态:{self.algo_engine.main_engine.get_all_accounts()}")
time.sleep(5)
|
[
"time.sleep",
"time.time",
"vnpy.trader.utility.round_to",
"re.compile"
] |
[((3472, 3545), 're.compile', 're.compile', (['"""^(\\\\w+)[-:/]?(BTC|ETH|BNB|XRP|USDT|USDC|USDS|TUSD|PAX|DAI)$"""'], {}), "('^(\\\\w+)[-:/]?(BTC|ETH|BNB|XRP|USDT|USDC|USDS|TUSD|PAX|DAI)$')\n", (3482, 3545), False, 'import re\n'), ((17713, 17726), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (17723, 17726), False, 'import time\n'), ((4712, 4723), 'time.time', 'time.time', ([], {}), '()\n', (4721, 4723), False, 'import time\n'), ((10685, 10741), 'vnpy.trader.utility.round_to', 'round_to', (['(min_ask_price + self.pricetick)', 'self.pricetick'], {}), '(min_ask_price + self.pricetick, self.pricetick)\n', (10693, 10741), False, 'from vnpy.trader.utility import round_to\n'), ((14153, 14209), 'vnpy.trader.utility.round_to', 'round_to', (['(max_bid_price - self.pricetick)', 'self.pricetick'], {}), '(max_bid_price - self.pricetick, self.pricetick)\n', (14161, 14209), False, 'from vnpy.trader.utility import round_to\n'), ((12374, 12425), 'vnpy.trader.utility.round_to', 'round_to', (['(volume - self.volumetick)', 'self.volumetick'], {}), '(volume - self.volumetick, self.volumetick)\n', (12382, 12425), False, 'from vnpy.trader.utility import round_to\n'), ((15854, 15905), 'vnpy.trader.utility.round_to', 'round_to', (['(volume - self.volumetick)', 'self.volumetick'], {}), '(volume - self.volumetick, self.volumetick)\n', (15862, 15905), False, 'from vnpy.trader.utility import round_to\n')]
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowCertificateResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'id': 'str',
'status': 'str',
'order_id': 'str',
'name': 'str',
'type': 'str',
'brand': 'str',
'push_support': 'str',
'revoke_reason': 'str',
'signature_algrithm': 'str',
'issue_time': 'str',
'not_before': 'str',
'not_after': 'str',
'validity_period': 'int',
'validation_method': 'str',
'domain_type': 'str',
'domain': 'str',
'sans': 'str',
'domain_count': 'int',
'wildcard_count': 'int',
'authentification': 'list[Authentification]'
}
attribute_map = {
'id': 'id',
'status': 'status',
'order_id': 'order_id',
'name': 'name',
'type': 'type',
'brand': 'brand',
'push_support': 'push_support',
'revoke_reason': 'revoke_reason',
'signature_algrithm': 'signature_algrithm',
'issue_time': 'issue_time',
'not_before': 'not_before',
'not_after': 'not_after',
'validity_period': 'validity_period',
'validation_method': 'validation_method',
'domain_type': 'domain_type',
'domain': 'domain',
'sans': 'sans',
'domain_count': 'domain_count',
'wildcard_count': 'wildcard_count',
'authentification': 'authentification'
}
def __init__(self, id=None, status=None, order_id=None, name=None, type=None, brand=None, push_support=None, revoke_reason=None, signature_algrithm=None, issue_time=None, not_before=None, not_after=None, validity_period=None, validation_method=None, domain_type=None, domain=None, sans=None, domain_count=None, wildcard_count=None, authentification=None):
"""ShowCertificateResponse - a model defined in huaweicloud sdk"""
super(ShowCertificateResponse, self).__init__()
self._id = None
self._status = None
self._order_id = None
self._name = None
self._type = None
self._brand = None
self._push_support = None
self._revoke_reason = None
self._signature_algrithm = None
self._issue_time = None
self._not_before = None
self._not_after = None
self._validity_period = None
self._validation_method = None
self._domain_type = None
self._domain = None
self._sans = None
self._domain_count = None
self._wildcard_count = None
self._authentification = None
self.discriminator = None
if id is not None:
self.id = id
if status is not None:
self.status = status
if order_id is not None:
self.order_id = order_id
if name is not None:
self.name = name
if type is not None:
self.type = type
if brand is not None:
self.brand = brand
if push_support is not None:
self.push_support = push_support
if revoke_reason is not None:
self.revoke_reason = revoke_reason
if signature_algrithm is not None:
self.signature_algrithm = signature_algrithm
if issue_time is not None:
self.issue_time = issue_time
if not_before is not None:
self.not_before = not_before
if not_after is not None:
self.not_after = not_after
if validity_period is not None:
self.validity_period = validity_period
if validation_method is not None:
self.validation_method = validation_method
if domain_type is not None:
self.domain_type = domain_type
if domain is not None:
self.domain = domain
if sans is not None:
self.sans = sans
if domain_count is not None:
self.domain_count = domain_count
if wildcard_count is not None:
self.wildcard_count = wildcard_count
if authentification is not None:
self.authentification = authentification
@property
def id(self):
"""Gets the id of this ShowCertificateResponse.
证书id。
:return: The id of this ShowCertificateResponse.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this ShowCertificateResponse.
证书id。
:param id: The id of this ShowCertificateResponse.
:type: str
"""
self._id = id
@property
def status(self):
"""Gets the status of this ShowCertificateResponse.
证书状态。取值如下: - PAID:证书已支付,待申请证书。 - ISSUED:证书已签发。 - CHECKING:证书申请审核中。 - CANCELCHECKING:取消证书申请审核中。 - UNPASSED:证书申请未通过。 - EXPIRED:证书已过期。 - REVOKING:证书吊销申请审核中。 - REVOKED:证书已吊销。 - UPLOAD:证书托管中。 - SUPPLEMENTCHECKING:多域名证书新增附加域名审核中。 - CANCELSUPPLEMENTING:取消新增附加域名审核中。
:return: The status of this ShowCertificateResponse.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this ShowCertificateResponse.
证书状态。取值如下: - PAID:证书已支付,待申请证书。 - ISSUED:证书已签发。 - CHECKING:证书申请审核中。 - CANCELCHECKING:取消证书申请审核中。 - UNPASSED:证书申请未通过。 - EXPIRED:证书已过期。 - REVOKING:证书吊销申请审核中。 - REVOKED:证书已吊销。 - UPLOAD:证书托管中。 - SUPPLEMENTCHECKING:多域名证书新增附加域名审核中。 - CANCELSUPPLEMENTING:取消新增附加域名审核中。
:param status: The status of this ShowCertificateResponse.
:type: str
"""
self._status = status
@property
def order_id(self):
"""Gets the order_id of this ShowCertificateResponse.
订单id。
:return: The order_id of this ShowCertificateResponse.
:rtype: str
"""
return self._order_id
@order_id.setter
def order_id(self, order_id):
"""Sets the order_id of this ShowCertificateResponse.
订单id。
:param order_id: The order_id of this ShowCertificateResponse.
:type: str
"""
self._order_id = order_id
@property
def name(self):
"""Gets the name of this ShowCertificateResponse.
证书名称。
:return: The name of this ShowCertificateResponse.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ShowCertificateResponse.
证书名称。
:param name: The name of this ShowCertificateResponse.
:type: str
"""
self._name = name
@property
def type(self):
"""Gets the type of this ShowCertificateResponse.
证书类型。取值如下: DV_SSL_CERT、DV_SSL_CERT_BASIC、EV_SSL_CERT、 EV_SSL_CERT_PRO、OV_SSL_CERT、OV_SSL_CERT_PRO。
:return: The type of this ShowCertificateResponse.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this ShowCertificateResponse.
证书类型。取值如下: DV_SSL_CERT、DV_SSL_CERT_BASIC、EV_SSL_CERT、 EV_SSL_CERT_PRO、OV_SSL_CERT、OV_SSL_CERT_PRO。
:param type: The type of this ShowCertificateResponse.
:type: str
"""
self._type = type
@property
def brand(self):
"""Gets the brand of this ShowCertificateResponse.
证书品牌。取值如下: GLOBALSIGN、SYMANTEC、GEOTRUST、CFCA。
:return: The brand of this ShowCertificateResponse.
:rtype: str
"""
return self._brand
@brand.setter
def brand(self, brand):
"""Sets the brand of this ShowCertificateResponse.
证书品牌。取值如下: GLOBALSIGN、SYMANTEC、GEOTRUST、CFCA。
:param brand: The brand of this ShowCertificateResponse.
:type: str
"""
self._brand = brand
@property
def push_support(self):
"""Gets the push_support of this ShowCertificateResponse.
证书是否支持推送。
:return: The push_support of this ShowCertificateResponse.
:rtype: str
"""
return self._push_support
@push_support.setter
def push_support(self, push_support):
"""Sets the push_support of this ShowCertificateResponse.
证书是否支持推送。
:param push_support: The push_support of this ShowCertificateResponse.
:type: str
"""
self._push_support = push_support
@property
def revoke_reason(self):
"""Gets the revoke_reason of this ShowCertificateResponse.
证书吊销原因。
:return: The revoke_reason of this ShowCertificateResponse.
:rtype: str
"""
return self._revoke_reason
@revoke_reason.setter
def revoke_reason(self, revoke_reason):
"""Sets the revoke_reason of this ShowCertificateResponse.
证书吊销原因。
:param revoke_reason: The revoke_reason of this ShowCertificateResponse.
:type: str
"""
self._revoke_reason = revoke_reason
@property
def signature_algrithm(self):
"""Gets the signature_algrithm of this ShowCertificateResponse.
签名算法。
:return: The signature_algrithm of this ShowCertificateResponse.
:rtype: str
"""
return self._signature_algrithm
@signature_algrithm.setter
def signature_algrithm(self, signature_algrithm):
"""Sets the signature_algrithm of this ShowCertificateResponse.
签名算法。
:param signature_algrithm: The signature_algrithm of this ShowCertificateResponse.
:type: str
"""
self._signature_algrithm = signature_algrithm
@property
def issue_time(self):
"""Gets the issue_time of this ShowCertificateResponse.
证书签发时间,没有获取到有效值时为空。
:return: The issue_time of this ShowCertificateResponse.
:rtype: str
"""
return self._issue_time
@issue_time.setter
def issue_time(self, issue_time):
"""Sets the issue_time of this ShowCertificateResponse.
证书签发时间,没有获取到有效值时为空。
:param issue_time: The issue_time of this ShowCertificateResponse.
:type: str
"""
self._issue_time = issue_time
@property
def not_before(self):
"""Gets the not_before of this ShowCertificateResponse.
证书生效时间,没有获取到有效值时为空。
:return: The not_before of this ShowCertificateResponse.
:rtype: str
"""
return self._not_before
@not_before.setter
def not_before(self, not_before):
"""Sets the not_before of this ShowCertificateResponse.
证书生效时间,没有获取到有效值时为空。
:param not_before: The not_before of this ShowCertificateResponse.
:type: str
"""
self._not_before = not_before
@property
def not_after(self):
"""Gets the not_after of this ShowCertificateResponse.
证书失效时间,没有获取到有效值时为空。
:return: The not_after of this ShowCertificateResponse.
:rtype: str
"""
return self._not_after
@not_after.setter
def not_after(self, not_after):
"""Sets the not_after of this ShowCertificateResponse.
证书失效时间,没有获取到有效值时为空。
:param not_after: The not_after of this ShowCertificateResponse.
:type: str
"""
self._not_after = not_after
@property
def validity_period(self):
"""Gets the validity_period of this ShowCertificateResponse.
证书有效期,按月为单位。
:return: The validity_period of this ShowCertificateResponse.
:rtype: int
"""
return self._validity_period
@validity_period.setter
def validity_period(self, validity_period):
"""Sets the validity_period of this ShowCertificateResponse.
证书有效期,按月为单位。
:param validity_period: The validity_period of this ShowCertificateResponse.
:type: int
"""
self._validity_period = validity_period
@property
def validation_method(self):
"""Gets the validation_method of this ShowCertificateResponse.
域名认证方式,取值如下:DNS、FILE、EMAIL。
:return: The validation_method of this ShowCertificateResponse.
:rtype: str
"""
return self._validation_method
@validation_method.setter
def validation_method(self, validation_method):
"""Sets the validation_method of this ShowCertificateResponse.
域名认证方式,取值如下:DNS、FILE、EMAIL。
:param validation_method: The validation_method of this ShowCertificateResponse.
:type: str
"""
self._validation_method = validation_method
@property
def domain_type(self):
"""Gets the domain_type of this ShowCertificateResponse.
域名类型,取值如下: - SINGLE_DOMAIN:单域名 - WILDCARD:通配符 - MULTI_DOMAIN:多域名
:return: The domain_type of this ShowCertificateResponse.
:rtype: str
"""
return self._domain_type
@domain_type.setter
def domain_type(self, domain_type):
"""Sets the domain_type of this ShowCertificateResponse.
域名类型,取值如下: - SINGLE_DOMAIN:单域名 - WILDCARD:通配符 - MULTI_DOMAIN:多域名
:param domain_type: The domain_type of this ShowCertificateResponse.
:type: str
"""
self._domain_type = domain_type
@property
def domain(self):
"""Gets the domain of this ShowCertificateResponse.
证书绑定域名。
:return: The domain of this ShowCertificateResponse.
:rtype: str
"""
return self._domain
@domain.setter
def domain(self, domain):
"""Sets the domain of this ShowCertificateResponse.
证书绑定域名。
:param domain: The domain of this ShowCertificateResponse.
:type: str
"""
self._domain = domain
@property
def sans(self):
"""Gets the sans of this ShowCertificateResponse.
证书绑定的附加域名信息。
:return: The sans of this ShowCertificateResponse.
:rtype: str
"""
return self._sans
@sans.setter
def sans(self, sans):
"""Sets the sans of this ShowCertificateResponse.
证书绑定的附加域名信息。
:param sans: The sans of this ShowCertificateResponse.
:type: str
"""
self._sans = sans
@property
def domain_count(self):
"""Gets the domain_count of this ShowCertificateResponse.
证书可绑定域名个数。
:return: The domain_count of this ShowCertificateResponse.
:rtype: int
"""
return self._domain_count
@domain_count.setter
def domain_count(self, domain_count):
"""Sets the domain_count of this ShowCertificateResponse.
证书可绑定域名个数。
:param domain_count: The domain_count of this ShowCertificateResponse.
:type: int
"""
self._domain_count = domain_count
@property
def wildcard_count(self):
"""Gets the wildcard_count of this ShowCertificateResponse.
证书可绑定附加域名个数。
:return: The wildcard_count of this ShowCertificateResponse.
:rtype: int
"""
return self._wildcard_count
@wildcard_count.setter
def wildcard_count(self, wildcard_count):
"""Sets the wildcard_count of this ShowCertificateResponse.
证书可绑定附加域名个数。
:param wildcard_count: The wildcard_count of this ShowCertificateResponse.
:type: int
"""
self._wildcard_count = wildcard_count
@property
def authentification(self):
"""Gets the authentification of this ShowCertificateResponse.
域名所有权认证信息,详情请参见Authentification字段数据结构说明。
:return: The authentification of this ShowCertificateResponse.
:rtype: list[Authentification]
"""
return self._authentification
@authentification.setter
def authentification(self, authentification):
"""Sets the authentification of this ShowCertificateResponse.
域名所有权认证信息,详情请参见Authentification字段数据结构说明。
:param authentification: The authentification of this ShowCertificateResponse.
:type: list[Authentification]
"""
self._authentification = authentification
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowCertificateResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"huaweicloudsdkcore.utils.http_utils.sanitize_for_serialization",
"six.iteritems",
"sys.setdefaultencoding"
] |
[((16545, 16578), 'six.iteritems', 'six.iteritems', (['self.openapi_types'], {}), '(self.openapi_types)\n', (16558, 16578), False, 'import six\n'), ((17563, 17594), 'sys.setdefaultencoding', 'sys.setdefaultencoding', (['"""utf-8"""'], {}), "('utf-8')\n", (17585, 17594), False, 'import sys\n'), ((17621, 17653), 'huaweicloudsdkcore.utils.http_utils.sanitize_for_serialization', 'sanitize_for_serialization', (['self'], {}), '(self)\n', (17647, 17653), False, 'from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization\n')]
|
import asyncio
import os
import json
from winrt.windows.media.control import \
GlobalSystemMediaTransportControlsSessionManager as MediaManager
from winrt.windows.storage.streams import \
DataReader, Buffer, InputStreamOptions
async def get_media_info():
sessions = await MediaManager.request_async()
# This source_app_user_model_id check and if statement is optional
# Use it if you want to only get a certain player/program's media
# (e.g. only chrome.exe's media not any other program's).
# To get the ID, use a breakpoint() to run sessions.get_current_session()
# while the media you want to get is playing.
# Then set TARGET_ID to the string this call returns.
current_session = sessions.get_current_session()
if current_session: # there needs to be a media session running
info = await current_session.try_get_media_properties_async()
# song_attr[0] != '_' ignores system attributes
info_dict = {song_attr: info.__getattribute__(song_attr) for song_attr in dir(info) if song_attr[0] != '_'}
# converts winrt vector to list
info_dict['genres'] = list(info_dict['genres'])
# create the current_media_info dict with the earlier code first
thumb_stream_ref = info_dict['thumbnail']
try:
filename="./static/media_thumb.jpg"
if os.path.exists(filename):
os.remove(filename)
# 5MB (5 million byte) buffer - thumbnail unlikely to be larger
thumb_read_buffer = Buffer(5000000)
await read_stream_into_buffer(thumb_stream_ref, thumb_read_buffer)
buffer_reader = DataReader.from_buffer(thumb_read_buffer)
byte_buffer = buffer_reader.read_bytes(thumb_read_buffer.length)
if not os.path.exists('static'):
os.makedirs('static')
filename="./static/media_thumb.jpg"
if not len(bytearray(byte_buffer)) ==0:
with open(filename, 'wb+') as fobj:
fobj.write(bytearray(byte_buffer))
info_dict["thumbnail"]=filename[1:]
except Exception as e:
# print(e)
# print("something went wrong with getting thumbnail")
info_dict["thumbnail"]=" "
return info_dict
return None
async def read_stream_into_buffer(stream_ref, buffer):
readable_stream = await stream_ref.open_read_async()
readable_stream.read_async(buffer, buffer.capacity, InputStreamOptions.READ_AHEAD)
if __name__ == '__main__':
print(json.dumps(asyncio.run(get_media_info())))
|
[
"os.remove",
"winrt.windows.media.control.GlobalSystemMediaTransportControlsSessionManager.request_async",
"os.makedirs",
"os.path.exists",
"winrt.windows.storage.streams.Buffer",
"winrt.windows.storage.streams.DataReader.from_buffer"
] |
[((285, 313), 'winrt.windows.media.control.GlobalSystemMediaTransportControlsSessionManager.request_async', 'MediaManager.request_async', ([], {}), '()\n', (311, 313), True, 'from winrt.windows.media.control import GlobalSystemMediaTransportControlsSessionManager as MediaManager\n'), ((1367, 1391), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (1381, 1391), False, 'import os\n'), ((1537, 1552), 'winrt.windows.storage.streams.Buffer', 'Buffer', (['(5000000)'], {}), '(5000000)\n', (1543, 1552), False, 'from winrt.windows.storage.streams import DataReader, Buffer, InputStreamOptions\n'), ((1660, 1701), 'winrt.windows.storage.streams.DataReader.from_buffer', 'DataReader.from_buffer', (['thumb_read_buffer'], {}), '(thumb_read_buffer)\n', (1682, 1701), False, 'from winrt.windows.storage.streams import DataReader, Buffer, InputStreamOptions\n'), ((1409, 1428), 'os.remove', 'os.remove', (['filename'], {}), '(filename)\n', (1418, 1428), False, 'import os\n'), ((1798, 1822), 'os.path.exists', 'os.path.exists', (['"""static"""'], {}), "('static')\n", (1812, 1822), False, 'import os\n'), ((1840, 1861), 'os.makedirs', 'os.makedirs', (['"""static"""'], {}), "('static')\n", (1851, 1861), False, 'import os\n')]
|
import FWCore.ParameterSet.Config as cms
#from ..modules.hltL1TkMuons_cfi import *
from ..modules.hltDoubleMuon7DZ1p0_cfi import *
from ..modules.hltL1TkDoubleMuFiltered7_cfi import *
from ..modules.hltL1TkSingleMuFiltered15_cfi import *
from ..sequences.HLTBeginSequence_cfi import *
from ..sequences.HLTEndSequence_cfi import *
L1T_DoubleTkMuon_15_7 = cms.Path(
HLTBeginSequence +
# hltL1TkMuons +
hltL1TkDoubleMuFiltered7 +
hltL1TkSingleMuFiltered15 +
hltDoubleMuon7DZ1p0 +
HLTEndSequence
)
|
[
"FWCore.ParameterSet.Config.Path"
] |
[((356, 480), 'FWCore.ParameterSet.Config.Path', 'cms.Path', (['(HLTBeginSequence + hltL1TkDoubleMuFiltered7 + hltL1TkSingleMuFiltered15 +\n hltDoubleMuon7DZ1p0 + HLTEndSequence)'], {}), '(HLTBeginSequence + hltL1TkDoubleMuFiltered7 +\n hltL1TkSingleMuFiltered15 + hltDoubleMuon7DZ1p0 + HLTEndSequence)\n', (364, 480), True, 'import FWCore.ParameterSet.Config as cms\n')]
|
from sympy import *
from sympy.matrices import *
import os
import re
import argparse
# local
import pretty_print
def sqr(a):
return a * a
def trunc_acos(x):
tmp = Piecewise((0.0, x >= 1.0), (pi, x <= -1.0), (acos(x), True))
return tmp.subs(x, x)
def eigs_2d(mat):
a = mat[0, 0] + mat[1, 1]
delta = (mat[0, 0] - mat[1, 1])**2 + 4 * mat[0, 1]**2
tmp1 = Piecewise(
(a / 2, delta < 1e-10),
((a - sqrt(delta)) / 2.0, True)
)
tmp2 = Piecewise(
(a / 2, delta < 1e-10),
((a + sqrt(delta)) / 2.0, True)
)
return tmp1.subs(delta, delta), tmp2.subs(delta, delta)
def eigs_3d(mat):
b = mat[0] + mat[4] + mat[8]
t = sqr(mat[1]) + sqr(mat[2]) + sqr(mat[5])
p = 0.5 * (sqr(mat[0] - mat[4]) + sqr(mat[0] - mat[8]) + sqr(mat[4] - mat[8]))
p += 3.0 * t
q = 18.0 * (mat[0] * mat[4] * mat[8] + 3.0 * mat[1] * mat[2] * mat[5])
q += 2.0 * (mat[0] * sqr(mat[0]) + mat[4] * sqr(mat[4]) + mat[8] * sqr(mat[8]))
q += 9.0 * b * t
q -= 3.0 * (mat[0] + mat[4]) * (mat[0] + mat[8]) * (mat[4] + mat[8])
q -= 27.0 * (mat[0] * sqr(mat[5]) + mat[4] * sqr(mat[2]) + mat[8] * sqr(mat[1]))
delta = trunc_acos(0.5 * q / sqrt(p * sqr(p)))
p = 2.0 * sqrt(p)
tmp1 = Piecewise(
(b / 3.0, p < 1e-10),
((b + p * cos(delta / 3.0)) / 3.0, True)
)
tmp2 = Piecewise(
(b / 3.0, p < 1e-10),
((b + p * cos((delta + 2.0 * pi) / 3.0)) / 3.0, True)
)
tmp3 = Piecewise(
(b / 3.0, p < 1e-10),
((b + p * cos((delta - 2.0 * pi) / 3.0)) / 3.0, True)
)
return tmp1.subs(p, p), tmp2.subs(p, p), tmp3.subs(p, p)
def parse_args():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("output", type=str, help="path to the output folder")
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
dims = [2, 3]
cpp = "#include <polyfem/auto_eigs.hpp>\n\n\n"
hpp = "#pragma once\n\n#include <Eigen/Dense>\n\n"
cpp = cpp + "namespace polyfem {\nnamespace autogen " + "{\n"
hpp = hpp + "namespace polyfem {\nnamespace autogen " + "{\n"
hpp = hpp + "template<typename T>\nT int_pow(T val, int exp) { T res = exp <=0 ? T(0.): val; for(int i = 1; i < exp; ++i) res = res*val; return res; }\n\n"
lambdaa = Symbol('lambda', real=True)
for dim in dims:
print("processing " + str(dim))
M = zeros(dim, dim)
for i in range(0, dim):
for j in range(0, dim):
if i <= j:
M[i, j] = Symbol('m[' + str(i) + ',' + str(j) + ']', real=True)
else:
M[i, j] = Symbol('m[' + str(j) + ',' + str(i) + ']', real=True)
if dim == 2:
lambdas = eigs_2d(M)
else:
lambdas = eigs_3d(M)
# lambdas = simplify(lambdas)
c99 = pretty_print.C99_print(lambdas)
c99 = re.sub(r"m\[(\d{1}),(\d{1})\]", r'm(\1,\2)', c99)
c99 = re.sub(r"result_(\d{1})", r'res(\1)', c99)
c99 = c99.replace("0.0", "T(0)")
c99 = c99.replace(" M_PI", " T(M_PI)")
signature = "template<typename T>\nvoid eigs_" + str(dim) + "d(const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, 0, 3, 3> &m, "
signature += "Eigen::Matrix<T, Eigen::Dynamic, 1, 0, 3, 1> &res)"
hpp = hpp + signature + " {\nres.resize(" + str(dim) + ");\n" + c99 + "\n}\n\n"
cpp = cpp + "\n"
hpp = hpp + "\n"
cpp = cpp + "\n}}\n"
hpp = hpp + "\n}}\n"
path = os.path.abspath(args.output)
print("saving...")
with open(os.path.join(path, "auto_eigs.cpp"), "w") as file:
file.write(cpp)
with open(os.path.join(path, "auto_eigs.hpp"), "w") as file:
file.write(hpp)
print("done!")
|
[
"os.path.abspath",
"argparse.ArgumentParser",
"pretty_print.C99_print",
"os.path.join",
"re.sub"
] |
[((1692, 1795), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__', 'formatter_class': 'argparse.RawDescriptionHelpFormatter'}), '(description=__doc__, formatter_class=argparse.\n RawDescriptionHelpFormatter)\n', (1715, 1795), False, 'import argparse\n'), ((3622, 3650), 'os.path.abspath', 'os.path.abspath', (['args.output'], {}), '(args.output)\n', (3637, 3650), False, 'import os\n'), ((2965, 2996), 'pretty_print.C99_print', 'pretty_print.C99_print', (['lambdas'], {}), '(lambdas)\n', (2987, 2996), False, 'import pretty_print\n'), ((3012, 3065), 're.sub', 're.sub', (['"""m\\\\[(\\\\d{1}),(\\\\d{1})\\\\]"""', '"""m(\\\\1,\\\\2)"""', 'c99'], {}), "('m\\\\[(\\\\d{1}),(\\\\d{1})\\\\]', 'm(\\\\1,\\\\2)', c99)\n", (3018, 3065), False, 'import re\n'), ((3076, 3118), 're.sub', 're.sub', (['"""result_(\\\\d{1})"""', '"""res(\\\\1)"""', 'c99'], {}), "('result_(\\\\d{1})', 'res(\\\\1)', c99)\n", (3082, 3118), False, 'import re\n'), ((3689, 3724), 'os.path.join', 'os.path.join', (['path', '"""auto_eigs.cpp"""'], {}), "(path, 'auto_eigs.cpp')\n", (3701, 3724), False, 'import os\n'), ((3779, 3814), 'os.path.join', 'os.path.join', (['path', '"""auto_eigs.hpp"""'], {}), "(path, 'auto_eigs.hpp')\n", (3791, 3814), False, 'import os\n')]
|
from decimal import Decimal
from unittest import TestCase
from hummingbot.core.data_type.common import TradeType, PositionAction
from hummingbot.core.data_type.in_flight_order import TradeUpdate
from hummingbot.core.data_type.trade_fee import (
AddedToCostTradeFee,
DeductedFromReturnsTradeFee,
TokenAmount,
TradeFeeBase,
TradeFeeSchema,
)
class TradeFeeTests(TestCase):
def test_added_to_cost_spot_fee_created_for_buy_and_fee_not_deducted_from_return(self):
schema = TradeFeeSchema(
percent_fee_token="HBOT",
maker_percent_fee_decimal=Decimal("1"),
taker_percent_fee_decimal=Decimal("1"),
buy_percent_fee_deducted_from_returns=False,
)
fee = TradeFeeBase.new_spot_fee(
fee_schema=schema,
trade_type=TradeType.BUY,
percent=Decimal("1.1"),
percent_token="HBOT",
flat_fees=[TokenAmount(token="COINALPHA", amount=Decimal("20"))]
)
self.assertEqual(AddedToCostTradeFee, type(fee))
self.assertEqual(Decimal("1.1"), fee.percent)
self.assertEqual("HBOT", fee.percent_token)
self.assertEqual([TokenAmount(token="COINALPHA", amount=Decimal("20"))], fee.flat_fees)
def test_deducted_from_return_spot_fee_created_for_buy_and_fee_deducted_from_return(self):
schema = TradeFeeSchema(
maker_percent_fee_decimal=Decimal("1"),
taker_percent_fee_decimal=Decimal("1"),
buy_percent_fee_deducted_from_returns=True,
)
fee = TradeFeeBase.new_spot_fee(
fee_schema=schema,
trade_type=TradeType.BUY,
percent=Decimal("1.1"),
percent_token="HBOT",
flat_fees=[TokenAmount(token="COINALPHA", amount=Decimal("20"))]
)
self.assertEqual(DeductedFromReturnsTradeFee, type(fee))
self.assertEqual(Decimal("1.1"), fee.percent)
self.assertEqual("HBOT", fee.percent_token)
self.assertEqual([TokenAmount(token="COINALPHA", amount=Decimal("20"))], fee.flat_fees)
def test_deducted_from_return_spot_fee_created_for_sell(self):
schema = TradeFeeSchema(
percent_fee_token="HBOT",
maker_percent_fee_decimal=Decimal("1"),
taker_percent_fee_decimal=Decimal("1"),
buy_percent_fee_deducted_from_returns=False,
)
fee = TradeFeeBase.new_spot_fee(
fee_schema=schema,
trade_type=TradeType.SELL,
percent=Decimal("1.1"),
percent_token="HBOT",
flat_fees=[TokenAmount(token="COINALPHA", amount=Decimal("20"))]
)
self.assertEqual(DeductedFromReturnsTradeFee, type(fee))
self.assertEqual(Decimal("1.1"), fee.percent)
self.assertEqual("HBOT", fee.percent_token)
self.assertEqual([TokenAmount(token="COINALPHA", amount=Decimal("20"))], fee.flat_fees)
schema.percent_fee_token = None
schema.buy_percent_fee_deducted_from_returns = True
fee = TradeFeeBase.new_spot_fee(
fee_schema=schema,
trade_type=TradeType.SELL,
percent=Decimal("1.1"),
percent_token="HBOT",
flat_fees=[TokenAmount(token="COINALPHA", amount=Decimal("20"))]
)
self.assertEqual(DeductedFromReturnsTradeFee, type(fee))
def test_added_to_cost_perpetual_fee_created_when_opening_positions(self):
schema = TradeFeeSchema(
maker_percent_fee_decimal=Decimal("1"),
taker_percent_fee_decimal=Decimal("1"),
buy_percent_fee_deducted_from_returns=False,
)
fee = TradeFeeBase.new_perpetual_fee(
fee_schema=schema,
position_action=PositionAction.OPEN,
percent=Decimal("1.1"),
percent_token="HBOT",
flat_fees=[TokenAmount(token="COINALPHA", amount=Decimal("20"))]
)
self.assertEqual(AddedToCostTradeFee, type(fee))
self.assertEqual(Decimal("1.1"), fee.percent)
self.assertEqual("HBOT", fee.percent_token)
self.assertEqual([TokenAmount(token="COINALPHA", amount=Decimal("20"))], fee.flat_fees)
schema.percent_fee_token = "HBOT"
fee = TradeFeeBase.new_perpetual_fee(
fee_schema=schema,
position_action=PositionAction.OPEN,
percent=Decimal("1.1"),
percent_token="HBOT",
flat_fees=[TokenAmount(token="COINALPHA", amount=Decimal("20"))]
)
self.assertEqual(AddedToCostTradeFee, type(fee))
def test_added_to_cost_perpetual_fee_created_when_closing_position_but_schema_has_percent_fee_token(self):
schema = TradeFeeSchema(
percent_fee_token="HBOT",
maker_percent_fee_decimal=Decimal("1"),
taker_percent_fee_decimal=Decimal("1"),
buy_percent_fee_deducted_from_returns=False,
)
fee = TradeFeeBase.new_perpetual_fee(
fee_schema=schema,
position_action=PositionAction.CLOSE,
percent=Decimal("1.1"),
percent_token="HBOT",
flat_fees=[TokenAmount(token="COINALPHA", amount=Decimal("20"))]
)
self.assertEqual(AddedToCostTradeFee, type(fee))
self.assertEqual(Decimal("1.1"), fee.percent)
self.assertEqual("HBOT", fee.percent_token)
self.assertEqual([TokenAmount(token="COINALPHA", amount=Decimal("20"))], fee.flat_fees)
def test_deducted_from_returns_perpetual_fee_created_when_closing_position_and_no_percent_fee_token(self):
schema = TradeFeeSchema(
maker_percent_fee_decimal=Decimal("1"),
taker_percent_fee_decimal=Decimal("1"),
buy_percent_fee_deducted_from_returns=False,
)
fee = TradeFeeBase.new_perpetual_fee(
fee_schema=schema,
position_action=PositionAction.CLOSE,
percent=Decimal("1.1"),
percent_token="HBOT",
flat_fees=[TokenAmount(token="COINALPHA", amount=Decimal("20"))]
)
self.assertEqual(DeductedFromReturnsTradeFee, type(fee))
self.assertEqual(Decimal("1.1"), fee.percent)
self.assertEqual("HBOT", fee.percent_token)
self.assertEqual([TokenAmount(token="COINALPHA", amount=Decimal("20"))], fee.flat_fees)
def test_added_to_cost_json_serialization(self):
token_amount = TokenAmount(token="COINALPHA", amount=Decimal("20.6"))
fee = AddedToCostTradeFee(
percent=Decimal("0.5"),
percent_token="COINALPHA",
flat_fees=[token_amount]
)
expected_json = {
"fee_type": AddedToCostTradeFee.type_descriptor_for_json(),
"percent": "0.5",
"percent_token": "COINALPHA",
"flat_fees": [token_amount.to_json()]
}
self.assertEqual(expected_json, fee.to_json())
def test_added_to_cost_json_deserialization(self):
token_amount = TokenAmount(token="COINALPHA", amount=Decimal("20.6"))
fee = AddedToCostTradeFee(
percent=Decimal("0.5"),
percent_token="COINALPHA",
flat_fees=[token_amount]
)
self.assertEqual(fee, TradeFeeBase.from_json(fee.to_json()))
def test_deducted_from_returns_json_serialization(self):
token_amount = TokenAmount(token="COINALPHA", amount=Decimal("20.6"))
fee = DeductedFromReturnsTradeFee(
percent=Decimal("0.5"),
percent_token="COINALPHA",
flat_fees=[token_amount]
)
expected_json = {
"fee_type": DeductedFromReturnsTradeFee.type_descriptor_for_json(),
"percent": "0.5",
"percent_token": "COINALPHA",
"flat_fees": [token_amount.to_json()]
}
self.assertEqual(expected_json, fee.to_json())
def test_deducted_from_returns_json_deserialization(self):
token_amount = TokenAmount(token="CO<PASSWORD>", amount=Decimal("20.6"))
fee = DeductedFromReturnsTradeFee(
percent=Decimal("0.5"),
percent_token="COINALPHA",
flat_fees=[token_amount]
)
self.assertEqual(fee, TradeFeeBase.from_json(fee.to_json()))
def test_added_to_cost_fee_amount_in_token_does_not_look_for_convertion_rate_when_percentage_zero(self):
# Configure fee to use a percent token different from the token used to request the fee value
# That forces the logic to need the convertion rate if the fee amount is calculated
        fee = AddedToCostTradeFee(percent=Decimal("0"), percent_token="COINALPHA")
fee_amount = fee.fee_amount_in_token(
trading_pair="HBOT-COINALPHA",
price=Decimal("1000"),
order_amount=Decimal("1"),
token="BNB")
self.assertEqual(Decimal("0"), fee_amount)
def test_deducted_from_returns_fee_amount_in_token_does_not_look_for_convertion_rate_when_percentage_zero(self):
# Configure fee to use a percent token different from the token used to request the fee value
# That forces the logic to need the convertion rate if the fee amount is calculated
        fee = DeductedFromReturnsTradeFee(percent=Decimal("0"), percent_token="COINALPHA")
fee_amount = fee.fee_amount_in_token(
trading_pair="HBOT-COINALPHA",
price=Decimal("1000"),
order_amount=Decimal("1"),
token="BNB")
self.assertEqual(Decimal("0"), fee_amount)
class TokenAmountTests(TestCase):
def test_json_serialization(self):
amount = TokenAmount(token="HBOT-COINALPHA", amount=Decimal("1000.50"))
expected_json = {
"token": "HBOT-COINALPHA",
"amount": "1000.50",
}
self.assertEqual(expected_json, amount.to_json())
def test_json_deserialization(self):
amount = TokenAmount(token="HBOT-COINALPHA", amount=Decimal("1000.50"))
self.assertEqual(amount, TokenAmount.from_json(amount.to_json()))
class TradeUpdateTests(TestCase):
def test_json_serialization(self):
token_amount = TokenAmount(token="COINALPHA", amount=Decimal("20.6"))
fee = DeductedFromReturnsTradeFee(
percent=Decimal("0.5"),
percent_token="COINALPHA",
flat_fees=[token_amount]
)
trade_update = TradeUpdate(
trade_id="12345",
client_order_id="OID1",
exchange_order_id="EOID1",
trading_pair="HBOT-COINALPHA",
fill_timestamp=1640001112,
fill_price=Decimal("1000.11"),
fill_base_amount=Decimal("2"),
fill_quote_amount=Decimal("2000.22"),
fee=fee,
)
expected_json = trade_update._asdict()
expected_json.update({
"fill_price": "1000.11",
"fill_base_amount": "2",
"fill_quote_amount": "2000.22",
"fee": fee.to_json(),
})
self.assertEqual(expected_json, trade_update.to_json())
def test_json_deserialization(self):
token_amount = TokenAmount(token="COINALPHA", amount=Decimal("20.6"))
fee = DeductedFromReturnsTradeFee(
percent=Decimal("0.5"),
percent_token="CO<PASSWORD>",
flat_fees=[token_amount]
)
trade_update = TradeUpdate(
trade_id="12345",
client_order_id="OID1",
exchange_order_id="EOID1",
trading_pair="HBOT-COINALPHA",
fill_timestamp=1640001112,
fill_price=Decimal("1000.11"),
fill_base_amount=Decimal("2"),
fill_quote_amount=Decimal("2000.22"),
fee=fee,
)
self.assertEqual(trade_update, TradeUpdate.from_json(trade_update.to_json()))
|
[
"hummingbot.core.data_type.trade_fee.DeductedFromReturnsTradeFee.type_descriptor_for_json",
"hummingbot.core.data_type.trade_fee.AddedToCostTradeFee.type_descriptor_for_json",
"decimal.Decimal"
] |
[((1081, 1095), 'decimal.Decimal', 'Decimal', (['"""1.1"""'], {}), "('1.1')\n", (1088, 1095), False, 'from decimal import Decimal\n'), ((1917, 1931), 'decimal.Decimal', 'Decimal', (['"""1.1"""'], {}), "('1.1')\n", (1924, 1931), False, 'from decimal import Decimal\n'), ((2765, 2779), 'decimal.Decimal', 'Decimal', (['"""1.1"""'], {}), "('1.1')\n", (2772, 2779), False, 'from decimal import Decimal\n'), ((4030, 4044), 'decimal.Decimal', 'Decimal', (['"""1.1"""'], {}), "('1.1')\n", (4037, 4044), False, 'from decimal import Decimal\n'), ((5315, 5329), 'decimal.Decimal', 'Decimal', (['"""1.1"""'], {}), "('1.1')\n", (5322, 5329), False, 'from decimal import Decimal\n'), ((6185, 6199), 'decimal.Decimal', 'Decimal', (['"""1.1"""'], {}), "('1.1')\n", (6192, 6199), False, 'from decimal import Decimal\n'), ((6702, 6748), 'hummingbot.core.data_type.trade_fee.AddedToCostTradeFee.type_descriptor_for_json', 'AddedToCostTradeFee.type_descriptor_for_json', ([], {}), '()\n', (6746, 6748), False, 'from hummingbot.core.data_type.trade_fee import AddedToCostTradeFee, DeductedFromReturnsTradeFee, TokenAmount, TradeFeeBase, TradeFeeSchema\n'), ((7655, 7709), 'hummingbot.core.data_type.trade_fee.DeductedFromReturnsTradeFee.type_descriptor_for_json', 'DeductedFromReturnsTradeFee.type_descriptor_for_json', ([], {}), '()\n', (7707, 7709), False, 'from hummingbot.core.data_type.trade_fee import AddedToCostTradeFee, DeductedFromReturnsTradeFee, TokenAmount, TradeFeeBase, TradeFeeSchema\n'), ((8884, 8896), 'decimal.Decimal', 'Decimal', (['"""0"""'], {}), "('0')\n", (8891, 8896), False, 'from decimal import Decimal\n'), ((9531, 9543), 'decimal.Decimal', 'Decimal', (['"""0"""'], {}), "('0')\n", (9538, 9543), False, 'from decimal import Decimal\n'), ((597, 609), 'decimal.Decimal', 'Decimal', (['"""1"""'], {}), "('1')\n", (604, 609), False, 'from decimal import Decimal\n'), ((649, 661), 'decimal.Decimal', 'Decimal', (['"""1"""'], {}), "('1')\n", (656, 661), False, 'from decimal import Decimal\n'), ((861, 875), 'decimal.Decimal', 'Decimal', (['"""1.1"""'], {}), "('1.1')\n", (868, 875), False, 'from decimal import Decimal\n'), ((1426, 1438), 'decimal.Decimal', 'Decimal', (['"""1"""'], {}), "('1')\n", (1433, 1438), False, 'from decimal import Decimal\n'), ((1478, 1490), 'decimal.Decimal', 'Decimal', (['"""1"""'], {}), "('1')\n", (1485, 1490), False, 'from decimal import Decimal\n'), ((1689, 1703), 'decimal.Decimal', 'Decimal', (['"""1.1"""'], {}), "('1.1')\n", (1696, 1703), False, 'from decimal import Decimal\n'), ((2272, 2284), 'decimal.Decimal', 'Decimal', (['"""1"""'], {}), "('1')\n", (2279, 2284), False, 'from decimal import Decimal\n'), ((2324, 2336), 'decimal.Decimal', 'Decimal', (['"""1"""'], {}), "('1')\n", (2331, 2336), False, 'from decimal import Decimal\n'), ((2537, 2551), 'decimal.Decimal', 'Decimal', (['"""1.1"""'], {}), "('1.1')\n", (2544, 2551), False, 'from decimal import Decimal\n'), ((3175, 3189), 'decimal.Decimal', 'Decimal', (['"""1.1"""'], {}), "('1.1')\n", (3182, 3189), False, 'from decimal import Decimal\n'), ((3530, 3542), 'decimal.Decimal', 'Decimal', (['"""1"""'], {}), "('1')\n", (3537, 3542), False, 'from decimal import Decimal\n'), ((3582, 3594), 'decimal.Decimal', 'Decimal', (['"""1"""'], {}), "('1')\n", (3589, 3594), False, 'from decimal import Decimal\n'), ((3810, 3824), 'decimal.Decimal', 'Decimal', (['"""1.1"""'], {}), "('1.1')\n", (3817, 3824), False, 'from decimal import Decimal\n'), ((4397, 4411), 'decimal.Decimal', 'Decimal', (['"""1.1"""'], {}), "('1.1')\n", (4404, 4411), False, 'from 
decimal import Decimal\n'), ((4814, 4826), 'decimal.Decimal', 'Decimal', (['"""1"""'], {}), "('1')\n", (4821, 4826), False, 'from decimal import Decimal\n'), ((4866, 4878), 'decimal.Decimal', 'Decimal', (['"""1"""'], {}), "('1')\n", (4873, 4878), False, 'from decimal import Decimal\n'), ((5095, 5109), 'decimal.Decimal', 'Decimal', (['"""1.1"""'], {}), "('1.1')\n", (5102, 5109), False, 'from decimal import Decimal\n'), ((5676, 5688), 'decimal.Decimal', 'Decimal', (['"""1"""'], {}), "('1')\n", (5683, 5688), False, 'from decimal import Decimal\n'), ((5728, 5740), 'decimal.Decimal', 'Decimal', (['"""1"""'], {}), "('1')\n", (5735, 5740), False, 'from decimal import Decimal\n'), ((5957, 5971), 'decimal.Decimal', 'Decimal', (['"""1.1"""'], {}), "('1.1')\n", (5964, 5971), False, 'from decimal import Decimal\n'), ((6477, 6492), 'decimal.Decimal', 'Decimal', (['"""20.6"""'], {}), "('20.6')\n", (6484, 6492), False, 'from decimal import Decimal\n'), ((6549, 6563), 'decimal.Decimal', 'Decimal', (['"""0.5"""'], {}), "('0.5')\n", (6556, 6563), False, 'from decimal import Decimal\n'), ((7055, 7070), 'decimal.Decimal', 'Decimal', (['"""20.6"""'], {}), "('20.6')\n", (7062, 7070), False, 'from decimal import Decimal\n'), ((7127, 7141), 'decimal.Decimal', 'Decimal', (['"""0.5"""'], {}), "('0.5')\n", (7134, 7141), False, 'from decimal import Decimal\n'), ((7422, 7437), 'decimal.Decimal', 'Decimal', (['"""20.6"""'], {}), "('20.6')\n", (7429, 7437), False, 'from decimal import Decimal\n'), ((7502, 7516), 'decimal.Decimal', 'Decimal', (['"""0.5"""'], {}), "('0.5')\n", (7509, 7516), False, 'from decimal import Decimal\n'), ((8027, 8042), 'decimal.Decimal', 'Decimal', (['"""20.6"""'], {}), "('20.6')\n", (8034, 8042), False, 'from decimal import Decimal\n'), ((8107, 8121), 'decimal.Decimal', 'Decimal', (['"""0.5"""'], {}), "('0.5')\n", (8114, 8121), False, 'from decimal import Decimal\n'), ((8625, 8637), 'decimal.Decimal', 'Decimal', (['"""0"""'], {}), "('0')\n", (8632, 8637), False, 'from decimal import Decimal\n'), ((8777, 8792), 'decimal.Decimal', 'Decimal', (['"""1000"""'], {}), "('1000')\n", (8784, 8792), False, 'from decimal import Decimal\n'), ((8819, 8831), 'decimal.Decimal', 'Decimal', (['"""1"""'], {}), "('1')\n", (8826, 8831), False, 'from decimal import Decimal\n'), ((9272, 9284), 'decimal.Decimal', 'Decimal', (['"""0"""'], {}), "('0')\n", (9279, 9284), False, 'from decimal import Decimal\n'), ((9424, 9439), 'decimal.Decimal', 'Decimal', (['"""1000"""'], {}), "('1000')\n", (9431, 9439), False, 'from decimal import Decimal\n'), ((9466, 9478), 'decimal.Decimal', 'Decimal', (['"""1"""'], {}), "('1')\n", (9473, 9478), False, 'from decimal import Decimal\n'), ((9693, 9711), 'decimal.Decimal', 'Decimal', (['"""1000.50"""'], {}), "('1000.50')\n", (9700, 9711), False, 'from decimal import Decimal\n'), ((9983, 10001), 'decimal.Decimal', 'Decimal', (['"""1000.50"""'], {}), "('1000.50')\n", (9990, 10001), False, 'from decimal import Decimal\n'), ((10215, 10230), 'decimal.Decimal', 'Decimal', (['"""20.6"""'], {}), "('20.6')\n", (10222, 10230), False, 'from decimal import Decimal\n'), ((10295, 10309), 'decimal.Decimal', 'Decimal', (['"""0.5"""'], {}), "('0.5')\n", (10302, 10309), False, 'from decimal import Decimal\n'), ((10643, 10661), 'decimal.Decimal', 'Decimal', (['"""1000.11"""'], {}), "('1000.11')\n", (10650, 10661), False, 'from decimal import Decimal\n'), ((10692, 10704), 'decimal.Decimal', 'Decimal', (['"""2"""'], {}), "('2')\n", (10699, 10704), False, 'from decimal import Decimal\n'), ((10736, 10754), 
'decimal.Decimal', 'Decimal', (['"""2000.22"""'], {}), "('2000.22')\n", (10743, 10754), False, 'from decimal import Decimal\n'), ((11197, 11212), 'decimal.Decimal', 'Decimal', (['"""20.6"""'], {}), "('20.6')\n", (11204, 11212), False, 'from decimal import Decimal\n'), ((11277, 11291), 'decimal.Decimal', 'Decimal', (['"""0.5"""'], {}), "('0.5')\n", (11284, 11291), False, 'from decimal import Decimal\n'), ((11628, 11646), 'decimal.Decimal', 'Decimal', (['"""1000.11"""'], {}), "('1000.11')\n", (11635, 11646), False, 'from decimal import Decimal\n'), ((11677, 11689), 'decimal.Decimal', 'Decimal', (['"""2"""'], {}), "('2')\n", (11684, 11689), False, 'from decimal import Decimal\n'), ((11721, 11739), 'decimal.Decimal', 'Decimal', (['"""2000.22"""'], {}), "('2000.22')\n", (11728, 11739), False, 'from decimal import Decimal\n'), ((1226, 1239), 'decimal.Decimal', 'Decimal', (['"""20"""'], {}), "('20')\n", (1233, 1239), False, 'from decimal import Decimal\n'), ((2062, 2075), 'decimal.Decimal', 'Decimal', (['"""20"""'], {}), "('20')\n", (2069, 2075), False, 'from decimal import Decimal\n'), ((2910, 2923), 'decimal.Decimal', 'Decimal', (['"""20"""'], {}), "('20')\n", (2917, 2923), False, 'from decimal import Decimal\n'), ((4175, 4188), 'decimal.Decimal', 'Decimal', (['"""20"""'], {}), "('20')\n", (4182, 4188), False, 'from decimal import Decimal\n'), ((5460, 5473), 'decimal.Decimal', 'Decimal', (['"""20"""'], {}), "('20')\n", (5467, 5473), False, 'from decimal import Decimal\n'), ((6330, 6343), 'decimal.Decimal', 'Decimal', (['"""20"""'], {}), "('20')\n", (6337, 6343), False, 'from decimal import Decimal\n'), ((972, 985), 'decimal.Decimal', 'Decimal', (['"""20"""'], {}), "('20')\n", (979, 985), False, 'from decimal import Decimal\n'), ((1800, 1813), 'decimal.Decimal', 'Decimal', (['"""20"""'], {}), "('20')\n", (1807, 1813), False, 'from decimal import Decimal\n'), ((2648, 2661), 'decimal.Decimal', 'Decimal', (['"""20"""'], {}), "('20')\n", (2655, 2661), False, 'from decimal import Decimal\n'), ((3286, 3299), 'decimal.Decimal', 'Decimal', (['"""20"""'], {}), "('20')\n", (3293, 3299), False, 'from decimal import Decimal\n'), ((3921, 3934), 'decimal.Decimal', 'Decimal', (['"""20"""'], {}), "('20')\n", (3928, 3934), False, 'from decimal import Decimal\n'), ((4508, 4521), 'decimal.Decimal', 'Decimal', (['"""20"""'], {}), "('20')\n", (4515, 4521), False, 'from decimal import Decimal\n'), ((5206, 5219), 'decimal.Decimal', 'Decimal', (['"""20"""'], {}), "('20')\n", (5213, 5219), False, 'from decimal import Decimal\n'), ((6068, 6081), 'decimal.Decimal', 'Decimal', (['"""20"""'], {}), "('20')\n", (6075, 6081), False, 'from decimal import Decimal\n')]
|
import math
import tensorflow as tf
import cv2
import numpy as np
from scipy import signal
def image_normalization(image: np.ndarray, new_min=0, new_max=255) -> np.ndarray:
"""
    Normalize the input image to a given range set by the min and max parameters.
    Args:
        image (np.ndarray): input image to normalize.
        new_min (int, optional): lower bound of the target range. Defaults to 0.
        new_max (int, optional): upper bound of the target range. Defaults to 255.
Returns:
[np.ndarray]: Normalized image
"""
original_dtype = image.dtype
image = image.astype(np.float32)
image_min, image_max = np.min(image), np.max(image)
image = tf.cast(image, np.float32)
normalized_image = (new_max - new_min) / (image_max - image_min) * (image - image_min) + new_min
return tf.cast(normalized_image, original_dtype)
def normalize_kernel(kernel: np.array) -> np.ndarray:
    return kernel / np.sum(kernel)  # normalize over the full kernel so it sums to 1
def gaussian_kernel2d(kernel_size: int, sigma: float, dtype=np.float32) -> np.ndarray:
    krange = np.arange(kernel_size, dtype=dtype)  # float grid so the in-place shift below is valid
x, y = np.meshgrid(krange, krange)
constant = np.round(kernel_size / 2)
x -= constant
y -= constant
    kernel = 1 / (2 * math.pi * sigma**2) * np.exp(-(x ** 2 + y ** 2) / (2 * sigma ** 2))  # np.exp is element-wise; math.exp cannot take arrays
return normalize_kernel(kernel)
def gaussian_filter(
image: np.ndarray, kernel_size: int,
sigma: float, dtype=np.float32, strides: int = 1
) -> np.ndarray:
"""
Apply convolution filter to image with gaussian image kernel
    TODO: Verify this method with tensorflow
    https://stackoverflow.com/questions/48097941/strided-convolution-of-2d-in-numpy
    Args:
        image ([np.ndarray]): input image (2-D, possibly with singleton batch/channel axes).
        kernel_size ([int]): side length of the square Gaussian kernel.
        sigma ([float]): standard deviation of the Gaussian.
        dtype ([type], optional): output dtype. Defaults to np.float32.
        strides ([int], optional): subsampling step applied after filtering. Defaults to 1.
    Returns:
        [np.ndarray]: blurred (and optionally strided) image with a trailing channel axis.
"""
kernel = gaussian_kernel2d(kernel_size, sigma)
    image = np.asarray(image, dtype=np.float32)
    # signal.convolve2d only accepts 2-D inputs, so drop singleton batch/channel axes
    image = np.squeeze(image)
    blurred = signal.convolve2d(image, kernel, mode='same')[::strides, ::strides]
    # restore a trailing channel axis so the downstream cv2/tf resizing keeps working
    return blurred[..., np.newaxis].astype(dtype)
def image_shape(image: np.ndarray, dtype=np.int32) -> np.ndarray:
shape = image.shape
shape = shape[:2] if len(image.shape) == 3 else shape[1:3]
return shape
def scale_shape(image: np.ndarray, scale: float):
    shape = np.asarray(image_shape(image), dtype=np.float32)
    # math.ceil cannot handle arrays; use np.ceil and return plain ints for cv2.resize
    shape = np.ceil(shape * scale)
    return tuple(int(v) for v in shape)
def rescale(image: np.ndarray, scale: float, dtype=np.float32, **kwargs) -> np.ndarray:
assert len(image.shape) in (3, 4), 'The tensor must be of dimension 3 or 4'
image = image.astype(np.float32)
    new_h, new_w = scale_shape(image, scale)
    interpolation = kwargs.pop('interpolation', cv2.INTER_CUBIC)
    # cv2.resize expects the target size as (width, height)
    rescaled_image = cv2.resize(image, (new_w, new_h), interpolation=interpolation)
    if rescaled_image.ndim == 2 and image.ndim == 3:
        # cv2 drops a singleton channel axis; put it back to keep the 3-D contract
        rescaled_image = rescaled_image[..., np.newaxis]
    return rescaled_image.astype(dtype)
def read_image(filename: str, **kwargs) -> np.ndarray:
mode = kwargs.pop('mode', cv2.IMREAD_UNCHANGED)
return cv2.imread(filename, flags=mode)
def image_preprocess(image: np.ndarray, SCALING_FACTOR=1 / 4) -> np.ndarray:
"""
#### Image Normalization
The first step for DIQA is to pre-process the images. The image is converted into grayscale,
and then a low-pass filter is applied. The low-pass filter is defined as:
\begin{align*}
\hat{I} = I_{gray} - I^{low}
\end{align*}
where the low-frequency image is the result of the following algorithm:
1. Blur the grayscale image.
2. Downscale it by a factor of SCALING_FACTOR.
3. Upscale it back to the original size.
The main reasons for this normalization are (1) the Human Visual System (HVS) is not sensitive to changes
in the low-frequency band, and (2) image distortions barely affect the low-frequency component of images.
Arguments:
        image {np.ndarray} -- input RGB image (or batch of RGB images).
    Returns:
        np.ndarray -- normalized (high-frequency) grayscale image.
"""
image = tf.cast(image, tf.float32)
image = tf.image.rgb_to_grayscale(image)
image_low = gaussian_filter(image, 16, 7 / 6)
    image_low = rescale(image_low, SCALING_FACTOR, interpolation=cv2.INTER_CUBIC)  # rescale() is OpenCV-based, so pass a cv2 interpolation flag
image_low = tf.image.resize(image_low,
size=image_shape(image),
method=tf.image.ResizeMethod.BICUBIC)
return image - tf.cast(image_low, image.dtype)
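# Minimal usage sketch (illustrative; "sample.jpg" is a placeholder path and the BGR->RGB
# conversion is an assumption because read_image() relies on cv2.imread):
# img = read_image("sample.jpg")
# img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# high_freq = image_preprocess(img)
# print(high_freq.shape)  # (H, W, 1)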
|
[
"numpy.math.exp",
"numpy.meshgrid",
"tensorflow.image.rgb_to_grayscale",
"numpy.sum",
"scipy.signal.convolve2d",
"tensorflow.cast",
"cv2.imread",
"numpy.min",
"numpy.arange",
"numpy.max",
"numpy.math.ceil",
"numpy.round",
"cv2.resize"
] |
[((640, 666), 'tensorflow.cast', 'tf.cast', (['image', 'np.float32'], {}), '(image, np.float32)\n', (647, 666), True, 'import tensorflow as tf\n'), ((780, 821), 'tensorflow.cast', 'tf.cast', (['normalized_image', 'original_dtype'], {}), '(normalized_image, original_dtype)\n', (787, 821), True, 'import tensorflow as tf\n'), ((1024, 1046), 'numpy.arange', 'np.arange', (['kernel_size'], {}), '(kernel_size)\n', (1033, 1046), True, 'import numpy as np\n'), ((1058, 1085), 'numpy.meshgrid', 'np.meshgrid', (['krange', 'krange'], {}), '(krange, krange)\n', (1069, 1085), True, 'import numpy as np\n'), ((1101, 1126), 'numpy.round', 'np.round', (['(kernel_size / 2)'], {}), '(kernel_size / 2)\n', (1109, 1126), True, 'import numpy as np\n'), ((2100, 2126), 'tensorflow.cast', 'tf.cast', (['image', 'tf.float32'], {}), '(image, tf.float32)\n', (2107, 2126), True, 'import tensorflow as tf\n'), ((2586, 2613), 'numpy.math.ceil', 'np.math.ceil', (['(shape * scale)'], {}), '(shape * scale)\n', (2598, 2613), True, 'import numpy as np\n'), ((2989, 3049), 'cv2.resize', 'cv2.resize', (['image', 'rescale_size'], {'interpolation': 'interpolation'}), '(image, rescale_size, interpolation=interpolation)\n', (2999, 3049), False, 'import cv2\n'), ((3210, 3242), 'cv2.imread', 'cv2.imread', (['filename'], {'flags': 'mode'}), '(filename, flags=mode)\n', (3220, 3242), False, 'import cv2\n'), ((4164, 4190), 'tensorflow.cast', 'tf.cast', (['image', 'tf.float32'], {}), '(image, tf.float32)\n', (4171, 4190), True, 'import tensorflow as tf\n'), ((4203, 4235), 'tensorflow.image.rgb_to_grayscale', 'tf.image.rgb_to_grayscale', (['image'], {}), '(image)\n', (4228, 4235), True, 'import tensorflow as tf\n'), ((599, 612), 'numpy.min', 'np.min', (['image'], {}), '(image)\n', (605, 612), True, 'import numpy as np\n'), ((614, 627), 'numpy.max', 'np.max', (['image'], {}), '(image)\n', (620, 627), True, 'import numpy as np\n'), ((898, 921), 'numpy.sum', 'np.sum', (['kernel'], {'axis': '(-1)'}), '(kernel, axis=-1)\n', (904, 921), True, 'import numpy as np\n'), ((1207, 1257), 'numpy.math.exp', 'np.math.exp', (['(-(x ** 2 + y ** 2) / (2 * sigma ** 2))'], {}), '(-(x ** 2 + y ** 2) / (2 * sigma ** 2))\n', (1218, 1257), True, 'import numpy as np\n'), ((2176, 2251), 'scipy.signal.convolve2d', 'signal.convolve2d', (['image', 'kernel[:, :, np.newaxis, np.newaxis]'], {'mode': '"""same"""'}), "(image, kernel[:, :, np.newaxis, np.newaxis], mode='same')\n", (2193, 2251), False, 'from scipy import signal\n'), ((4564, 4595), 'tensorflow.cast', 'tf.cast', (['image_low', 'image.dtype'], {}), '(image_low, image.dtype)\n', (4571, 4595), True, 'import tensorflow as tf\n')]
|
from django.utils.translation import ugettext_lazy as _
search = {'text': _(u'search'), 'view': 'search', 'famfam': 'zoom'}
search_advanced = {'text': _(u'advanced search'), 'view': 'search_advanced', 'famfam': 'zoom_in'}
search_again = {'text': _(u'search again'), 'view': 'search_again', 'famfam': 'arrow_undo'}
|
[
"django.utils.translation.ugettext_lazy"
] |
[((75, 87), 'django.utils.translation.ugettext_lazy', '_', (['u"""search"""'], {}), "(u'search')\n", (76, 87), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((152, 173), 'django.utils.translation.ugettext_lazy', '_', (['u"""advanced search"""'], {}), "(u'advanced search')\n", (153, 173), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((247, 265), 'django.utils.translation.ugettext_lazy', '_', (['u"""search again"""'], {}), "(u'search again')\n", (248, 265), True, 'from django.utils.translation import ugettext_lazy as _\n')]
|
import sys
import re
from utils.utils import print_writeofd
# First argument is whether or not to proceed with manual checking:
if sys.argv[1] == '-m':
MANUAL_CHECKING = True
elif sys.argv[1] == '-a':
MANUAL_CHECKING = False
else:
print("The first argument must be either -m or -a, see README.md for details")
exit(1)
# Second argument is the output file from async_main_google.py
ifd = open(sys.argv[2], 'r')
# Third argument is the output file for a list of all repos
ofd = open(sys.argv[3], 'w')
# All files number
allfile = 0
# All occurences of found 0 files
no_async = 0
# Determined cases of no parallelism
noparrelism = 0
# Determined cases of no pattern:
nopattern = 0
# Number of exception cases - repo no longer exist
github_exception = 0
# Number of exception cases - processing error
proces_exception = 0
# No retrieve result files
no_retrieve = 0
# Use Lambda function
use_lambda = 0
# Possible parallelism
possible_para = 0
# Determined no parallelism
det_no_para = 0
# Determined parallelism
det_para = 0
# There exists code in between start clause and while clause
between_code = 0
# Determined to be no pattern
det_no_pattern = 0
def get_all_add_up():
return no_async + noparrelism + nopattern + github_exception + proces_exception + no_retrieve + use_lambda + possible_para + det_no_pattern
def scan_block(lines, i, j, keyword):
while i < j:
if keyword in lines[i]:
return True
i += 1
return False
def scan_block_numbers(lines, i, j, keyword):
ret = 0
while i < j:
if keyword in lines[i]:
ret += 1
i += 1
return ret
def print_code(i, j, lines):
while i < j:
if "Nodes in between start statement and while statement" in lines[i]:
i_copy = i
while "------" not in lines[i_copy]:
print(lines[i_copy])
i_copy += 1
break
i += 1
safe_list = ["if response:", "job_id = response['JobId']",
"synthesis_task = {'taskId': response['SynthesisTask']['TaskId']", "'taskStatus': 'inProgress'}", "taskId = response['SynthesisTask']['TaskId']"]
def check_safe_list(string):
for safes in safe_list:
if safes in string:
return True
return False
def judge_code(i, j, lines):
while i < j:
if "Nodes in between start statement and while statement" in lines[i]:
i_copy = i + 1
while "------" not in lines[i_copy]:
if lines[i_copy].isspace():
i_copy += 1
continue
if check_safe_list(lines[i_copy]):
i_copy += 1
continue
if "operation.done" in lines[i_copy] or "operation.result" in lines[i_copy]:
return True
return False
i_copy += 1
return True
i += 1
return False
lines = ifd.readlines()
i = 0
while i < len(lines):
begin = get_all_add_up()
allfile += 1
j = i + 1
while j < len(lines) and lines[j] != "=================================================\n":
j += 1
if j > len(lines):
break
# Now i and j stores the start and end of one search snippet
k = i + 1
# Judge if there is any github exception triggered
if scan_block(lines, i, j, "Other Github Exceptions occurred"):
github_exception += 1
ofd.write("github_exception: {}".format(lines[k]))
i = j
continue
# Judge if there is any other exception triggered
if scan_block(lines, i, j, "EXCEPTION OCCURS"):
proces_exception += 1
ofd.write("process_exception: {}".format(lines[k]))
i = j
continue
# Judge if this is a use lambda function case
# If only relying on auto-tool: this should be a parallelism-used case
if "Use Lambda Function" in lines[j - 1]:
if MANUAL_CHECKING:
print("use_lambda: {}".format(lines[k]))
print("Please inspect the above. Enter 1 if this is a no parallelism case, and enter 2 if this is a use lambda case")
user = input()
while user != '1' and user != '2':
print("PRESS 1 OR 2, NOT ANYTHING ELSE!")
user = input()
if user == '1':
det_no_para += 1
print_writeofd("use_lambda (no_parallelism): {}".format(lines[k].strip("\n")), ofd)
i = j
continue
elif user == '2':
use_lambda += 1
print_writeofd("use_lambda (use_lambda): {}".format(lines[k].strip("\n")), ofd)
i = j
continue
else:
use_lambda += 1
ofd.write("use_lambda: {}".format(lines[k]))
i = j
continue
# Judge if this is a no pattern identified case
# If only relying on auto-tool: this should be a parallelism-used case
if "NO PATTERN IDENTIFIED" in lines[j - 1]:
if MANUAL_CHECKING:
print("\n\n\n\n\n\n")
print_writeofd("no_pattern: {}".format(lines[k].strip("\n")), ofd)
print("Please inspect the above. Enter 1 if this is a no parallelism case, and enter 2 if this is a use-parallelism case, and enter 3 if this shouldn't count")
user = input()
while user != '1' and user != '2' and user != '3':
print("PRESS 1 OR 2 OR 3, NOT ANYTHING ELSE!")
user = input()
if user == '1':
det_no_para += 1
print_writeofd("no_pattern (no_parallelism): {}".format(lines[k].strip("\n")), ofd)
i = j
continue
elif user == '2':
det_para += 1
print_writeofd("no_pattern (parallelism): {}".format(lines[k].strip("\n")), ofd)
i = j
continue
# These are for cases where the repo is actually mis-using the API
elif user == '3':
proces_exception += 1
print_writeofd("no_pattern (process_exception): {}".format(lines[k].strip("\n")), ofd)
i = j
continue
else:
nopattern += 1
ofd.write("no_pattern: {}".format(lines[k]))
i = j
continue
# Judge if this is a no use of async case
# Such project should not be counted towards the total count of projects
if "No use of async" in lines[j - 1]:
no_async += 1
ofd.write("no_async: {}".format(lines[k]))
i = j
continue
# Judge if this is a no retrieve result case
# Such project should not be counted towards the total count of projects
if "No retrieve result" in lines[j - 1]:
no_retrieve += 1
ofd.write("no_retrieve: {}".format(lines[k]))
i = j
continue
# At this point there shouldn't be any "operating missing", sanity check:
if scan_block(lines, i, j, "operation") and scan_block(lines, i, j, "missing") and (not scan_block(lines, i, j, "Pattern identified")):
print("Operation missing while it's neither use lambda nor no pattern identified: {}".format(lines[k]))
exit(1)
# Check if needs to prompt users on codes between start and while statement:
if scan_block(lines, i, j, "Nodes in between start statement and while statement"):
# If these two numbers equal then need to prompt users:
if scan_block_numbers(lines, i, j, "Nodes in between start statement and while statement") == scan_block_numbers(lines, i, j, "Pattern identified"):
between_code += 1
if MANUAL_CHECKING:
print("\n\n\n\n\n\n")
print_code(i, j, lines)
print("Please inspect the above. Enter 1 if can proceed, and enter 2 if this is a use_parallelism case")
user = input()
while user != '1' and user != '2':
print("PRESS 1 OR 2, NOT ANYTHING ELSE!")
user = input()
if user == '1':
print_writeofd("code_between (proceeds): {}".format(lines[k].strip('\n')), ofd)
elif user == '2':
det_para += 1
print_writeofd("code_between (parallelism): {}".format(lines[k].strip('\n')), ofd)
i = j
continue
# If not manual checking, then just count this as a no parallelism use case
else:
if not judge_code(i, j, lines):
det_no_pattern += 1
i = j
continue
# Judge if this is a no use of parallelism case
if "No use of parallelism" in lines[j - 1]:
noparrelism += 1
ofd.write("no_parallelism: {}".format(lines[k]))
i = j
continue
while i < j:
if "***" in lines[i]:
i_copy = i
while i_copy < j:
if "BOTH IDENTIFIED IN THE SAME FILE" in lines[i_copy]:
# Only do the following if doing manual checking
if MANUAL_CHECKING:
possible_para += 1
print("\n\n\n\n\n\n")
print(lines[i])
i += 1
while i < j and "========================" not in lines[i]:
print(lines[i])
i += 1
if i != j:
print(lines[i])
print("Please inspect the above. Enter 1 if this is a no parallelism case, and enter 2 if this is a use-parallelism case")
user = input()
while user != '1' and user != '2':
print("PRESS 1 OR 2, NOT ANYTHING ELSE!")
user = input()
if user == '1':
det_no_para += 1
print_writeofd("possible_parallelism (no_parallelism): {}".format(lines[k].strip("\n")), ofd)
elif user == '2':
det_para += 1
print_writeofd("possible_parallelism (parallelism): {}".format(lines[k].strip("\n")), ofd)
break
else:
i += 1
while i < j and "========================" not in lines[i]:
i += 1
ofd.write("possible_parallelism: {}".format(lines[k]))
possible_para += 1
break
i_copy += 1
if i_copy == j:
ofd.write("no_parallelism: {}".format(lines[k]))
noparrelism += 1
break
i += 1
i = j
ofd.write("\n\n==================================================================\n")
if not MANUAL_CHECKING:
print_writeofd("{}, Total files searched".format(allfile), ofd)
print_writeofd("BEFORE MANUAL INSPECTION:", ofd)
print_writeofd("{}, No use of Async".format(no_async), ofd)
print_writeofd("{}, Github search exceptions".format(github_exception), ofd)
print_writeofd("{}, Processing exceptions".format(proces_exception), ofd)
print_writeofd("{}, Use of Lambda Function".format(use_lambda), ofd)
print_writeofd("{}, No retrieve result".format(no_retrieve), ofd)
print_writeofd("{}, No pattern identified".format(nopattern + det_no_pattern), ofd)
print_writeofd("{}, No use of parallelism".format(noparrelism), ofd)
print_writeofd("{}, Possible use of parallel cases".format(possible_para), ofd)
print_writeofd("RELYING ON AUTO TOOL: {} NO USE OF PARALELLISM".format(noparrelism), ofd)
print_writeofd("RELYING ON AUTO TOOL: {} PARALELLISM USED".format(possible_para + nopattern + det_no_pattern + use_lambda), ofd)
print_writeofd("RELYING ON AUTO TOOL: {} RELEVANT TOTAL PROJECTS".format(noparrelism + possible_para + nopattern + det_no_pattern + use_lambda), ofd)
elif MANUAL_CHECKING:
print_writeofd("", ofd)
print_writeofd("", ofd)
print_writeofd("After MANUAL INSPECTION:", ofd)
print_writeofd("{}, No use of Async".format(no_async), ofd)
print_writeofd("{}, Github search exceptions".format(github_exception), ofd)
print_writeofd("{}, Processing exceptions".format(proces_exception), ofd)
print_writeofd("{}, Use of Lambda Function".format(use_lambda), ofd)
print_writeofd("{}, No retrieve result".format(no_retrieve), ofd)
print_writeofd("{}, No pattern identified".format(nopattern + det_no_pattern), ofd)
print_writeofd("{}, No use of parallelism".format(noparrelism + det_no_para), ofd)
print_writeofd("{}, Use of parallel cases".format(det_para), ofd)
print_writeofd("RELYING ON MANUAL CHECKING: {} NO USE OF PARALELLISM".format(noparrelism + det_no_para), ofd)
print_writeofd("RELYING ON MANUAL CHECKING: {} PARALELLISM USED".format(det_para + use_lambda), ofd)
print_writeofd("RELYING ON MANUAL CHECKING: {} RELEVANT TOTAL PROJECTS".format(noparrelism + det_no_para + det_para + use_lambda), ofd)
ofd.close()
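# Example invocation (illustrative; the script and file names are placeholders, matching
# the argument descriptions commented at the top of this script):
#   python classify_repos.py -a async_main_google_output.txt repo_classification.txt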
|
[
"utils.utils.print_writeofd"
] |
[((11140, 11188), 'utils.utils.print_writeofd', 'print_writeofd', (['"""BEFORE MANUAL INSPECTION:"""', 'ofd'], {}), "('BEFORE MANUAL INSPECTION:', ofd)\n", (11154, 11188), False, 'from utils.utils import print_writeofd\n'), ((12208, 12231), 'utils.utils.print_writeofd', 'print_writeofd', (['""""""', 'ofd'], {}), "('', ofd)\n", (12222, 12231), False, 'from utils.utils import print_writeofd\n'), ((12236, 12259), 'utils.utils.print_writeofd', 'print_writeofd', (['""""""', 'ofd'], {}), "('', ofd)\n", (12250, 12259), False, 'from utils.utils import print_writeofd\n'), ((12264, 12311), 'utils.utils.print_writeofd', 'print_writeofd', (['"""After MANUAL INSPECTION:"""', 'ofd'], {}), "('After MANUAL INSPECTION:', ofd)\n", (12278, 12311), False, 'from utils.utils import print_writeofd\n')]
|
import sqlite3
def connect_to_db(db_name="rpg_db.sqlite3"):
return sqlite3.connect(db_name)
def execute_query(cursor, query):
cursor.execute(query)
return cursor.fetchall()
GET_CHARACTERS = """
SELECT *
FROM charactercreator_character
"""
CHARACTER_COUNT = """
SELECT COUNT(*)
FROM charactercreator_character
"""
CLASS_COUNT = """
SELECT (SELECT COUNT(*) FROM charactercreator_cleric) AS cleric,
(SELECT COUNT(*) FROM charactercreator_fighter) AS fighter,
(SELECT COUNT(*) FROM charactercreator_mage) AS mage,
(SELECT COUNT(*) FROM charactercreator_necromancer) AS necromancer,
           (SELECT COUNT(*) FROM charactercreator_thief) AS thief
"""
ITEM_COUNT = """
SELECT COUNT(*)
FROM armory_item
"""
WEP_COUNT = """
SELECT COUNT(*) name
FROM armory_item
INNER JOIN armory_weapon
ON armory_item.item_id = armory_weapon.item_ptr_id
"""
ITEMS_NO_WEPS = """
SELECT(
SELECT COUNT(*)
FROM armory_item
) -
(SELECT COUNT(*)
FROM armory_weapon
)
"""
CHAR_ITEM_COUNT = """
SELECT character_id, COUNT(*)
FROM charactercreator_character_inventory
GROUP BY character_id LIMIT 20;
"""
CHAR_WEP_COUNT = """
SELECT charactercreator_character_inventory.character_id, COUNT(*)
FROM charactercreator_character_inventory
INNER JOIN armory_weapon ON charactercreator_character_inventory.item_id = armory_weapon.item_ptr_id
GROUP BY charactercreator_character_inventory.character_id LIMIT 20
"""
AVG_WEAPONS = """
SELECT AVG(num_weapons)
FROM
(
SELECT charactercreator_character_inventory.character_id, COUNT(*) AS num_weapons
FROM charactercreator_character_inventory
INNER JOIN armory_weapon ON charactercreator_character_inventory.item_id = armory_weapon.item_ptr_id
GROUP BY charactercreator_character_inventory.character_id
)
"""
AVG_ITEMS = """
SELECT AVG(num_items)
FROM
(
SELECT charactercreator_character_inventory.character_id, COUNT(*) AS num_items
FROM charactercreator_character_inventory
INNER JOIN armory_item ON charactercreator_character_inventory.item_id = armory_item.item_id
GROUP BY charactercreator_character_inventory.character_id
)
"""
if __name__ == "__main__":
conn = connect_to_db()
curs = conn.cursor()
char_count = execute_query(curs, CHARACTER_COUNT)
results = execute_query(curs, GET_CHARACTERS)
class_count = execute_query(curs, CLASS_COUNT)
item_count = execute_query(curs, ITEM_COUNT)
wep_count = execute_query(curs, WEP_COUNT)
items_no_weps = execute_query(curs, ITEMS_NO_WEPS)
char_item_count = execute_query(curs, CHAR_ITEM_COUNT)
char_wep_count = execute_query(curs, CHAR_WEP_COUNT)
avg_items = execute_query(curs, AVG_ITEMS)
avg_weapons = execute_query(curs, AVG_WEAPONS)
print(results[0])
print("Character Count:", char_count)
print("Class Count (cleric, fighter, mage, necromancer, theif):", class_count)
print("Item Count", item_count)
print("Weapon Count:", wep_count)
print("Items without Weapons:", items_no_weps)
print("Items per character ID:", char_item_count)
print("Weapons per character ID:", char_wep_count)
print("Average Number of Items Per Character:", avg_items)
print("Average Number of Weapons Per Character:", avg_weapons)
|
[
"sqlite3.connect"
] |
[((72, 96), 'sqlite3.connect', 'sqlite3.connect', (['db_name'], {}), '(db_name)\n', (87, 96), False, 'import sqlite3\n')]
|
import unittest
from letter_capitalize import LetterCapitalize
class TestWordCapitalize(unittest.TestCase):
def test_word_capitalize(self):
self.assertEqual(LetterCapitalize("hello world"), "Hello World")
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"letter_capitalize.LetterCapitalize"
] |
[((251, 266), 'unittest.main', 'unittest.main', ([], {}), '()\n', (264, 266), False, 'import unittest\n'), ((171, 202), 'letter_capitalize.LetterCapitalize', 'LetterCapitalize', (['"""hello world"""'], {}), "('hello world')\n", (187, 202), False, 'from letter_capitalize import LetterCapitalize\n')]
|
#!/usr/bin/env python
# Spider and start listening for passive requests
import sys
from zapv2 import ZAPv2
from zap_common import *
#Configuration
zap_ip = 'localhost'
port = 12345
spiderTimeoutInMin = 2
startupTimeoutInMin=1
target='http://localhost:8080'
def main(argv):
#Initialize Zap API
http_proxy = 'http://' + zap_ip + ':' + str(port)
https_proxy = 'http://' + zap_ip + ':' + str(port)
zap = ZAPv2(proxies={'http': http_proxy, 'https': https_proxy})
    #Check until ZAP is running
wait_for_zap_start(zap, startupTimeoutInMin*60)
#Check that target is reachable
zap_access_target(zap, target)
# Use both spider
zap_spider(zap, target)
zap_ajax_spider(zap, target, spiderTimeoutInMin)
if __name__ == "__main__":
main(sys.argv[1:])
|
[
"zapv2.ZAPv2"
] |
[((411, 468), 'zapv2.ZAPv2', 'ZAPv2', ([], {'proxies': "{'http': http_proxy, 'https': https_proxy}"}), "(proxies={'http': http_proxy, 'https': https_proxy})\n", (416, 468), False, 'from zapv2 import ZAPv2\n')]
|
from sparknlp.annotator import XlmRoBertaSentenceEmbeddings
class Sentence_XLM:
@staticmethod
def get_default_model():
return XlmRoBertaSentenceEmbeddings.pretrained() \
.setInputCols("sentence", "token") \
.setOutputCol("sentence_xlm_roberta")
@staticmethod
def get_pretrained_model(name, language):
return XlmRoBertaSentenceEmbeddings.pretrained(name, language) \
.setInputCols("sentence", "token") \
.setOutputCol("sentence_xlm_roberta")
|
[
"sparknlp.annotator.XlmRoBertaSentenceEmbeddings.pretrained"
] |
[((143, 184), 'sparknlp.annotator.XlmRoBertaSentenceEmbeddings.pretrained', 'XlmRoBertaSentenceEmbeddings.pretrained', ([], {}), '()\n', (182, 184), False, 'from sparknlp.annotator import XlmRoBertaSentenceEmbeddings\n'), ((358, 413), 'sparknlp.annotator.XlmRoBertaSentenceEmbeddings.pretrained', 'XlmRoBertaSentenceEmbeddings.pretrained', (['name', 'language'], {}), '(name, language)\n', (397, 413), False, 'from sparknlp.annotator import XlmRoBertaSentenceEmbeddings\n')]
|
import json
import pprint
def pformat(value):
"""
    Format given object: Try JSON first and fall back to pformat()
(JSON dumps are nicer than pprint.pformat() ;)
"""
try:
value = json.dumps(value, indent=4, sort_keys=True, ensure_ascii=False)
except TypeError:
# Fallback if values are not serializable with JSON:
value = pprint.pformat(value, width=120)
return value
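# Minimal usage sketch (illustrative, not part of the original module):
# pformat({"b": 1, "a": 2})   # JSON-serializable -> indented JSON with sorted keys
# pformat({1, 2, 3})          # sets make json.dumps raise TypeError -> pprint fallback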
|
[
"pprint.pformat",
"json.dumps"
] |
[((205, 268), 'json.dumps', 'json.dumps', (['value'], {'indent': '(4)', 'sort_keys': '(True)', 'ensure_ascii': '(False)'}), '(value, indent=4, sort_keys=True, ensure_ascii=False)\n', (215, 268), False, 'import json\n'), ((368, 400), 'pprint.pformat', 'pprint.pformat', (['value'], {'width': '(120)'}), '(value, width=120)\n', (382, 400), False, 'import pprint\n')]
|
#----------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#----------------------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import print_function
import os
from mmdnn.conversion.examples.darknet import darknet as cdarknet
from mmdnn.conversion.examples.imagenet_test import TestKit
from mmdnn.conversion.examples.extractor import base_extractor
from mmdnn.conversion.common.utils import download_file
class darknet_extractor(base_extractor):
_base_model_url = "https://raw.githubusercontent.com/pjreddie/darknet/master/"
architecture_map = {
'yolov3' : {
'config' : _base_model_url + "cfg/yolov3.cfg",
'weights' : "https://pjreddie.com/media/files/yolov3.weights"
},
'yolov2' :{
'config' : _base_model_url + "cfg/yolov2.cfg",
'weights' : "https://pjreddie.com/media/files/yolov2.weights"
}
}
@classmethod
def download(cls, architecture, path = './'):
if cls.sanity_check(architecture):
cfg_name = architecture + ".cfg"
architecture_file = download_file(cls.architecture_map[architecture]['config'], directory=path, local_fname=cfg_name)
if not architecture_file:
return None
weight_name = architecture + ".weights"
weight_file = download_file(cls.architecture_map[architecture]['weights'], directory=path, local_fname=weight_name)
if not weight_file:
return None
print("Darknet Model {} saved as [{}] and [{}].".format(architecture, architecture_file, weight_file))
return (architecture_file, weight_file)
else:
return None
@classmethod
def inference(cls, architecture, files, model_path, image_path):
import numpy as np
if cls.sanity_check(architecture):
download_file(cls._base_model_url + "cfg/coco.data", directory='./')
download_file(cls._base_model_url + "data/coco.names", directory='./data/')
print(files)
net = cdarknet.load_net(files[0].encode(), files[1].encode(), 0)
meta = cdarknet.load_meta("coco.data".encode())
r = cdarknet.detect(net, meta, image_path.encode())
# print(r)
return r
else:
return None
# d = darknet_extractor()
# model_filename = d.download('yolov3')
# print(model_filename)
# image_path = "./mmdnn/conversion/examples/data/dog.jpg"
# model_path = "./"
# d = darknet_extractor()
# result = d.inference('yolov3', model_filename, model_path, image_path = image_path)
# print(result)
|
[
"mmdnn.conversion.common.utils.download_file"
] |
[((1417, 1518), 'mmdnn.conversion.common.utils.download_file', 'download_file', (["cls.architecture_map[architecture]['config']"], {'directory': 'path', 'local_fname': 'cfg_name'}), "(cls.architecture_map[architecture]['config'], directory=path,\n local_fname=cfg_name)\n", (1430, 1518), False, 'from mmdnn.conversion.common.utils import download_file\n'), ((1660, 1765), 'mmdnn.conversion.common.utils.download_file', 'download_file', (["cls.architecture_map[architecture]['weights']"], {'directory': 'path', 'local_fname': 'weight_name'}), "(cls.architecture_map[architecture]['weights'], directory=path,\n local_fname=weight_name)\n", (1673, 1765), False, 'from mmdnn.conversion.common.utils import download_file\n'), ((2200, 2268), 'mmdnn.conversion.common.utils.download_file', 'download_file', (["(cls._base_model_url + 'cfg/coco.data')"], {'directory': '"""./"""'}), "(cls._base_model_url + 'cfg/coco.data', directory='./')\n", (2213, 2268), False, 'from mmdnn.conversion.common.utils import download_file\n'), ((2281, 2356), 'mmdnn.conversion.common.utils.download_file', 'download_file', (["(cls._base_model_url + 'data/coco.names')"], {'directory': '"""./data/"""'}), "(cls._base_model_url + 'data/coco.names', directory='./data/')\n", (2294, 2356), False, 'from mmdnn.conversion.common.utils import download_file\n')]
|
import matplotlib
from collections import defaultdict, OrderedDict
from plots.DotSetPlot import DotSetPlot
processToTitle = {
"targetMirsECA": "EC activation and\n inflammation",
"targetMirsMonocyte": "Monocyte diff. &\nMacrophage act.",
"targetMirsFCF": "Foam cell formation",
"targetMirsAngio": "Angiogenesis",
"targetMirsVasRemod": "Vascular remodeling",
"targetMirsTCell": "T cell differentiation &\n activation",
"targetMirsCholEfflux": "Cholesterol efflux",
"targetMirsSMCProlif": "SMC proliferation &\n SMC migration"
}
network2nicename = {
"CV-IPN-Plaque_destabilization_1": "(VI) Plaque destabilization",
"CV-IPN-Platelet_activation_1": "(V) Platelet activation",
"CV-IPN-Smooth_muscle_cell_activation_1": "(IV) SMC activation",
"CV-IPN-Foam_cell_formation_1": "(III) Foam cell formation",
"CV-IPN-Endothelial_cell-monocyte_interaction_1": "(II) EC/MC interaction",
"CV-IPN-Endothelial_cell_activation_1": "(I) EC activation",
}
celltype2nicename = {
'SMC': "Smooth muscle cell",
'EC': "Endothelial cell",
"MC": "Macrophage/Monocyte",
"FC": "Foam cell"
}
def source2index( sname ):
if sname != None and sname.startswith("CV-IPN"):
return 0
return 1
mirna2evidenceCellT = defaultdict(lambda: defaultdict(set))
mirna2evidenceCBN = defaultdict(lambda: defaultdict(set))
mirna2evidenceProcess = defaultdict(lambda: defaultdict(set))
pubmed2tuples = defaultdict(set)
mirna2evflows = defaultdict(set)
dataLabels = defaultdict(set)
#"miR-98", "miR-125a"
manuMirnas = ["miR-98", "miR-125a","miR-21", "miR-34a", "miR-93", "miR-125b", "miR-126", "miR-146a", "miR-155", "miR-370"]
#manuMirnas = ['miR-126', 'miR-21', 'miR-155', 'miR-146a', 'miR-125b', 'miR-34a', 'miR-499', 'miR-221', 'miR-370', 'miR-504']
#manuMirnas = ['miR-181c', 'miR-222', 'miR-126', 'miR-155', 'miR-125b', 'miR-34a', 'miR-370', 'miR-146a', 'miR-21', 'miR-93']
manuMirnas = list({'miR-155', 'miR-93', 'miR-181c', 'miR-370', 'miR-222', 'miR-125b', 'miR-34a', 'miR-146a', 'miR-126', 'miR-21'})
manuMirnas = ["miR-98", "miR-125a","miR-21", "miR-34a", "miR-93", "miR-125b", "miR-126", "miR-146a", "miR-155", "miR-370"]
miRNA2InteractionPartner = defaultdict(set)
miRNA2Evidences = defaultdict(set)
with open("/mnt/d/yanc_network/disease_pw_important_cbn.txt", 'r') as fin:
for line in fin:
line = line.strip().split("\t")
#CV-IPN-Endothelial_cell-monocyte_interaction_1 VEGFA miR-140 EC 27035554
cbn = network2nicename.get(line[0], line[0])
gene = line[1]
miRNA = line[2]
cellT = celltype2nicename.get(line[3], line[3])
evidence = line[4]
if "US" in miRNA:
continue
miRNA2InteractionPartner[miRNA].add(gene)
miRNA2Evidences[miRNA].add(evidence)
dataLabels["Cell-Type"].add(cellT)
dataLabels["CBN"].add(cbn)
mirna2evidenceCellT[miRNA][evidence].add(cellT)
mirna2evidenceCBN[miRNA][evidence].add(cbn)
#important_process
with open("/mnt/d/yanc_network/pathway_important_process.txt", 'r') as fin:
for line in fin:
line = line.strip().split("\t")
#CV-IPN-Endothelial_cell-monocyte_interaction_1 VEGFA miR-140 EC 27035554
process = processToTitle.get(line[0], line[0])
gene = line[1]
miRNA = line[2]
cellT = celltype2nicename.get(line[3], line[3])
evidence = line[4]
if "US" in miRNA:
continue
miRNA2InteractionPartner[miRNA].add(gene)
miRNA2Evidences[miRNA].add(evidence)
dataLabels["Cell-Type"].add(cellT)
dataLabels["Process"].add(process)
mirna2evidenceCellT[miRNA][evidence].add(cellT)
mirna2evidenceProcess[miRNA][evidence].add(process)
for x in manuMirnas:
print(x, miRNA2InteractionPartner[x], miRNA2Evidences[x])
allMiRNA = set()
for x in mirna2evidenceCellT:
allMiRNA.add(x)
for x in mirna2evidenceProcess:
allMiRNA.add(x)
for x in mirna2evidenceCBN:
allMiRNA.add(x)
dataUpPlot = {}
for miRNA in allMiRNA:
miRNAEvs = set()
for x in mirna2evidenceCBN.get(miRNA, []):
miRNAEvs.add(x)
for x in mirna2evidenceProcess.get(miRNA, []):
miRNAEvs.add(x)
for x in mirna2evidenceCellT.get(miRNA, []):
miRNAEvs.add(x)
miRNAData = {
"CBN": set(),
"Process": set(),
"Cell-Type": set()
}
for ev in miRNAEvs:
cellT = mirna2evidenceCellT[miRNA].get(ev, None)
cbns = mirna2evidenceCBN[miRNA].get(ev, None)
process = mirna2evidenceProcess[miRNA].get(ev, None)
if cellT != None:
miRNAData['Cell-Type'] = miRNAData['Cell-Type'].union(cellT)
if cbns != None:
miRNAData['CBN'] = miRNAData['CBN'].union(cbns)
if process != None:
miRNAData['Process'] = miRNAData['Process'].union(process)
dataUpPlot[miRNA] = miRNAData
orderDict = OrderedDict()
for type in ["CBN", "Process", "Cell-Type"]:
orderDict[type] = sorted(dataLabels[type])
def makeMIRNAName(miRNA):
return miRNA
return miRNA + " (" + str(len(miRNA2InteractionPartner[miRNA])) + ","+ str(len(miRNA2Evidences[miRNA]))+")"
filteredData = OrderedDict()
for miRNA in manuMirnas:
if miRNA in dataUpPlot:
filteredData[makeMIRNAName(miRNA)] = dataUpPlot[miRNA]
else:
print("Missing manu", miRNA)
stages2 = 0
stages0 = 0
from natsort import natsorted
for miRNA in natsorted(dataUpPlot, key=lambda x: x.split("-")[1]):
stages = dataUpPlot[miRNA]['CBN']
if len(miRNA2Evidences[miRNA]) <= 0:
continue
if len(dataUpPlot[miRNA]['Process']) == 0:
pass#continue
if len(dataUpPlot[miRNA]['CBN']) == 0:
continue
filteredData[makeMIRNAName(miRNA)] = dataUpPlot[miRNA]
print(len(dataUpPlot))
print(len(filteredData))
print(stages2)
print(stages0)
fout = open("/mnt/c/Users/mjopp/Desktop/d3-parsets-d3v5/titanic.csv", "w")
print("miRNA", "CBN", "PROCESS", "CELLTYPE", sep=",", file=fout)
mirna2printTuple = defaultdict(list)
for miRNA in allMiRNA:
miRNAEvs = set()
for x in mirna2evidenceCBN.get(miRNA, []):
miRNAEvs.add(x)
for x in mirna2evidenceProcess.get(miRNA, []):
miRNAEvs.add(x)
for x in mirna2evidenceCellT.get(miRNA, []):
miRNAEvs.add(x)
miRNAData = {
"CBN": set(),
"Process": set(),
"Cell-Type": set()
}
for ev in miRNAEvs:
cellT = mirna2evidenceCellT[miRNA].get(ev, ["None"])
cbns = mirna2evidenceCBN[miRNA].get(ev, ["None"])
processes = mirna2evidenceProcess[miRNA].get(ev, ["None"])
if miRNA == "miR-98":
            print(ev, cbns, cellT, processes)  # debug output; use the names bound in this loop
if "None" in cbns:# or "None" in processes:
continue
for celltype in cellT:
for cbn in cbns:
for process in processes:
mirna2printTuple[miRNA].append( (cbn, process, celltype) )
selMirnas = sorted([x for x in mirna2printTuple], reverse=True, key=lambda x: len(mirna2printTuple[x]))
print(selMirnas[0:10])
for miRNA in manuMirnas:
for (cbn, process, celltype) in mirna2printTuple[miRNA]:
print(miRNA, cbn.replace("\n", " ").replace(" ", " "), process.replace("\n", " ").replace(" ", " "), celltype, sep=",", file=fout)
interactorCounts = [len(miRNA2InteractionPartner[miRNA]) for miRNA in filteredData]
pubmedCounts = [len(miRNA2Evidences[miRNA]) for miRNA in filteredData]
DotSetPlot().plot(dataLabels, filteredData, numbers={"Interactor Count":interactorCounts , "PubMed Evidence Count": pubmedCounts },sortData=False,order=orderDict)#, max=30)
matplotlib.pyplot.savefig("/mnt/d/owncloud/markus/uni/publications/miReview/dotset_important.pdf")
matplotlib.pyplot.show()
|
[
"matplotlib.pyplot.show",
"collections.defaultdict",
"plots.DotSetPlot.DotSetPlot",
"collections.OrderedDict",
"matplotlib.pyplot.savefig"
] |
[((1470, 1486), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (1481, 1486), False, 'from collections import defaultdict, OrderedDict\n'), ((1504, 1520), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (1515, 1520), False, 'from collections import defaultdict, OrderedDict\n'), ((1537, 1553), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (1548, 1553), False, 'from collections import defaultdict, OrderedDict\n'), ((2246, 2262), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (2257, 2262), False, 'from collections import defaultdict, OrderedDict\n'), ((2282, 2298), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (2293, 2298), False, 'from collections import defaultdict, OrderedDict\n'), ((5090, 5103), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5101, 5103), False, 'from collections import defaultdict, OrderedDict\n'), ((5380, 5393), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5391, 5393), False, 'from collections import defaultdict, OrderedDict\n'), ((6247, 6264), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (6258, 6264), False, 'from collections import defaultdict, OrderedDict\n'), ((7918, 8021), 'matplotlib.pyplot.savefig', 'matplotlib.pyplot.savefig', (['"""/mnt/d/owncloud/markus/uni/publications/miReview/dotset_important.pdf"""'], {}), "(\n '/mnt/d/owncloud/markus/uni/publications/miReview/dotset_important.pdf')\n", (7943, 8021), False, 'import matplotlib\n'), ((8018, 8042), 'matplotlib.pyplot.show', 'matplotlib.pyplot.show', ([], {}), '()\n', (8040, 8042), False, 'import matplotlib\n'), ((1311, 1327), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (1322, 1327), False, 'from collections import defaultdict, OrderedDict\n'), ((1370, 1386), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (1381, 1386), False, 'from collections import defaultdict, OrderedDict\n'), ((1433, 1449), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (1444, 1449), False, 'from collections import defaultdict, OrderedDict\n'), ((7744, 7756), 'plots.DotSetPlot.DotSetPlot', 'DotSetPlot', ([], {}), '()\n', (7754, 7756), False, 'from plots.DotSetPlot import DotSetPlot\n')]
|
# Libraries
from pandas.io.formats.format import DataFrameFormatter
from streamlit_folium import folium_static
import pandas as pd
import numpy as np
import seaborn as sns
import streamlit as st
import sys
#! Add folder "src" as a package path
project_path = "Put/here/the/path/to/the/project's/root/folder/house_rocket_analysis"
sys.path.append(f'{project_path}/src/')
import visualization.maps as maps
#! App configuration
st.set_page_config(layout='wide')
@st.cache(allow_output_mutation=True)
def load_data(path):
data = pd.read_csv(path)
return data
# Pages definition
def sidebar():
st.sidebar.title('Select Page')
page_select = st.sidebar.selectbox( label='', options=['Final Reports', 'Maps'])
return page_select
def page_final_reports(renamed_houses, recommended_houses):
# Filter Recommended Houses to Buy DataFrame
st.sidebar.title('Search for recommended home for purchase')
id_input = str(st.sidebar.text_input(label='Enter the ID')).strip() # Input ID to search house
st.title('House Rocket Analysis')
st.title('')
st.title(f'There are {renamed_houses.shape[0]} properties available for purchase today.')
st.dataframe(renamed_houses)
st.header("Main considerations of the analysis.")
st.markdown('* The variables with the highest positive correlation with Price are "Grade" and "Sqft living".')
st.markdown('* Houses rated 8 or higher in the "Grid" (Quality of the building mateirais of the house) attribute have the best average price per rank and number of homes.')
st.markdown('* The average price of renovated homes is 22% higher than unrenovated homes.')
st.markdown('* The biggest correlation with Price and what can be added in a makeover is the bathroom and the amount of square feet of the house.')
st.markdown('* The best season for re-selling homes is Spring.')
st.header(
"""After these analyses, the recommended houses for House Rocket to buy follow the conditions:
Places with grade of variable "Grid" (Quality of the building mateirais of the house) equal or greater than 8
Houses with condition equal to or greater than 3
Houses priced below the median price in your region (ZipCode)""")
st.header("""The re-sale price of the after-purchased homes is based on the various "Total Avarage Price", which means the average value of the region's house prices (ZipCode) and the average price of the Season that the house was announced.
If the purchase price of the house is higher than the "Total Avarage Price", then the suggested selling price will be the purchase price + 10%.
If the purchase price of the house is less than the "Total Avarage Price", then the suggested selling price will be the purchase price + 30%.""")
st.header("""A column has also been added in the table representing the recommended re-sale price and the profit from re-selling the house if it is renewed.
If the house is renovated, the re-sale price and the after-sale profit will be 20% higher.
""")
st.title(f'After analysis, {recommended_houses.shape[0]} properties are recommended for purchase and re-sale.')
st.subheader('New columns have also been added at the end of the table. They represent the recommended selling price of the houses, whether it has been renovated or not, in addition to the possible profit if sold at the recommended price.')
st.text("")
try:
if not id_input:
st.dataframe(recommended_houses)
else:
if int(id_input) in recommended_houses['ID'].values:
st.dataframe(recommended_houses.loc[recommended_houses['ID'] == int(id_input)])
else:
st.error(
'Property with this ID is not recommended for purchase or there is no home with this ID.')
except:
st.error('ERROR: Input value is not a valid ID.')
#finally:
return None
def page_maps(renamed_houses, recommended_houses):
# SideBar - - -
st.sidebar.title('Filter Map')
filter_data = st.sidebar.radio(label='Filter Houses', options=[
'All houses', 'Recommended homes to buy'])
# Filters - -
if filter_data == 'Recommended homes to buy':
st.title('Map of all recommended homes for purchase')
st.header('')
data = recommended_houses.copy()
else:
st.title('Map of all available houses')
st.header('')
data = renamed_houses.copy()
# Map of density
houses_map = maps.houses_map(data)
folium_static(houses_map, width=1200, height=700)
# Map with avarage price per region (ZipCode)
st.title('Avarage Price per Region')
avg_region = maps.price_per_region(renamed_houses)
folium_static(avg_region, width=1200, height=700)
if __name__ == '__main__':
path = f"{project_path}/data/interim/renamed_data.csv"
renamed_houses = load_data(path)
path = f"{project_path}/reports/data/final_houses_sale.csv"
recommended_houses = load_data(path)
page_select = sidebar()
if page_select == 'Final Reports':
page_final_reports(renamed_houses=renamed_houses, recommended_houses=recommended_houses)
else:
page_maps(renamed_houses=renamed_houses, recommended_houses=recommended_houses)
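# Illustrative sketch (assumed helper, not part of the app) of the re-sale pricing rule
# described in the report headers above: +10% over the purchase price when it is above
# the regional/seasonal average, +30% otherwise, and a further +20% when renovated.
# def suggested_sale_price(purchase_price, total_avg_price, renovated=False):
#     margin = 1.10 if purchase_price > total_avg_price else 1.30
#     price = purchase_price * margin
#     return price * 1.20 if renovated else price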
|
[
"sys.path.append",
"streamlit.subheader",
"streamlit.set_page_config",
"streamlit.markdown",
"streamlit.dataframe",
"visualization.maps.houses_map",
"streamlit.cache",
"streamlit_folium.folium_static",
"pandas.read_csv",
"streamlit.header",
"streamlit.error",
"streamlit.title",
"streamlit.sidebar.title",
"streamlit.sidebar.selectbox",
"streamlit.text",
"streamlit.sidebar.radio",
"visualization.maps.price_per_region",
"streamlit.sidebar.text_input"
] |
[((332, 371), 'sys.path.append', 'sys.path.append', (['f"""{project_path}/src/"""'], {}), "(f'{project_path}/src/')\n", (347, 371), False, 'import sys\n'), ((428, 461), 'streamlit.set_page_config', 'st.set_page_config', ([], {'layout': '"""wide"""'}), "(layout='wide')\n", (446, 461), True, 'import streamlit as st\n'), ((464, 500), 'streamlit.cache', 'st.cache', ([], {'allow_output_mutation': '(True)'}), '(allow_output_mutation=True)\n', (472, 500), True, 'import streamlit as st\n'), ((533, 550), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (544, 550), True, 'import pandas as pd\n'), ((608, 639), 'streamlit.sidebar.title', 'st.sidebar.title', (['"""Select Page"""'], {}), "('Select Page')\n", (624, 639), True, 'import streamlit as st\n'), ((658, 723), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', ([], {'label': '""""""', 'options': "['Final Reports', 'Maps']"}), "(label='', options=['Final Reports', 'Maps'])\n", (678, 723), True, 'import streamlit as st\n'), ((864, 924), 'streamlit.sidebar.title', 'st.sidebar.title', (['"""Search for recommended home for purchase"""'], {}), "('Search for recommended home for purchase')\n", (880, 924), True, 'import streamlit as st\n'), ((1030, 1063), 'streamlit.title', 'st.title', (['"""House Rocket Analysis"""'], {}), "('House Rocket Analysis')\n", (1038, 1063), True, 'import streamlit as st\n'), ((1068, 1080), 'streamlit.title', 'st.title', (['""""""'], {}), "('')\n", (1076, 1080), True, 'import streamlit as st\n'), ((1086, 1185), 'streamlit.title', 'st.title', (['f"""There are {renamed_houses.shape[0]} properties available for purchase today."""'], {}), "(\n f'There are {renamed_houses.shape[0]} properties available for purchase today.'\n )\n", (1094, 1185), True, 'import streamlit as st\n'), ((1180, 1208), 'streamlit.dataframe', 'st.dataframe', (['renamed_houses'], {}), '(renamed_houses)\n', (1192, 1208), True, 'import streamlit as st\n'), ((1214, 1263), 'streamlit.header', 'st.header', (['"""Main considerations of the analysis."""'], {}), "('Main considerations of the analysis.')\n", (1223, 1263), True, 'import streamlit as st\n'), ((1269, 1389), 'streamlit.markdown', 'st.markdown', (['"""* The variables with the highest positive correlation with Price are "Grade" and "Sqft living"."""'], {}), '(\n \'* The variables with the highest positive correlation with Price are "Grade" and "Sqft living".\'\n )\n', (1280, 1389), True, 'import streamlit as st\n'), ((1384, 1566), 'streamlit.markdown', 'st.markdown', (['"""* Houses rated 8 or higher in the "Grid" (Quality of the building mateirais of the house) attribute have the best average price per rank and number of homes."""'], {}), '(\n \'* Houses rated 8 or higher in the "Grid" (Quality of the building mateirais of the house) attribute have the best average price per rank and number of homes.\'\n )\n', (1395, 1566), True, 'import streamlit as st\n'), ((1561, 1662), 'streamlit.markdown', 'st.markdown', (['"""* The average price of renovated homes is 22% higher than unrenovated homes."""'], {}), "(\n '* The average price of renovated homes is 22% higher than unrenovated homes.'\n )\n", (1572, 1662), True, 'import streamlit as st\n'), ((1657, 1814), 'streamlit.markdown', 'st.markdown', (['"""* The biggest correlation with Price and what can be added in a makeover is the bathroom and the amount of square feet of the house."""'], {}), "(\n '* The biggest correlation with Price and what can be added in a makeover is the bathroom and the amount of square feet of the house.'\n )\n", (1668, 
1814), True, 'import streamlit as st\n'), ((1809, 1873), 'streamlit.markdown', 'st.markdown', (['"""* The best season for re-selling homes is Spring."""'], {}), "('* The best season for re-selling homes is Spring.')\n", (1820, 1873), True, 'import streamlit as st\n'), ((1879, 2230), 'streamlit.header', 'st.header', (['"""After these analyses, the recommended houses for House Rocket to buy follow the conditions:\n Places with grade of variable "Grid" (Quality of the building mateirais of the house) equal or greater than 8\n Houses with condition equal to or greater than 3\n Houses priced below the median price in your region (ZipCode)"""'], {}), '(\n """After these analyses, the recommended houses for House Rocket to buy follow the conditions:\n Places with grade of variable "Grid" (Quality of the building mateirais of the house) equal or greater than 8\n Houses with condition equal to or greater than 3\n Houses priced below the median price in your region (ZipCode)"""\n )\n', (1888, 2230), True, 'import streamlit as st\n'), ((2235, 2785), 'streamlit.header', 'st.header', (['"""The re-sale price of the after-purchased homes is based on the various "Total Avarage Price", which means the average value of the region\'s house prices (ZipCode) and the average price of the Season that the house was announced.\n If the purchase price of the house is higher than the "Total Avarage Price", then the suggested selling price will be the purchase price + 10%.\n\n If the purchase price of the house is less than the "Total Avarage Price", then the suggested selling price will be the purchase price + 30%."""'], {}), '(\n """The re-sale price of the after-purchased homes is based on the various "Total Avarage Price", which means the average value of the region\'s house prices (ZipCode) and the average price of the Season that the house was announced.\n If the purchase price of the house is higher than the "Total Avarage Price", then the suggested selling price will be the purchase price + 10%.\n\n If the purchase price of the house is less than the "Total Avarage Price", then the suggested selling price will be the purchase price + 30%."""\n )\n', (2244, 2785), True, 'import streamlit as st\n'), ((2781, 3051), 'streamlit.header', 'st.header', (['"""A column has also been added in the table representing the recommended re-sale price and the profit from re-selling the house if it is renewed.\n If the house is renovated, the re-sale price and the after-sale profit will be 20% higher.\n """'], {}), '(\n """A column has also been added in the table representing the recommended re-sale price and the profit from re-selling the house if it is renewed.\n If the house is renovated, the re-sale price and the after-sale profit will be 20% higher.\n """\n )\n', (2790, 3051), True, 'import streamlit as st\n'), ((3047, 3168), 'streamlit.title', 'st.title', (['f"""After analysis, {recommended_houses.shape[0]} properties are recommended for purchase and re-sale."""'], {}), "(\n f'After analysis, {recommended_houses.shape[0]} properties are recommended for purchase and re-sale.'\n )\n", (3055, 3168), True, 'import streamlit as st\n'), ((3164, 3414), 'streamlit.subheader', 'st.subheader', (['"""New columns have also been added at the end of the table. They represent the recommended selling price of the houses, whether it has been renovated or not, in addition to the possible profit if sold at the recommended price."""'], {}), "(\n 'New columns have also been added at the end of the table. 
They represent the recommended selling price of the houses, whether it has been renovated or not, in addition to the possible profit if sold at the recommended price.'\n )\n", (3176, 3414), True, 'import streamlit as st\n'), ((3409, 3420), 'streamlit.text', 'st.text', (['""""""'], {}), "('')\n", (3416, 3420), True, 'import streamlit as st\n'), ((4018, 4048), 'streamlit.sidebar.title', 'st.sidebar.title', (['"""Filter Map"""'], {}), "('Filter Map')\n", (4034, 4048), True, 'import streamlit as st\n'), ((4067, 4162), 'streamlit.sidebar.radio', 'st.sidebar.radio', ([], {'label': '"""Filter Houses"""', 'options': "['All houses', 'Recommended homes to buy']"}), "(label='Filter Houses', options=['All houses',\n 'Recommended homes to buy'])\n", (4083, 4162), True, 'import streamlit as st\n'), ((4546, 4567), 'visualization.maps.houses_map', 'maps.houses_map', (['data'], {}), '(data)\n', (4561, 4567), True, 'import visualization.maps as maps\n'), ((4572, 4621), 'streamlit_folium.folium_static', 'folium_static', (['houses_map'], {'width': '(1200)', 'height': '(700)'}), '(houses_map, width=1200, height=700)\n', (4585, 4621), False, 'from streamlit_folium import folium_static\n'), ((4677, 4713), 'streamlit.title', 'st.title', (['"""Avarage Price per Region"""'], {}), "('Avarage Price per Region')\n", (4685, 4713), True, 'import streamlit as st\n'), ((4731, 4768), 'visualization.maps.price_per_region', 'maps.price_per_region', (['renamed_houses'], {}), '(renamed_houses)\n', (4752, 4768), True, 'import visualization.maps as maps\n'), ((4773, 4822), 'streamlit_folium.folium_static', 'folium_static', (['avg_region'], {'width': '(1200)', 'height': '(700)'}), '(avg_region, width=1200, height=700)\n', (4786, 4822), False, 'from streamlit_folium import folium_static\n'), ((4272, 4325), 'streamlit.title', 'st.title', (['"""Map of all recommended homes for purchase"""'], {}), "('Map of all recommended homes for purchase')\n", (4280, 4325), True, 'import streamlit as st\n'), ((4334, 4347), 'streamlit.header', 'st.header', (['""""""'], {}), "('')\n", (4343, 4347), True, 'import streamlit as st\n'), ((4408, 4447), 'streamlit.title', 'st.title', (['"""Map of all available houses"""'], {}), "('Map of all available houses')\n", (4416, 4447), True, 'import streamlit as st\n'), ((4456, 4469), 'streamlit.header', 'st.header', (['""""""'], {}), "('')\n", (4465, 4469), True, 'import streamlit as st\n'), ((3468, 3500), 'streamlit.dataframe', 'st.dataframe', (['recommended_houses'], {}), '(recommended_houses)\n', (3480, 3500), True, 'import streamlit as st\n'), ((3851, 3900), 'streamlit.error', 'st.error', (['"""ERROR: Input value is not a valid ID."""'], {}), "('ERROR: Input value is not a valid ID.')\n", (3859, 3900), True, 'import streamlit as st\n'), ((944, 987), 'streamlit.sidebar.text_input', 'st.sidebar.text_input', ([], {'label': '"""Enter the ID"""'}), "(label='Enter the ID')\n", (965, 987), True, 'import streamlit as st\n'), ((3710, 3819), 'streamlit.error', 'st.error', (['"""Property with this ID is not recommended for purchase or there is no home with this ID."""'], {}), "(\n 'Property with this ID is not recommended for purchase or there is no home with this ID.'\n )\n", (3718, 3819), True, 'import streamlit as st\n')]
|
from typing import Any, Dict, List, Type, TypeVar
import attr
from ..models.severity_response_body import SeverityResponseBody
T = TypeVar("T", bound="SeveritiesListResponseBody")
@attr.s(auto_attribs=True)
class SeveritiesListResponseBody:
"""
Example:
{'severities': [{'created_at': '2021-08-17T13:28:57.801578Z', 'description': "It's not really that bad, everyone
chill", 'id': '01FCNDV6P870EA6S7TK1DSYDG0', 'name': 'Minor', 'rank': 1, 'updated_at':
'2021-08-17T13:28:57.801578Z'}, {'created_at': '2021-08-17T13:28:57.801578Z', 'description': "It's not really
that bad, everyone chill", 'id': '01FCNDV6P870EA6S7TK1DSYDG0', 'name': 'Minor', 'rank': 1, 'updated_at':
'2021-08-17T13:28:57.801578Z'}]}
Attributes:
severities (List[SeverityResponseBody]): Example: [{'created_at': '2021-08-17T13:28:57.801578Z', 'description':
"It's not really that bad, everyone chill", 'id': '01FCNDV6P870EA6S7TK1DSYDG0', 'name': 'Minor', 'rank': 1,
'updated_at': '2021-08-17T13:28:57.801578Z'}, {'created_at': '2021-08-17T13:28:57.801578Z', 'description': "It's
not really that bad, everyone chill", 'id': '01FCNDV6P870EA6S7TK1DSYDG0', 'name': 'Minor', 'rank': 1,
'updated_at': '2021-08-17T13:28:57.801578Z'}, {'created_at': '2021-08-17T13:28:57.801578Z', 'description': "It's
not really that bad, everyone chill", 'id': '01FCNDV6P870EA6S7TK1DSYDG0', 'name': 'Minor', 'rank': 1,
'updated_at': '2021-08-17T13:28:57.801578Z'}].
"""
severities: List[SeverityResponseBody]
additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)
def to_dict(self) -> Dict[str, Any]:
severities = []
for severities_item_data in self.severities:
severities_item = severities_item_data.to_dict()
severities.append(severities_item)
field_dict: Dict[str, Any] = {}
field_dict.update(self.additional_properties)
field_dict.update(
{
"severities": severities,
}
)
return field_dict
@classmethod
def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
d = src_dict.copy()
severities = []
_severities = d.pop("severities")
for severities_item_data in _severities:
severities_item = SeverityResponseBody.from_dict(severities_item_data)
severities.append(severities_item)
severities_list_response_body = cls(
severities=severities,
)
severities_list_response_body.additional_properties = d
return severities_list_response_body
@property
def additional_keys(self) -> List[str]:
return list(self.additional_properties.keys())
def __getitem__(self, key: str) -> Any:
return self.additional_properties[key]
def __setitem__(self, key: str, value: Any) -> None:
self.additional_properties[key] = value
def __delitem__(self, key: str) -> None:
del self.additional_properties[key]
def __contains__(self, key: str) -> bool:
return key in self.additional_properties
|
[
"attr.s",
"typing.TypeVar",
"attr.ib"
] |
[((134, 182), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {'bound': '"""SeveritiesListResponseBody"""'}), "('T', bound='SeveritiesListResponseBody')\n", (141, 182), False, 'from typing import Any, Dict, List, Type, TypeVar\n'), ((186, 211), 'attr.s', 'attr.s', ([], {'auto_attribs': '(True)'}), '(auto_attribs=True)\n', (192, 211), False, 'import attr\n'), ((1661, 1694), 'attr.ib', 'attr.ib', ([], {'init': '(False)', 'factory': 'dict'}), '(init=False, factory=dict)\n', (1668, 1694), False, 'import attr\n')]
|
import numpy as np
from matplotlib import pyplot as plt
def loadFile(filename):
f = open(filename,'r')
text = f.read()
f.close()
rewards = []
steps = []
for line in text.split('\n'):
pieces = line.split(',')
if(len(pieces) == 2):
rewards.append(float(pieces[0]))
steps.append(int(pieces[1]))
return rewards,steps
def loadFiles(files):
rewards = []
steps = []
for f in files:
r,s = loadFile(f)
rewards.extend(r)
steps.extend(s)
return rewards,steps,
def plotResults(rewards,steps,outputFile):
plt.subplot(2,1,1)
plt.plot(rewards)
plt.xlabel('number of games played')
plt.ylabel('reward received per game')
plt.subplot(2,1,2)
plt.plot(steps)
plt.xlabel('number of games played')
plt.ylabel('number of actions taken per game')
plt.savefig(outputFile)
def Average(rewards,n):
return [np.mean(rewards[i:i+n]) for i in range(len(rewards)-n)]
if(__name__ == "__main__"):
LargeAgent = ['LargeAgent/2018-01-15 11:50:29.380284']
SmallAgent = ['SmallAgent/2018-01-15 13:12:18.774147']
rewards,steps = loadFiles(['./SmallAgent/29-01-2018'])
rewards = Average(rewards,10)
steps = Average(steps,10)
plotResults(rewards,steps,"./test.png")
|
[
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.plot",
"numpy.mean",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((559, 579), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (570, 579), True, 'from matplotlib import pyplot as plt\n'), ((580, 597), 'matplotlib.pyplot.plot', 'plt.plot', (['rewards'], {}), '(rewards)\n', (588, 597), True, 'from matplotlib import pyplot as plt\n'), ((600, 636), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""number of games played"""'], {}), "('number of games played')\n", (610, 636), True, 'from matplotlib import pyplot as plt\n'), ((639, 677), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""reward received per game"""'], {}), "('reward received per game')\n", (649, 677), True, 'from matplotlib import pyplot as plt\n'), ((683, 703), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (694, 703), True, 'from matplotlib import pyplot as plt\n'), ((704, 719), 'matplotlib.pyplot.plot', 'plt.plot', (['steps'], {}), '(steps)\n', (712, 719), True, 'from matplotlib import pyplot as plt\n'), ((722, 758), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""number of games played"""'], {}), "('number of games played')\n", (732, 758), True, 'from matplotlib import pyplot as plt\n'), ((761, 807), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""number of actions taken per game"""'], {}), "('number of actions taken per game')\n", (771, 807), True, 'from matplotlib import pyplot as plt\n'), ((813, 836), 'matplotlib.pyplot.savefig', 'plt.savefig', (['outputFile'], {}), '(outputFile)\n', (824, 836), True, 'from matplotlib import pyplot as plt\n'), ((872, 897), 'numpy.mean', 'np.mean', (['rewards[i:i + n]'], {}), '(rewards[i:i + n])\n', (879, 897), True, 'import numpy as np\n')]
|
import pyshark
class Audio_Scraper:
def __init__(self, pcap, filter, outfile):
self.pcap = pcap
self.filter = filter
self.outfile = outfile
def scraper(self):
rtp_list =[]
pcap_file = self.pcap
out_file = self.outfile
print("Scraping: " + pcap_file)
filter_type = self.filter
cap = pyshark.FileCapture(pcap_file,display_filter=filter_type)
raw_audio = open(out_file,'wb')
for i in cap:
try:
rtp = i[3]
#data = rtp.get_field_value('DATA')
data = rtp.payload
if ":" in data:
print(data)
rtp_list.append(data.split(":"))
except:
pass
for rtp_packet in rtp_list:
packet = " ".join(rtp_packet)
print(packet)
audio = bytearray.fromhex(packet)
raw_audio.write(audio)
print("\nFinished outputing raw audio: %s" % out_file)
# pcap_test = Audio_Scraper("my.pcap","rtp","my_audio.raw").scraper()
|
[
"pyshark.FileCapture"
] |
[((364, 422), 'pyshark.FileCapture', 'pyshark.FileCapture', (['pcap_file'], {'display_filter': 'filter_type'}), '(pcap_file, display_filter=filter_type)\n', (383, 422), False, 'import pyshark\n')]
|
from __future__ import print_function
import time
import avacloud_client_python
from avacloud_client_python.rest import ApiException
import requests
import os
import json
client_id = 'use_your_own_value'
client_secret = '<PASSWORD>_your_own_value'
url = 'https://identity.dangl-it.com/connect/token'
payload = {'grant_type': 'client_credentials', 'scope': 'avacloud'}
response = requests.post(url, data=payload, auth=(client_id, client_secret))
access_token = response.json()['access_token']
# Configure OAuth2 access token for authorization: Dangl.Identity
configuration = avacloud_client_python.Configuration()
configuration.access_token = access_token
# Here, a very small project is created and saved as GAEB file
try:
ava_api_instance = avacloud_client_python.AvaConversionApi(avacloud_client_python.ApiClient(configuration))
ava_project = json.loads("""{
"projectInformation": {
"itemNumberSchema": {
"tiers": [
{
"length": 2,
"tierType": "Group"
},
{
"length": 2,
"tierType": "Group"
},
{
"length": 4,
"tierType": "Position"
}
]
}
},
"serviceSpecifications": [
{
"projectTaxRate": 0.19,
"elements": [
{
"elementTypeDiscriminator": "ServiceSpecificationGroupDto",
"shortText": "Parent Group",
"itemNumber": {
"stringRepresentation": "01."
},
"elements": [
{
"elementTypeDiscriminator": "ServiceSpecificationGroupDto",
"shortText": "Sub Group",
"itemNumber": {
"stringRepresentation": "01.02."
},
"elements": [
{
"elementTypeDiscriminator": "PositionDto",
"shortText": "Hello Position!",
"itemNumber": {
"stringRepresentation": "01.02.0500"
},
"quantityOverride": 10,
"unitPriceOverride": 5
}
]
}
]
}
]
}
]
}""")
# See https://github.com/swagger-api/swagger-codegen/issues/2305 for more info about why you should use _preload_content=False
# If the _preload_content parameter is not set to False, the binary response content (file) will be attempted to be decoded as UTF8 string,
# this would lead to an error. Instead, the raw response should be used
api_response = ava_api_instance.ava_conversion_convert_to_gaeb(ava_project,
destination_gaeb_type='GaebXml_V3_2',
target_exchange_phase_transform='Grant',
_preload_content=False)
with open("./NewProject.X86", "wb") as gaeb_file:
gaeb_file.write(api_response.data)
except ApiException as e:
print("Exception when calling AvaConversionApi->ava_conversion_convert_to_gaeb: %s\n" % e)
|
[
"requests.post",
"avacloud_client_python.Configuration",
"json.loads",
"avacloud_client_python.ApiClient"
] |
[((381, 446), 'requests.post', 'requests.post', (['url'], {'data': 'payload', 'auth': '(client_id, client_secret)'}), '(url, data=payload, auth=(client_id, client_secret))\n', (394, 446), False, 'import requests\n'), ((577, 615), 'avacloud_client_python.Configuration', 'avacloud_client_python.Configuration', ([], {}), '()\n', (613, 615), False, 'import avacloud_client_python\n'), ((857, 2317), 'json.loads', 'json.loads', (['"""{\n "projectInformation": {\n "itemNumberSchema": {\n "tiers": [\n {\n "length": 2,\n "tierType": "Group"\n },\n {\n "length": 2,\n "tierType": "Group"\n },\n {\n "length": 4,\n "tierType": "Position"\n }\n ]\n }\n },\n "serviceSpecifications": [\n {\n "projectTaxRate": 0.19,\n "elements": [\n {\n "elementTypeDiscriminator": "ServiceSpecificationGroupDto",\n "shortText": "Parent Group",\n "itemNumber": {\n "stringRepresentation": "01."\n },\n "elements": [\n {\n "elementTypeDiscriminator": "ServiceSpecificationGroupDto",\n "shortText": "Sub Group",\n "itemNumber": {\n "stringRepresentation": "01.02."\n },\n "elements": [\n {\n "elementTypeDiscriminator": "PositionDto",\n "shortText": "Hello Position!",\n "itemNumber": {\n "stringRepresentation": "01.02.0500"\n },\n "quantityOverride": 10,\n "unitPriceOverride": 5\n }\n ]\n }\n ]\n }\n ]\n }\n ]\n }"""'], {}), '(\n """{\n "projectInformation": {\n "itemNumberSchema": {\n "tiers": [\n {\n "length": 2,\n "tierType": "Group"\n },\n {\n "length": 2,\n "tierType": "Group"\n },\n {\n "length": 4,\n "tierType": "Position"\n }\n ]\n }\n },\n "serviceSpecifications": [\n {\n "projectTaxRate": 0.19,\n "elements": [\n {\n "elementTypeDiscriminator": "ServiceSpecificationGroupDto",\n "shortText": "Parent Group",\n "itemNumber": {\n "stringRepresentation": "01."\n },\n "elements": [\n {\n "elementTypeDiscriminator": "ServiceSpecificationGroupDto",\n "shortText": "Sub Group",\n "itemNumber": {\n "stringRepresentation": "01.02."\n },\n "elements": [\n {\n "elementTypeDiscriminator": "PositionDto",\n "shortText": "Hello Position!",\n "itemNumber": {\n "stringRepresentation": "01.02.0500"\n },\n "quantityOverride": 10,\n "unitPriceOverride": 5\n }\n ]\n }\n ]\n }\n ]\n }\n ]\n }"""\n )\n', (867, 2317), False, 'import json\n'), ((790, 837), 'avacloud_client_python.ApiClient', 'avacloud_client_python.ApiClient', (['configuration'], {}), '(configuration)\n', (822, 837), False, 'import avacloud_client_python\n')]
|
from rapidtest import Test, Case, TreeNode
from solutions.binary_tree_preorder_traversal import Solution
with Test(Solution) as test:
Case(TreeNode.from_string('[1,null,2,3]'), result=[1, 2, 3])
Case(TreeNode.from_string('[]'), result=[])
Case(TreeNode.from_string('[1]'), result=[1])
Case(TreeNode.from_string('[1,2]'), result=[1, 2])
Case(TreeNode.from_string('[1,2]'), result=[1, 2])
Case(TreeNode.from_string(
'[1,2,null,4,5,null,6,2,null,6,8,4,null,1,2,4,null,6,8,0,9,null,7,5,4,null,3,null,2,3]'),
result=[1, 2, 4, 6, 6, 1, 0, 3, 9, 2, 7, 8, 4, 5, 4, 5, 2, 4, 6, 3, 8, 2])
|
[
"rapidtest.Test",
"rapidtest.TreeNode.from_string"
] |
[((111, 125), 'rapidtest.Test', 'Test', (['Solution'], {}), '(Solution)\n', (115, 125), False, 'from rapidtest import Test, Case, TreeNode\n'), ((144, 180), 'rapidtest.TreeNode.from_string', 'TreeNode.from_string', (['"""[1,null,2,3]"""'], {}), "('[1,null,2,3]')\n", (164, 180), False, 'from rapidtest import Test, Case, TreeNode\n'), ((209, 235), 'rapidtest.TreeNode.from_string', 'TreeNode.from_string', (['"""[]"""'], {}), "('[]')\n", (229, 235), False, 'from rapidtest import Test, Case, TreeNode\n'), ((257, 284), 'rapidtest.TreeNode.from_string', 'TreeNode.from_string', (['"""[1]"""'], {}), "('[1]')\n", (277, 284), False, 'from rapidtest import Test, Case, TreeNode\n'), ((307, 336), 'rapidtest.TreeNode.from_string', 'TreeNode.from_string', (['"""[1,2]"""'], {}), "('[1,2]')\n", (327, 336), False, 'from rapidtest import Test, Case, TreeNode\n'), ((362, 391), 'rapidtest.TreeNode.from_string', 'TreeNode.from_string', (['"""[1,2]"""'], {}), "('[1,2]')\n", (382, 391), False, 'from rapidtest import Test, Case, TreeNode\n'), ((417, 536), 'rapidtest.TreeNode.from_string', 'TreeNode.from_string', (['"""[1,2,null,4,5,null,6,2,null,6,8,4,null,1,2,4,null,6,8,0,9,null,7,5,4,null,3,null,2,3]"""'], {}), "(\n '[1,2,null,4,5,null,6,2,null,6,8,4,null,1,2,4,null,6,8,0,9,null,7,5,4,null,3,null,2,3]'\n )\n", (437, 536), False, 'from rapidtest import Test, Case, TreeNode\n')]
|
"""create tokens table
Revision ID: 1<PASSWORD>
Revises:
Create Date: 2020-12-12 01:44:28.195736
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy import Table, Column, Integer, String, Boolean, DateTime, ForeignKey
from sqlalchemy.engine.reflection import Inspector
from flask_sqlalchemy import SQLAlchemy
# revision identifiers, used by Alembic.
revision = '1<PASSWORD>'
down_revision = None
branch_labels = None
depends_on = None
db = SQLAlchemy()
def upgrade():
conn = op.get_bind()
inspector = Inspector.from_engine(conn)
tables = inspector.get_table_names()
if 'ips' not in tables:
op.create_table(
'ips',
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('address', sa.String(255), nullable=True)
)
if 'tokens' not in tables:
op.create_table(
'tokens',
sa.Column('name', String(255), primary_key=True),
sa.Column('expiration_date', DateTime, nullable=True),
sa.Column('max_usage', Integer, default=1),
sa.Column('used', Integer, default=0),
sa.Column('disabled', Boolean, default=False),
sa.Column('ips', Integer, ForeignKey('association.id'))
)
else:
try:
with op.batch_alter_table('tokens') as batch_op:
batch_op.alter_column('ex_date', new_column_name='expiration_date', nullable=True)
batch_op.alter_column('one_time', new_column_name='max_usage')
batch_op.add_column(
Column('disabled', Boolean, default=False)
)
except KeyError:
pass
if 'association' not in tables:
op.create_table(
'association', db.Model.metadata,
Column('ips', String, ForeignKey('ips.address'), primary_key=True),
Column('tokens', Integer, ForeignKey('tokens.name'), primary_key=True)
)
op.execute("update tokens set expiration_date=null where expiration_date='None'")
def downgrade():
op.alter_column('tokens', 'expiration_date', new_column_name='ex_date')
op.alter_column('tokens', 'max_usage', new_column_name='one_time')
|
[
"alembic.op.alter_column",
"sqlalchemy.ForeignKey",
"flask_sqlalchemy.SQLAlchemy",
"sqlalchemy.engine.reflection.Inspector.from_engine",
"alembic.op.execute",
"alembic.op.get_bind",
"sqlalchemy.Column",
"sqlalchemy.String",
"alembic.op.batch_alter_table"
] |
[((461, 473), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', ([], {}), '()\n', (471, 473), False, 'from flask_sqlalchemy import SQLAlchemy\n'), ((502, 515), 'alembic.op.get_bind', 'op.get_bind', ([], {}), '()\n', (513, 515), False, 'from alembic import op\n'), ((532, 559), 'sqlalchemy.engine.reflection.Inspector.from_engine', 'Inspector.from_engine', (['conn'], {}), '(conn)\n', (553, 559), False, 'from sqlalchemy.engine.reflection import Inspector\n'), ((1972, 2058), 'alembic.op.execute', 'op.execute', (['"""update tokens set expiration_date=null where expiration_date=\'None\'"""'], {}), '(\n "update tokens set expiration_date=null where expiration_date=\'None\'")\n', (1982, 2058), False, 'from alembic import op\n'), ((2078, 2149), 'alembic.op.alter_column', 'op.alter_column', (['"""tokens"""', '"""expiration_date"""'], {'new_column_name': '"""ex_date"""'}), "('tokens', 'expiration_date', new_column_name='ex_date')\n", (2093, 2149), False, 'from alembic import op\n'), ((2154, 2220), 'alembic.op.alter_column', 'op.alter_column', (['"""tokens"""', '"""max_usage"""'], {'new_column_name': '"""one_time"""'}), "('tokens', 'max_usage', new_column_name='one_time')\n", (2169, 2220), False, 'from alembic import op\n'), ((686, 731), 'sqlalchemy.Column', 'sa.Column', (['"""id"""', 'sa.Integer'], {'primary_key': '(True)'}), "('id', sa.Integer, primary_key=True)\n", (695, 731), True, 'import sqlalchemy as sa\n'), ((960, 1013), 'sqlalchemy.Column', 'sa.Column', (['"""expiration_date"""', 'DateTime'], {'nullable': '(True)'}), "('expiration_date', DateTime, nullable=True)\n", (969, 1013), True, 'import sqlalchemy as sa\n'), ((1027, 1069), 'sqlalchemy.Column', 'sa.Column', (['"""max_usage"""', 'Integer'], {'default': '(1)'}), "('max_usage', Integer, default=1)\n", (1036, 1069), True, 'import sqlalchemy as sa\n'), ((1083, 1120), 'sqlalchemy.Column', 'sa.Column', (['"""used"""', 'Integer'], {'default': '(0)'}), "('used', Integer, default=0)\n", (1092, 1120), True, 'import sqlalchemy as sa\n'), ((1134, 1179), 'sqlalchemy.Column', 'sa.Column', (['"""disabled"""', 'Boolean'], {'default': '(False)'}), "('disabled', Boolean, default=False)\n", (1143, 1179), True, 'import sqlalchemy as sa\n'), ((766, 780), 'sqlalchemy.String', 'sa.String', (['(255)'], {}), '(255)\n', (775, 780), True, 'import sqlalchemy as sa\n'), ((916, 927), 'sqlalchemy.String', 'String', (['(255)'], {}), '(255)\n', (922, 927), False, 'from sqlalchemy import Table, Column, Integer, String, Boolean, DateTime, ForeignKey\n'), ((1219, 1247), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""association.id"""'], {}), "('association.id')\n", (1229, 1247), False, 'from sqlalchemy import Table, Column, Integer, String, Boolean, DateTime, ForeignKey\n'), ((1299, 1329), 'alembic.op.batch_alter_table', 'op.batch_alter_table', (['"""tokens"""'], {}), "('tokens')\n", (1319, 1329), False, 'from alembic import op\n'), ((1821, 1846), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""ips.address"""'], {}), "('ips.address')\n", (1831, 1846), False, 'from sqlalchemy import Table, Column, Integer, String, Boolean, DateTime, ForeignKey\n'), ((1905, 1930), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""tokens.name"""'], {}), "('tokens.name')\n", (1915, 1930), False, 'from sqlalchemy import Table, Column, Integer, String, Boolean, DateTime, ForeignKey\n'), ((1579, 1621), 'sqlalchemy.Column', 'Column', (['"""disabled"""', 'Boolean'], {'default': '(False)'}), "('disabled', Boolean, default=False)\n", (1585, 1621), False, 'from sqlalchemy import Table, Column, Integer, String, 
Boolean, DateTime, ForeignKey\n')]
|
#!/usr/bin/env python
# coding: utf8
#
# Copyright (c) 2021 Centre National d'Etudes Spatiales (CNES).
#
# This file is part of PANDORA_MCCNN
#
# https://github.com/CNES/Pandora_MCCNN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module contains functions to test the cost volume created by mc_cnn
"""
import unittest
import numpy as np
import torch
import torch.nn as nn
from mc_cnn.run import computes_cost_volume_mc_cnn_fast
from mc_cnn.model.mc_cnn_accurate import AccMcCnnInfer
from mc_cnn.dataset_generator.middlebury_generator import MiddleburyGenerator
from mc_cnn.dataset_generator.datas_fusion_contest_generator import DataFusionContestGenerator
# pylint: disable=no-self-use
class TestMCCNN(unittest.TestCase):
"""
    TestMCCNN class allows testing of the cost volume created by mc_cnn
"""
def setUp(self):
"""
Method called to prepare the test fixture
"""
self.ref_img_0 = np.tile(np.arange(13, dtype=np.float32), (13, 1))
self.sec_img_0 = np.tile(np.arange(13, dtype=np.float32), (13, 1)) + 1
self.ref_img_1 = np.tile(np.arange(13, dtype=np.float32), (13, 1))
self.sec_img_2 = np.tile(np.arange(13, dtype=np.float32), (13, 1)) - 1
def test_computes_cost_volume_mc_cnn_fast(self):
""" "
Test the computes_cost_volume_mc_cnn_fast function
"""
# create reference and secondary features
ref_feature = torch.randn((64, 4, 4), dtype=torch.float64)
sec_features = torch.randn((64, 4, 4), dtype=torch.float64)
cos = nn.CosineSimilarity(dim=0, eps=1e-6)
# Create the ground truth cost volume (row, col, disp)
cv_gt = np.full((4, 4, 5), np.nan)
# disparity -2
cv_gt[:, 2:, 0] = cos(ref_feature[:, :, 2:], sec_features[:, :, 0:2]).cpu().detach().numpy()
# disparity -1
cv_gt[:, 1:, 1] = cos(ref_feature[:, :, 1:], sec_features[:, :, 0:3]).cpu().detach().numpy()
# disparity 0
cv_gt[:, :, 2] = cos(ref_feature[:, :, :], sec_features[:, :, :]).cpu().detach().numpy()
# disparity 1
cv_gt[:, :3, 3] = cos(ref_feature[:, :, :3], sec_features[:, :, 1:4]).cpu().detach().numpy()
# disparity 2
cv_gt[:, :2, 4] = cos(ref_feature[:, :, :2], sec_features[:, :, 2:4]).cpu().detach().numpy()
# The minus sign converts the similarity score to a matching cost
cv_gt *= -1
cv = computes_cost_volume_mc_cnn_fast(ref_feature, sec_features, -2, 2)
# Check if the calculated cost volume is equal to the ground truth (same shape and all elements equals)
np.testing.assert_allclose(cv, cv_gt, rtol=1e-05)
def test_computes_cost_volume_mc_cnn_fast_negative_disp(self):
""" "
Test the computes_cost_volume_mc_cnn_fast function with negative disparities
"""
# create reference and secondary features
ref_feature = torch.randn((64, 4, 4), dtype=torch.float64)
sec_features = torch.randn((64, 4, 4), dtype=torch.float64)
cos = nn.CosineSimilarity(dim=0, eps=1e-6)
# Create the ground truth cost volume (row, col, disp)
cv_gt = np.full((4, 4, 4), np.nan)
# disparity -4
# all nan
# disparity -3
cv_gt[:, 3:, 1] = cos(ref_feature[:, :, 3:], sec_features[:, :, 0:1]).cpu().detach().numpy()
# disparity -2
cv_gt[:, 2:, 2] = cos(ref_feature[:, :, 2:], sec_features[:, :, 0:2]).cpu().detach().numpy()
# disparity -1
cv_gt[:, 1:, 3] = cos(ref_feature[:, :, 1:], sec_features[:, :, 0:3]).cpu().detach().numpy()
# The minus sign converts the similarity score to a matching cost
cv_gt *= -1
cv = computes_cost_volume_mc_cnn_fast(ref_feature, sec_features, -4, -1)
# Check if the calculated cost volume is equal to the ground truth (same shape and all elements equals)
np.testing.assert_allclose(cv, cv_gt, rtol=1e-05)
def test_computes_cost_volume_mc_cnn_fast_positive_disp(self):
""" "
Test the computes_cost_volume_mc_cnn_fast function with positive disparities
"""
# create reference and secondary features
ref_feature = torch.randn((64, 4, 4), dtype=torch.float64)
sec_features = torch.randn((64, 4, 4), dtype=torch.float64)
cos = nn.CosineSimilarity(dim=0, eps=1e-6)
# Create the ground truth cost volume (row, col, disp)
cv_gt = np.full((4, 4, 4), np.nan)
# disparity 1
cv_gt[:, :3, 0] = cos(ref_feature[:, :, :3], sec_features[:, :, 1:4]).cpu().detach().numpy()
# disparity 2
cv_gt[:, :2, 1] = cos(ref_feature[:, :, :2], sec_features[:, :, 2:4]).cpu().detach().numpy()
# disparity 3
cv_gt[:, :1, 2] = cos(ref_feature[:, :, :1], sec_features[:, :, 3:]).cpu().detach().numpy()
# disparity 4
# all nan
# The minus sign converts the similarity score to a matching cost
cv_gt *= -1
cv = computes_cost_volume_mc_cnn_fast(ref_feature, sec_features, 1, 4)
# Check if the calculated cost volume is equal to the ground truth (same shape and all elements equals)
np.testing.assert_allclose(cv, cv_gt, rtol=1e-05)
def sad_cost(self, ref_features, sec_features):
"""
Useful to test the computes_cost_volume_mc_cnn_accurate function
"""
return torch.sum(abs(ref_features[0, :, :, :] - sec_features[0, :, :, :]), dim=0)
def test_computes_cost_volume_mc_cnn_accurate(self):
""" "
Test the computes_cost_volume_mc_cnn_accurate function
"""
# create reference and secondary features
ref_feature = torch.randn((1, 112, 4, 4), dtype=torch.float64)
sec_features = torch.randn((1, 112, 4, 4), dtype=torch.float64)
# Create the ground truth cost volume (row, col, disp)
cv_gt = np.full((4, 4, 5), np.nan)
# disparity -2
cv_gt[:, 2:, 0] = self.sad_cost(ref_feature[:, :, :, 2:], sec_features[:, :, :, 0:2]).cpu().detach().numpy()
# disparity -1
cv_gt[:, 1:, 1] = self.sad_cost(ref_feature[:, :, :, 1:], sec_features[:, :, :, 0:3]).cpu().detach().numpy()
# disparity 0
cv_gt[:, :, 2] = self.sad_cost(ref_feature[:, :, :, :], sec_features[:, :, :, :]).cpu().detach().numpy()
# disparity 1
cv_gt[:, :3, 3] = self.sad_cost(ref_feature[:, :, :, :3], sec_features[:, :, :, 1:4]).cpu().detach().numpy()
# disparity 2
cv_gt[:, :2, 4] = self.sad_cost(ref_feature[:, :, :, :2], sec_features[:, :, :, 2:4]).cpu().detach().numpy()
# The minus sign converts the similarity score to a matching cost
cv_gt *= -1
acc = AccMcCnnInfer()
# Because input shape of nn.Conv2d is (Batch_size, Channel, H, W), we add 1 dimensions
cv = acc.computes_cost_volume_mc_cnn_accurate(ref_feature, sec_features, -2, 2, self.sad_cost)
# Check if the calculated cost volume is equal to the ground truth (same shape and all elements equals)
np.testing.assert_allclose(cv, cv_gt, rtol=1e-05)
def test_computes_cost_volume_mc_cnn_accuratenegative_disp(self):
""" "
Test the computes_cost_volume_mc_cnn_accurate function with negative disparities
"""
# create reference and secondary features
ref_feature = torch.randn((1, 112, 4, 4), dtype=torch.float64)
sec_features = torch.randn((1, 112, 4, 4), dtype=torch.float64)
# Create the ground truth cost volume (row, col, disp)
cv_gt = np.full((4, 4, 4), np.nan)
# disparity -4
# all nan
# disparity -3
cv_gt[:, 3:, 1] = self.sad_cost(ref_feature[:, :, :, 3:], sec_features[:, :, :, 0:1]).cpu().detach().numpy()
# disparity -2
cv_gt[:, 2:, 2] = self.sad_cost(ref_feature[:, :, :, 2:], sec_features[:, :, :, 0:2]).cpu().detach().numpy()
# disparity -1
cv_gt[:, 1:, 3] = self.sad_cost(ref_feature[:, :, :, 1:], sec_features[:, :, :, 0:3]).cpu().detach().numpy()
# The minus sign converts the similarity score to a matching cost
cv_gt *= -1
acc = AccMcCnnInfer()
# Because input shape of nn.Conv2d is (Batch_size, Channel, H, W), we add 1 dimensions
cv = acc.computes_cost_volume_mc_cnn_accurate(ref_feature, sec_features, -4, -1, self.sad_cost)
# Check if the calculated cost volume is equal to the ground truth (same shape and all elements equals)
np.testing.assert_allclose(cv, cv_gt, rtol=1e-05)
def test_computes_cost_volume_mc_cnn_accurate_positive_disp(self):
""" "
Test the computes_cost_volume_mc_cnn_accurate function with positive disparities
"""
# create reference and secondary features
ref_feature = torch.randn((1, 112, 4, 4), dtype=torch.float64)
sec_features = torch.randn((1, 112, 4, 4), dtype=torch.float64)
# Create the ground truth cost volume (row, col, disp)
cv_gt = np.full((4, 4, 4), np.nan)
# disparity 1
cv_gt[:, :3, 0] = self.sad_cost(ref_feature[:, :, :, :3], sec_features[:, :, :, 1:4]).cpu().detach().numpy()
# disparity 2
cv_gt[:, :2, 1] = self.sad_cost(ref_feature[:, :, :, :2], sec_features[:, :, :, 2:4]).cpu().detach().numpy()
# disparity 3
cv_gt[:, :1, 2] = self.sad_cost(ref_feature[:, :, :, :1], sec_features[:, :, :, 3:]).cpu().detach().numpy()
# disparity 4
# all nan
# The minus sign converts the similarity score to a matching cost
cv_gt *= -1
acc = AccMcCnnInfer()
# Because input shape of nn.Conv2d is (Batch_size, Channel, H, W), we add 1 dimensions
cv = acc.computes_cost_volume_mc_cnn_accurate(ref_feature, sec_features, 1, 4, self.sad_cost)
# Check if the calculated cost volume is equal to the ground truth (same shape and all elements equals)
np.testing.assert_allclose(cv, cv_gt, rtol=1e-05)
# pylint: disable=invalid-name
# -> because changing the name here loses the reference to the actual name of the checked function
def test_MiddleburyGenerator(self):
"""
test the function MiddleburyGenerator
"""
        # Script used to create images_middlebury and samples_middlebury :
# pylint: disable=pointless-string-statement
"""
        # shape 1, 2, 13, 13 : 1 exposure, 2 = left and right images
image_pairs_0 = np.zeros((1, 2, 13, 13))
# left
image_pairs_0[0, 0, :, :] = np.tile(np.arange(13), (13, 1))
# right
image_pairs_0[0, 1, :, :] = np.tile(np.arange(13), (13, 1)) + 1
image_pairs_1 = np.zeros((1, 2, 13, 13))
image_pairs_1[0, 0, :, :] = np.tile(np.arange(13), (13, 1))
image_pairs_1[0, 1, :, :] = np.tile(np.arange(13), (13, 1)) - 1
img_file = h5py.File('images_middlebury.hdf5', 'w')
img_0 = [image_pairs_0]
grp = img_file.create_group(str(0))
# 1 illumination
for light in range(len(img_0)):
dset = grp.create_dataset(str(light), data=img_0[light])
img_1 = [image_pairs_1]
grp = img_file.create_group(str(1))
for light in range(len(img_1)):
dset = grp.create_dataset(str(light), data=img_1[light])
sampl_file = h5py.File('sample_middlebury.hdf5', 'w')
# disparity of image_pairs_0
        x0 = np.array([[0., 5., 6., 1.],
                       [0., 7., 7., 1.]])
        # disparity of image_pairs_1
        x1 = np.array([[ 1., 7., 5., -1.],
                       [ 0., 0., 0., 0.]])
sampl_file.create_dataset(str(0), data=x0)
sampl_file.create_dataset(str(1), data=x1)
"""
# Positive disparity
cfg = {
"data_augmentation": False,
"dataset_neg_low": 1,
"dataset_neg_high": 1,
"dataset_pos": 0,
"augmentation_param": {
"vertical_disp": 0,
"scale": 0.8,
"hscale": 0.8,
"hshear": 0.1,
"trans": 0,
"rotate": 28,
"brightness": 1.3,
"contrast": 1.1,
"d_hscale": 0.9,
"d_hshear": 0.3,
"d_vtrans": 1,
"d_rotate": 3,
"d_brightness": 0.7,
"d_contrast": 1.1,
},
}
training_loader = MiddleburyGenerator("tests/sample_middlebury.hdf5", "tests/images_middlebury.hdf5", cfg)
# Patch of shape 3, 11, 11
        # With the first dimension = left patch, right positive patch, right negative patch
patch = training_loader.__getitem__(0)
x_ref_patch = 6
y_ref_patch = 5
patch_size = 5
gt_ref_patch = self.ref_img_0[
y_ref_patch - patch_size : y_ref_patch + patch_size + 1,
x_ref_patch - patch_size : x_ref_patch + patch_size + 1,
]
# disp = 1, with middlebury image convention img_ref(x,y) = img_sec(x-d,y)
disp = 1
x_sec_pos_patch = x_ref_patch - disp
y_sec_pos_patch = 5
gt_sec_pos_patch = self.sec_img_0[
y_sec_pos_patch - patch_size : y_sec_pos_patch + patch_size + 1,
x_sec_pos_patch - patch_size : x_sec_pos_patch + patch_size + 1,
]
# dataset_neg_low & dataset_neg_high = 1, with middlebury image convention img_ref(x,y) = img_sec(x-d,y)
dataset_neg = 1
x_sec_neg_patch = x_ref_patch - disp + dataset_neg
y_sec_neg_patch = 5
gt_sec_neg_patch = self.sec_img_0[
y_sec_neg_patch - patch_size : y_sec_neg_patch + patch_size + 1,
x_sec_neg_patch - patch_size : x_sec_neg_patch + patch_size + 1,
]
gt_path = np.stack((gt_ref_patch, gt_sec_pos_patch, gt_sec_neg_patch), axis=0)
# Check if the calculated patch is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(patch, gt_path)
# negative disparity
patch = training_loader.__getitem__(2)
x_ref_patch = 5
y_ref_patch = 7
patch_size = 5
gt_ref_patch = self.ref_img_0[
y_ref_patch - patch_size : y_ref_patch + patch_size + 1,
x_ref_patch - patch_size : x_ref_patch + patch_size + 1,
]
# disp = -1, with middlebury image convention img_ref(x,y) = img_sec(x-d,y)
disp = -1
x_sec_pos_patch = x_ref_patch - disp
y_sec_pos_patch = 5
gt_sec_pos_patch = self.sec_img_0[
y_sec_pos_patch - patch_size : y_sec_pos_patch + patch_size + 1,
x_sec_pos_patch - patch_size : x_sec_pos_patch + patch_size + 1,
]
# dataset_neg_low & dataset_neg_high = 1, with middlebury image convention img_ref(x,y) = img_sec(x-d,y)
dataset_neg = 1
x_sec_neg_patch = x_ref_patch - disp + dataset_neg
y_sec_neg_patch = 5
gt_sec_neg_patch = self.sec_img_0[
y_sec_neg_patch - patch_size : y_sec_neg_patch + patch_size + 1,
x_sec_neg_patch - patch_size : x_sec_neg_patch + patch_size + 1,
]
gt_path = np.stack((gt_ref_patch, gt_sec_pos_patch, gt_sec_neg_patch), axis=0)
# Check if the calculated patch is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(patch, gt_path)
# pylint: disable=invalid-name
# -> because changing the name here loses the reference to the actual name of the checked function
def test_DataFusionContestGenerator(self):
"""
test the function DataFusionContestGenerator
"""
# pylint: disable=pointless-string-statement
"""
        # Script used to create images_dfc and sample_dfc :
# shape 2, 13, 13 : 2 = left and right images, row, col
image_pairs_0 = np.zeros((2, 13, 13))
# left
image_pairs_0[0, :, :] = np.tile(np.arange(13), (13, 1))
# right
image_pairs_0[1, :, :] = np.tile(np.arange(13), (13, 1)) + 1
image_pairs_1 = np.zeros((2, 13, 13))
image_pairs_1[0, :, :] = np.tile(np.arange(13), (13, 1))
image_pairs_1[1, :, :] = np.tile(np.arange(13), (13, 1)) - 1
img_file = h5py.File('images_dfc.hdf5', 'w')
img_file.create_dataset(str(0), data=image_pairs_0)
img_file.create_dataset(str(1), data=image_pairs_1)
sampl_file = h5py.File('sample_dfc.hdf5', 'w')
# disparity of image_pairs_0
x0 = np.array([[0., 5., 6., 1.],
[0., 7., 7., 1.]])
# disparity of image_pairs_1
x1 = np.array([[ 1., 7., 5., -1.],
[ 0., 0., 0., 0.]])
sampl_file.create_dataset(str(0), data=x0)
sampl_file.create_dataset(str(1), data=x1)
"""
# Positive disparity
cfg = {
"data_augmentation": False,
"dataset_neg_low": 1,
"dataset_neg_high": 1,
"dataset_pos": 0,
"vertical_disp": 0,
"augmentation_param": {
"scale": 0.8,
"hscale": 0.8,
"hshear": 0.1,
"trans": 0,
"rotate": 28,
"brightness": 1.3,
"contrast": 1.1,
"d_hscale": 0.9,
"d_hshear": 0.3,
"d_vtrans": 1,
"d_rotate": 3,
"d_brightness": 0.7,
"d_contrast": 1.1,
},
}
training_loader = DataFusionContestGenerator("tests/sample_dfc.hdf5", "tests/images_dfc.hdf5", cfg)
# Patch of shape 3, 11, 11
        # With the first dimension = left patch, right positive patch, right negative patch
patch = training_loader.__getitem__(0)
x_ref_patch = 6
y_ref_patch = 5
patch_size = 5
gt_ref_patch = self.ref_img_0[
y_ref_patch - patch_size : y_ref_patch + patch_size + 1,
x_ref_patch - patch_size : x_ref_patch + patch_size + 1,
]
# disp = 1, with middlebury image convention img_ref(x,y) = img_sec(x-d,y)
disp = 1
x_sec_pos_patch = x_ref_patch - disp
y_sec_pos_patch = 5
gt_sec_pos_patch = self.sec_img_0[
y_sec_pos_patch - patch_size : y_sec_pos_patch + patch_size + 1,
x_sec_pos_patch - patch_size : x_sec_pos_patch + patch_size + 1,
]
# dataset_neg_low & dataset_neg_high = 1, with middlebury image convention img_ref(x,y) = img_sec(x-d,y)
dataset_neg = 1
x_sec_neg_patch = x_ref_patch - disp + dataset_neg
y_sec_neg_patch = 5
gt_sec_neg_patch = self.sec_img_0[
y_sec_neg_patch - patch_size : y_sec_neg_patch + patch_size + 1,
x_sec_neg_patch - patch_size : x_sec_neg_patch + patch_size + 1,
]
gt_path = np.stack((gt_ref_patch, gt_sec_pos_patch, gt_sec_neg_patch), axis=0)
# Check if the calculated patch is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(patch, gt_path)
# negative disparity
patch = training_loader.__getitem__(2)
x_ref_patch = 5
y_ref_patch = 7
patch_size = 5
gt_ref_patch = self.ref_img_1[
y_ref_patch - patch_size : y_ref_patch + patch_size + 1,
x_ref_patch - patch_size : x_ref_patch + patch_size + 1,
]
# disp = -1, with middlebury image convention img_ref(x,y) = img_sec(x-d,y)
disp = -1
x_sec_pos_patch = x_ref_patch - disp
y_sec_pos_patch = 7
gt_sec_pos_patch = self.sec_img_2[
y_sec_pos_patch - patch_size : y_sec_pos_patch + patch_size + 1,
x_sec_pos_patch - patch_size : x_sec_pos_patch + patch_size + 1,
]
# dataset_neg_low & dataset_neg_high = 1, with middlebury image convention img_ref(x,y) = img_sec(x-d,y)
dataset_neg = 1
x_sec_neg_patch = x_ref_patch - disp + dataset_neg
y_sec_neg_patch = 7
gt_sec_neg_patch = self.sec_img_2[
y_sec_neg_patch - patch_size : y_sec_neg_patch + patch_size + 1,
x_sec_neg_patch - patch_size : x_sec_neg_patch + patch_size + 1,
]
gt_path = np.stack((gt_ref_patch, gt_sec_pos_patch, gt_sec_neg_patch), axis=0)
# Check if the calculated patch is equal to the ground truth (same shape and all elements equals)
np.testing.assert_array_equal(patch, gt_path)
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"numpy.full",
"numpy.stack",
"numpy.testing.assert_array_equal",
"torch.randn",
"torch.nn.CosineSimilarity",
"mc_cnn.run.computes_cost_volume_mc_cnn_fast",
"mc_cnn.dataset_generator.middlebury_generator.MiddleburyGenerator",
"numpy.arange",
"numpy.testing.assert_allclose",
"mc_cnn.dataset_generator.datas_fusion_contest_generator.DataFusionContestGenerator",
"mc_cnn.model.mc_cnn_accurate.AccMcCnnInfer"
] |
[((21139, 21154), 'unittest.main', 'unittest.main', ([], {}), '()\n', (21152, 21154), False, 'import unittest\n'), ((1942, 1986), 'torch.randn', 'torch.randn', (['(64, 4, 4)'], {'dtype': 'torch.float64'}), '((64, 4, 4), dtype=torch.float64)\n', (1953, 1986), False, 'import torch\n'), ((2010, 2054), 'torch.randn', 'torch.randn', (['(64, 4, 4)'], {'dtype': 'torch.float64'}), '((64, 4, 4), dtype=torch.float64)\n', (2021, 2054), False, 'import torch\n'), ((2070, 2107), 'torch.nn.CosineSimilarity', 'nn.CosineSimilarity', ([], {'dim': '(0)', 'eps': '(1e-06)'}), '(dim=0, eps=1e-06)\n', (2089, 2107), True, 'import torch.nn as nn\n'), ((2187, 2213), 'numpy.full', 'np.full', (['(4, 4, 5)', 'np.nan'], {}), '((4, 4, 5), np.nan)\n', (2194, 2213), True, 'import numpy as np\n'), ((2937, 3003), 'mc_cnn.run.computes_cost_volume_mc_cnn_fast', 'computes_cost_volume_mc_cnn_fast', (['ref_feature', 'sec_features', '(-2)', '(2)'], {}), '(ref_feature, sec_features, -2, 2)\n', (2969, 3003), False, 'from mc_cnn.run import computes_cost_volume_mc_cnn_fast\n'), ((3125, 3174), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['cv', 'cv_gt'], {'rtol': '(1e-05)'}), '(cv, cv_gt, rtol=1e-05)\n', (3151, 3174), True, 'import numpy as np\n'), ((3427, 3471), 'torch.randn', 'torch.randn', (['(64, 4, 4)'], {'dtype': 'torch.float64'}), '((64, 4, 4), dtype=torch.float64)\n', (3438, 3471), False, 'import torch\n'), ((3495, 3539), 'torch.randn', 'torch.randn', (['(64, 4, 4)'], {'dtype': 'torch.float64'}), '((64, 4, 4), dtype=torch.float64)\n', (3506, 3539), False, 'import torch\n'), ((3555, 3592), 'torch.nn.CosineSimilarity', 'nn.CosineSimilarity', ([], {'dim': '(0)', 'eps': '(1e-06)'}), '(dim=0, eps=1e-06)\n', (3574, 3592), True, 'import torch.nn as nn\n'), ((3672, 3698), 'numpy.full', 'np.full', (['(4, 4, 4)', 'np.nan'], {}), '((4, 4, 4), np.nan)\n', (3679, 3698), True, 'import numpy as np\n'), ((4222, 4289), 'mc_cnn.run.computes_cost_volume_mc_cnn_fast', 'computes_cost_volume_mc_cnn_fast', (['ref_feature', 'sec_features', '(-4)', '(-1)'], {}), '(ref_feature, sec_features, -4, -1)\n', (4254, 4289), False, 'from mc_cnn.run import computes_cost_volume_mc_cnn_fast\n'), ((4411, 4460), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['cv', 'cv_gt'], {'rtol': '(1e-05)'}), '(cv, cv_gt, rtol=1e-05)\n', (4437, 4460), True, 'import numpy as np\n'), ((4713, 4757), 'torch.randn', 'torch.randn', (['(64, 4, 4)'], {'dtype': 'torch.float64'}), '((64, 4, 4), dtype=torch.float64)\n', (4724, 4757), False, 'import torch\n'), ((4781, 4825), 'torch.randn', 'torch.randn', (['(64, 4, 4)'], {'dtype': 'torch.float64'}), '((64, 4, 4), dtype=torch.float64)\n', (4792, 4825), False, 'import torch\n'), ((4841, 4878), 'torch.nn.CosineSimilarity', 'nn.CosineSimilarity', ([], {'dim': '(0)', 'eps': '(1e-06)'}), '(dim=0, eps=1e-06)\n', (4860, 4878), True, 'import torch.nn as nn\n'), ((4958, 4984), 'numpy.full', 'np.full', (['(4, 4, 4)', 'np.nan'], {}), '((4, 4, 4), np.nan)\n', (4965, 4984), True, 'import numpy as np\n'), ((5503, 5568), 'mc_cnn.run.computes_cost_volume_mc_cnn_fast', 'computes_cost_volume_mc_cnn_fast', (['ref_feature', 'sec_features', '(1)', '(4)'], {}), '(ref_feature, sec_features, 1, 4)\n', (5535, 5568), False, 'from mc_cnn.run import computes_cost_volume_mc_cnn_fast\n'), ((5690, 5739), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['cv', 'cv_gt'], {'rtol': '(1e-05)'}), '(cv, cv_gt, rtol=1e-05)\n', (5716, 5739), True, 'import numpy as np\n'), ((6200, 6248), 'torch.randn', 'torch.randn', (['(1, 112, 
4, 4)'], {'dtype': 'torch.float64'}), '((1, 112, 4, 4), dtype=torch.float64)\n', (6211, 6248), False, 'import torch\n'), ((6272, 6320), 'torch.randn', 'torch.randn', (['(1, 112, 4, 4)'], {'dtype': 'torch.float64'}), '((1, 112, 4, 4), dtype=torch.float64)\n', (6283, 6320), False, 'import torch\n'), ((6401, 6427), 'numpy.full', 'np.full', (['(4, 4, 5)', 'np.nan'], {}), '((4, 4, 5), np.nan)\n', (6408, 6427), True, 'import numpy as np\n'), ((7232, 7247), 'mc_cnn.model.mc_cnn_accurate.AccMcCnnInfer', 'AccMcCnnInfer', ([], {}), '()\n', (7245, 7247), False, 'from mc_cnn.model.mc_cnn_accurate import AccMcCnnInfer\n'), ((7567, 7616), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['cv', 'cv_gt'], {'rtol': '(1e-05)'}), '(cv, cv_gt, rtol=1e-05)\n', (7593, 7616), True, 'import numpy as np\n'), ((7876, 7924), 'torch.randn', 'torch.randn', (['(1, 112, 4, 4)'], {'dtype': 'torch.float64'}), '((1, 112, 4, 4), dtype=torch.float64)\n', (7887, 7924), False, 'import torch\n'), ((7948, 7996), 'torch.randn', 'torch.randn', (['(1, 112, 4, 4)'], {'dtype': 'torch.float64'}), '((1, 112, 4, 4), dtype=torch.float64)\n', (7959, 7996), False, 'import torch\n'), ((8077, 8103), 'numpy.full', 'np.full', (['(4, 4, 4)', 'np.nan'], {}), '((4, 4, 4), np.nan)\n', (8084, 8103), True, 'import numpy as np\n'), ((8676, 8691), 'mc_cnn.model.mc_cnn_accurate.AccMcCnnInfer', 'AccMcCnnInfer', ([], {}), '()\n', (8689, 8691), False, 'from mc_cnn.model.mc_cnn_accurate import AccMcCnnInfer\n'), ((9012, 9061), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['cv', 'cv_gt'], {'rtol': '(1e-05)'}), '(cv, cv_gt, rtol=1e-05)\n', (9038, 9061), True, 'import numpy as np\n'), ((9322, 9370), 'torch.randn', 'torch.randn', (['(1, 112, 4, 4)'], {'dtype': 'torch.float64'}), '((1, 112, 4, 4), dtype=torch.float64)\n', (9333, 9370), False, 'import torch\n'), ((9394, 9442), 'torch.randn', 'torch.randn', (['(1, 112, 4, 4)'], {'dtype': 'torch.float64'}), '((1, 112, 4, 4), dtype=torch.float64)\n', (9405, 9442), False, 'import torch\n'), ((9523, 9549), 'numpy.full', 'np.full', (['(4, 4, 4)', 'np.nan'], {}), '((4, 4, 4), np.nan)\n', (9530, 9549), True, 'import numpy as np\n'), ((10117, 10132), 'mc_cnn.model.mc_cnn_accurate.AccMcCnnInfer', 'AccMcCnnInfer', ([], {}), '()\n', (10130, 10132), False, 'from mc_cnn.model.mc_cnn_accurate import AccMcCnnInfer\n'), ((10451, 10500), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['cv', 'cv_gt'], {'rtol': '(1e-05)'}), '(cv, cv_gt, rtol=1e-05)\n', (10477, 10500), True, 'import numpy as np\n'), ((12976, 13068), 'mc_cnn.dataset_generator.middlebury_generator.MiddleburyGenerator', 'MiddleburyGenerator', (['"""tests/sample_middlebury.hdf5"""', '"""tests/images_middlebury.hdf5"""', 'cfg'], {}), "('tests/sample_middlebury.hdf5',\n 'tests/images_middlebury.hdf5', cfg)\n", (12995, 13068), False, 'from mc_cnn.dataset_generator.middlebury_generator import MiddleburyGenerator\n'), ((14329, 14397), 'numpy.stack', 'np.stack', (['(gt_ref_patch, gt_sec_pos_patch, gt_sec_neg_patch)'], {'axis': '(0)'}), '((gt_ref_patch, gt_sec_pos_patch, gt_sec_neg_patch), axis=0)\n', (14337, 14397), True, 'import numpy as np\n'), ((14513, 14558), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['patch', 'gt_path'], {}), '(patch, gt_path)\n', (14542, 14558), True, 'import numpy as np\n'), ((15729, 15797), 'numpy.stack', 'np.stack', (['(gt_ref_patch, gt_sec_pos_patch, gt_sec_neg_patch)'], {'axis': '(0)'}), '((gt_ref_patch, gt_sec_pos_patch, gt_sec_neg_patch), axis=0)\n', (15737, 15797), 
True, 'import numpy as np\n'), ((15913, 15958), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['patch', 'gt_path'], {}), '(patch, gt_path)\n', (15942, 15958), True, 'import numpy as np\n'), ((18131, 18216), 'mc_cnn.dataset_generator.datas_fusion_contest_generator.DataFusionContestGenerator', 'DataFusionContestGenerator', (['"""tests/sample_dfc.hdf5"""', '"""tests/images_dfc.hdf5"""', 'cfg'], {}), "('tests/sample_dfc.hdf5', 'tests/images_dfc.hdf5',\n cfg)\n", (18157, 18216), False, 'from mc_cnn.dataset_generator.datas_fusion_contest_generator import DataFusionContestGenerator\n'), ((19476, 19544), 'numpy.stack', 'np.stack', (['(gt_ref_patch, gt_sec_pos_patch, gt_sec_neg_patch)'], {'axis': '(0)'}), '((gt_ref_patch, gt_sec_pos_patch, gt_sec_neg_patch), axis=0)\n', (19484, 19544), True, 'import numpy as np\n'), ((19660, 19705), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['patch', 'gt_path'], {}), '(patch, gt_path)\n', (19689, 19705), True, 'import numpy as np\n'), ((20876, 20944), 'numpy.stack', 'np.stack', (['(gt_ref_patch, gt_sec_pos_patch, gt_sec_neg_patch)'], {'axis': '(0)'}), '((gt_ref_patch, gt_sec_pos_patch, gt_sec_neg_patch), axis=0)\n', (20884, 20944), True, 'import numpy as np\n'), ((21060, 21105), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['patch', 'gt_path'], {}), '(patch, gt_path)\n', (21089, 21105), True, 'import numpy as np\n'), ((1454, 1485), 'numpy.arange', 'np.arange', (['(13)'], {'dtype': 'np.float32'}), '(13, dtype=np.float32)\n', (1463, 1485), True, 'import numpy as np\n'), ((1609, 1640), 'numpy.arange', 'np.arange', (['(13)'], {'dtype': 'np.float32'}), '(13, dtype=np.float32)\n', (1618, 1640), True, 'import numpy as np\n'), ((1529, 1560), 'numpy.arange', 'np.arange', (['(13)'], {'dtype': 'np.float32'}), '(13, dtype=np.float32)\n', (1538, 1560), True, 'import numpy as np\n'), ((1684, 1715), 'numpy.arange', 'np.arange', (['(13)'], {'dtype': 'np.float32'}), '(13, dtype=np.float32)\n', (1693, 1715), True, 'import numpy as np\n')]
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sqlite3
from sklearn.metrics import auc
from sklearn.metrics import roc_curve
def add_truth(data, database):
data = data.sort_values('event_no').reset_index(drop = True)
with sqlite3.connect(database) as con:
query = 'select event_no, energy, interaction_type, pid from truth where event_no in %s'%str(tuple(data['event_no']))
truth = pd.read_sql(query,con).sort_values('event_no').reset_index(drop = True)
truth['track'] = 0
truth.loc[(abs(truth['pid']) == 14) & (truth['interaction_type'] == 1), 'track'] = 1
add_these = []
for key in truth.columns:
if key not in data.columns:
add_these.append(key)
for key in add_these:
data[key] = truth[key]
return data
def get_interaction_type(row):
if row["interaction_type"] == 1: # CC
particle_type = "nu_" + {12: 'e', 14: 'mu', 16: 'tau'}[abs(row['pid'])]
return f"{particle_type} CC"
else:
return "NC"
def resolution_fn(r):
if len(r) > 1:
return (np.percentile(r, 84) - np.percentile(r, 16)) / 2.
else:
return np.nan
def add_energylog10(df):
df['energy_log10'] = np.log10(df['energy'])
return df
def get_error(residual):
rng = np.random.default_rng(42)
w = []
for i in range(150):
new_sample = rng.choice(residual, size = len(residual), replace = True)
w.append(resolution_fn(new_sample))
return np.std(w)
def get_roc_and_auc(data, target):
fpr, tpr, _ = roc_curve(data[target], data[target+'_pred'])
auc_score = auc(fpr,tpr)
return fpr,tpr,auc_score
def plot_roc(target, runids, save_dir, save_as_csv = False):
width = 3.176*2
height = 2.388*2
fig = plt.figure(figsize = (width,height))
for runid in runids:
data = pd.read_csv('/home/iwsatlas1/oersoe/phd/upgrade_noise/results/dev_step4_numu_%s_second_run/upgrade_%s_regression_45e_GraphSagePulses/results.csv'%(runid,target))
database = '/mnt/scratch/rasmus_orsoe/databases/dev_step4_numu_%s_second_run/data/dev_step4_numu_%s_second_run.db'%(runid, runid)
if save_as_csv:
data = add_truth(data, database)
data = add_energylog10(data)
data.to_csv(save_dir + '/%s_%s.csv'%(runid, target))
pulses_cut_val = 20
if runid == 140021:
pulses_cut_val = 10
fpr, tpr, auc = get_roc_and_auc(data, target)
plt.plot(fpr,tpr, label =' %s : %s'%(runid,round(auc,3)))
plt.legend()
plt.title('Track/Cascade Classification')
plt.ylabel('True Positive Rate', fontsize = 12)
plt.xlabel('False Positive Rate', fontsize = 12)
ymax = 0.3
x_text = 0.2
y_text = ymax - 0.05
y_sep = 0.1
plt.text(x_text, y_text - 0 * y_sep, "IceCubeUpgrade/nu_simulation/detector/step4/(%s,%s)"%(runids[0], runids[1]), va='top', fontsize = 8)
plt.text(x_text, y_text - 1 * y_sep, "Pulsemaps used: SplitInIcePulses_GraphSage_Pulses ", va='top', fontsize = 8)
plt.text(x_text, y_text - 2 * y_sep, "n_pulses > (%s, %s) selection applied during training"%(10,20), va='top', fontsize = 8)
fig.savefig('/home/iwsatlas1/oersoe/phd/upgrade_noise/plots/preliminary_upgrade_performance_%s.pdf'%(target),bbox_inches="tight")
return
def calculate_width(data_sliced, target):
    track = data_sliced.loc[data_sliced['track'] == 1, :].reset_index(drop = True)
    cascade = data_sliced.loc[data_sliced['track'] == 0, :].reset_index(drop = True)
if target == 'energy':
residual_track = ((track[target + '_pred'] - track[target])/track[target])*100
residual_cascade = ((cascade[target + '_pred'] - cascade[target])/cascade[target])*100
elif target == 'zenith':
residual_track = (track[target + '_pred'] - track[target])*(360/(2*np.pi))
residual_cascade = (cascade[target + '_pred'] - cascade[target])*(360/(2*np.pi))
else:
residual_track = (track[target + '_pred'] - track[target])
residual_cascade = (cascade[target + '_pred'] - cascade[target])
return resolution_fn(residual_track), resolution_fn(residual_cascade), get_error(residual_track), get_error(residual_cascade)
def get_width(df, target):
track_widths = []
cascade_widths = []
track_errors = []
cascade_errors = []
energy = []
bins = np.arange(0,3.1,0.1)
if target in ['zenith', 'energy', 'XYZ']:
for i in range(1,len(bins)):
print(bins[i])
idx = (df['energy_log10']> bins[i-1]) & (df['energy_log10'] < bins[i])
data_sliced = df.loc[idx, :].reset_index(drop = True)
energy.append(np.mean(data_sliced['energy_log10']))
track_width, cascade_width, track_error, cascade_error = calculate_width(data_sliced, target)
track_widths.append(track_width)
cascade_widths.append(cascade_width)
track_errors.append(track_error)
cascade_errors.append(cascade_error)
track_plot_data = pd.DataFrame({'mean': energy, 'width': track_widths, 'width_error': track_errors})
cascade_plot_data = pd.DataFrame({'mean': energy, 'width': cascade_widths, 'width_error': cascade_errors})
return track_plot_data, cascade_plot_data
else:
print('target not supported: %s'%target)
# Load data
def make_plot(target, runids, save_dir, save_as_csv = False):
colors = {140021: 'tab:blue', 140022: 'tab:orange'}
fig = plt.figure(constrained_layout = True)
ax1 = plt.subplot2grid((6, 6), (0, 0), colspan = 6, rowspan= 6)
for runid in runids:
predictions_path = '/home/iwsatlas1/oersoe/phd/upgrade_noise/results/dev_step4_numu_%s_second_run/upgrade_%s_regression_45e_GraphSagePulses/results.csv'%(runid,target)
database = '/mnt/scratch/rasmus_orsoe/databases/dev_step4_numu_%s_second_run/data/dev_step4_numu_%s_second_run.db'%(runid, runid)
pulses_cut_val = 20
if runid == 140021:
pulses_cut_val = 10
df = pd.read_csv(predictions_path).sort_values('event_no').reset_index(drop = True)
df = add_truth(df, database)
df = add_energylog10(df)
if save_as_csv:
df.to_csv(save_dir + '/%s_%s.csv'%(runid, target))
plot_data_track, plot_data_cascade = get_width(df, target)
ax1.plot(plot_data_track['mean'],plot_data_track['width'],linestyle='solid', lw = 0.5, color = 'black', alpha = 1)
ax1.fill_between(plot_data_track['mean'],plot_data_track['width'] - plot_data_track['width_error'], plot_data_track['width'] + plot_data_track['width_error'],color = colors[runid], alpha = 0.8 ,label = 'Track %s'%runid)
ax1.plot(plot_data_cascade['mean'],plot_data_cascade['width'],linestyle='dashed', color = 'tab:blue', lw = 0.5, alpha = 1)
ax1.fill_between(plot_data_cascade['mean'], plot_data_cascade['width']- plot_data_cascade['width_error'], plot_data_cascade['width']+ plot_data_cascade['width_error'], color = colors[runid], alpha = 0.3, label = 'Cascade %s'%runid )
ax2 = ax1.twinx()
ax2.hist(df['energy_log10'], histtype = 'step', label = 'deposited energy', color = colors[runid])
#plt.title('$\\nu_{v,u,e}$', size = 20)
ax1.tick_params(axis='x', labelsize=6)
ax1.tick_params(axis='y', labelsize=6)
ax1.set_xlim((0,3.1))
leg = ax1.legend(frameon=False, fontsize = 8)
for line in leg.get_lines():
line.set_linewidth(4.0)
if target == 'energy':
ax1.set_ylim((0,175))
ymax = 23.
y_sep = 8
unit_tag = '(%)'
else:
unit_tag = '(deg.)'
if target == 'angular_res':
target = 'direction'
if target == 'XYZ':
target = 'vertex'
unit_tag = '(m)'
if target == 'zenith':
ymax = 10.
y_sep = 2.3
ax1.set_ylim((0,45))
plt.tick_params(right=False,labelright=False)
ax1.set_ylabel('%s Resolution %s'%(target.capitalize(), unit_tag), size = 10)
ax1.set_xlabel('Energy (log10 GeV)', size = 10)
x_text = 0.5
y_text = ymax - 2.
ax1.text(x_text, y_text - 0 * y_sep, "IceCubeUpgrade/nu_simulation/detector/step4/(%s,%s)"%(runids[0], runids[1]), va='top', fontsize = 8)
ax1.text(x_text, y_text - 1 * y_sep, "Pulsemaps used: SplitInIcePulses_GraphSage_Pulses ", va='top', fontsize = 8)
ax1.text(x_text, y_text - 2 * y_sep, "n_pulses > (%s, %s) selection applied during training"%(10,20), va='top', fontsize = 8)
fig.suptitle("%s regression Upgrade MC using GNN"%target)
#fig.suptitle('%s Resolution'%target.capitalize(), size = 12)
fig.savefig('/home/iwsatlas1/oersoe/phd/upgrade_noise/plots/preliminary_upgrade_performance_%s.pdf'%(target))#,bbox_inches="tight")
return
runids = [140021, 140022]
targets = ['zenith', 'energy', 'track']
save_as_csv = True
save_dir = '/home/iwsatlas1/oersoe/phd/tmp/upgrade_csv'
for target in targets:
if target != 'track':
make_plot(target, runids, save_dir, save_as_csv)
else:
plot_roc(target, runids, save_dir, save_as_csv)
|
[
"matplotlib.pyplot.title",
"pandas.read_csv",
"matplotlib.pyplot.subplot2grid",
"numpy.random.default_rng",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.arange",
"matplotlib.pyplot.tick_params",
"pandas.DataFrame",
"numpy.std",
"numpy.log10",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.text",
"numpy.percentile",
"sqlite3.connect",
"matplotlib.pyplot.ylabel",
"sklearn.metrics.roc_curve",
"sklearn.metrics.auc",
"pandas.read_sql",
"matplotlib.pyplot.xlabel"
] |
[((1250, 1272), 'numpy.log10', 'np.log10', (["df['energy']"], {}), "(df['energy'])\n", (1258, 1272), True, 'import numpy as np\n'), ((1323, 1348), 'numpy.random.default_rng', 'np.random.default_rng', (['(42)'], {}), '(42)\n', (1344, 1348), True, 'import numpy as np\n'), ((1520, 1529), 'numpy.std', 'np.std', (['w'], {}), '(w)\n', (1526, 1529), True, 'import numpy as np\n'), ((1584, 1631), 'sklearn.metrics.roc_curve', 'roc_curve', (['data[target]', "data[target + '_pred']"], {}), "(data[target], data[target + '_pred'])\n", (1593, 1631), False, 'from sklearn.metrics import roc_curve\n'), ((1646, 1659), 'sklearn.metrics.auc', 'auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (1649, 1659), False, 'from sklearn.metrics import auc\n'), ((1803, 1838), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(width, height)'}), '(figsize=(width, height))\n', (1813, 1838), True, 'import matplotlib.pyplot as plt\n'), ((2568, 2580), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2578, 2580), True, 'import matplotlib.pyplot as plt\n'), ((2585, 2626), 'matplotlib.pyplot.title', 'plt.title', (['"""Track/Cascade Classification"""'], {}), "('Track/Cascade Classification')\n", (2594, 2626), True, 'import matplotlib.pyplot as plt\n'), ((2631, 2676), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {'fontsize': '(12)'}), "('True Positive Rate', fontsize=12)\n", (2641, 2676), True, 'import matplotlib.pyplot as plt\n'), ((2683, 2729), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {'fontsize': '(12)'}), "('False Positive Rate', fontsize=12)\n", (2693, 2729), True, 'import matplotlib.pyplot as plt\n'), ((2809, 2956), 'matplotlib.pyplot.text', 'plt.text', (['x_text', '(y_text - 0 * y_sep)', "('IceCubeUpgrade/nu_simulation/detector/step4/(%s,%s)' % (runids[0], runids[1])\n )"], {'va': '"""top"""', 'fontsize': '(8)'}), "(x_text, y_text - 0 * y_sep, \n 'IceCubeUpgrade/nu_simulation/detector/step4/(%s,%s)' % (runids[0],\n runids[1]), va='top', fontsize=8)\n", (2817, 2956), True, 'import matplotlib.pyplot as plt\n'), ((2952, 3068), 'matplotlib.pyplot.text', 'plt.text', (['x_text', '(y_text - 1 * y_sep)', '"""Pulsemaps used: SplitInIcePulses_GraphSage_Pulses """'], {'va': '"""top"""', 'fontsize': '(8)'}), "(x_text, y_text - 1 * y_sep,\n 'Pulsemaps used: SplitInIcePulses_GraphSage_Pulses ', va='top', fontsize=8)\n", (2960, 3068), True, 'import matplotlib.pyplot as plt\n'), ((3071, 3207), 'matplotlib.pyplot.text', 'plt.text', (['x_text', '(y_text - 2 * y_sep)', "('n_pulses > (%s, %s) selection applied during training' % (10, 20))"], {'va': '"""top"""', 'fontsize': '(8)'}), "(x_text, y_text - 2 * y_sep, \n 'n_pulses > (%s, %s) selection applied during training' % (10, 20), va=\n 'top', fontsize=8)\n", (3079, 3207), True, 'import matplotlib.pyplot as plt\n'), ((4393, 4415), 'numpy.arange', 'np.arange', (['(0)', '(3.1)', '(0.1)'], {}), '(0, 3.1, 0.1)\n', (4402, 4415), True, 'import numpy as np\n'), ((5514, 5549), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'constrained_layout': '(True)'}), '(constrained_layout=True)\n', (5524, 5549), True, 'import matplotlib.pyplot as plt\n'), ((5562, 5616), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(6, 6)', '(0, 0)'], {'colspan': '(6)', 'rowspan': '(6)'}), '((6, 6), (0, 0), colspan=6, rowspan=6)\n', (5578, 5616), True, 'import matplotlib.pyplot as plt\n'), ((7910, 7956), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'right': '(False)', 'labelright': '(False)'}), '(right=False, 
labelright=False)\n', (7925, 7956), True, 'import matplotlib.pyplot as plt\n'), ((262, 287), 'sqlite3.connect', 'sqlite3.connect', (['database'], {}), '(database)\n', (277, 287), False, 'import sqlite3\n'), ((1880, 2054), 'pandas.read_csv', 'pd.read_csv', (["('/home/iwsatlas1/oersoe/phd/upgrade_noise/results/dev_step4_numu_%s_second_run/upgrade_%s_regression_45e_GraphSagePulses/results.csv'\n % (runid, target))"], {}), "(\n '/home/iwsatlas1/oersoe/phd/upgrade_noise/results/dev_step4_numu_%s_second_run/upgrade_%s_regression_45e_GraphSagePulses/results.csv'\n % (runid, target))\n", (1891, 2054), True, 'import pandas as pd\n'), ((5058, 5144), 'pandas.DataFrame', 'pd.DataFrame', (["{'mean': energy, 'width': track_widths, 'width_error': track_errors}"], {}), "({'mean': energy, 'width': track_widths, 'width_error':\n track_errors})\n", (5070, 5144), True, 'import pandas as pd\n'), ((5169, 5259), 'pandas.DataFrame', 'pd.DataFrame', (["{'mean': energy, 'width': cascade_widths, 'width_error': cascade_errors}"], {}), "({'mean': energy, 'width': cascade_widths, 'width_error':\n cascade_errors})\n", (5181, 5259), True, 'import pandas as pd\n'), ((1109, 1129), 'numpy.percentile', 'np.percentile', (['r', '(84)'], {}), '(r, 84)\n', (1122, 1129), True, 'import numpy as np\n'), ((1132, 1152), 'numpy.percentile', 'np.percentile', (['r', '(16)'], {}), '(r, 16)\n', (1145, 1152), True, 'import numpy as np\n'), ((4700, 4736), 'numpy.mean', 'np.mean', (["data_sliced['energy_log10']"], {}), "(data_sliced['energy_log10'])\n", (4707, 4736), True, 'import numpy as np\n'), ((438, 461), 'pandas.read_sql', 'pd.read_sql', (['query', 'con'], {}), '(query, con)\n', (449, 461), True, 'import pandas as pd\n'), ((6060, 6089), 'pandas.read_csv', 'pd.read_csv', (['predictions_path'], {}), '(predictions_path)\n', (6071, 6089), True, 'import pandas as pd\n')]
|
"""
handle the CLI logic for a unidump call
"""
import argparse
import codecs
import gettext
from os.path import dirname
from shutil import get_terminal_size
import sys
from textwrap import TextWrapper
# pylint: disable=unused-import
from typing import List, IO, Any
# pylint: enable=unused-import
from unicodedata import unidata_version
from unidump import VERSION, unidump
from unidump.env import Env
TL = gettext.translation('unidump', localedir=dirname(__file__)+'/locale',
fallback=True)
_ = TL.gettext
TW = TextWrapper(width=min(80, getattr(get_terminal_size(), 'columns')),
replace_whitespace=True,
initial_indent=' ', subsequent_indent=' ').fill
DESCRIPTION = '\n\n'.join([
TW(_('A Unicode code point dump.')),
TW(_('Think of it as hexdump(1) for Unicode. The command analyses the '
'input and then prints three columns: the raw byte index of the '
'first code point in this row, code points in their hex notation, '
'and finally the raw input characters with control and whitespace '
'replaced by a dot.')),
TW(_('Invalid byte sequences are represented with an “X” and with the hex '
'value enclosed in question marks, e.g., “?F5?”.')),
TW(_('You can pipe in data from stdin, select several files at once, or '
'even mix all those input methods together.')),
])
EPILOG = '\n\n'.join([
_('Examples:'),
TW(_('* Basic usage with stdin:')),
''' echo -n 'ABCDEFGHIJKLMNOP' | unidump -n 4
0 0041 0042 0043 0044 ABCD
4 0045 0046 0047 0048 EFGH
8 0049 004A 004B 004C IJKL
12 004D 004E 004F 0050 MNOP''',
TW(_('* Dump the code points translated from another encoding:')),
' unidump -c latin-1 some-legacy-file',
TW(_('* Dump many files at the same time:')),
' unidump foo-*.txt',
TW(_('* Control characters and whitespace are safely rendered:')),
''' echo -n -e '\\x01' | unidump -n 1
0 0001 .''',
TW(_('* Finally learn what your favorite Emoji is composed of:')),
''' ( echo -n -e '\\xf0\\x9f\\xa7\\x9d\\xf0\\x9f\\x8f\\xbd\\xe2' ; \\
echo -n -e '\\x80\\x8d\\xe2\\x99\\x82\\xef\\xb8\\x8f' ; ) | \\
unidump -n 5
0 1F9DD 1F3FD 200D 2642 FE0F .🏽.♂️''',
TW(_('See <http://emojipedia.org/man-elf-medium-skin-tone/> for images. '
'The “elf” emoji (the first character) is replaced with a dot here, '
'because the current version of Python’s unicodedata doesn’t know of '
'this character yet.')),
TW(_('* Use it like strings(1):')),
' unidump -e \'{data}\' some-file.bin',
TW(_('This will replace every unknown byte from the input file with “X” '
'and every control and whitespace character with “.”.')),
TW(_('* Only print the code points of the input:')),
''' unidump -e '{repr}'$'\\n' -n 1 some-file.txt''',
TW(_('This results in a stream of code points in hex notation, each on a '
'new line, without byte counter or rendering of actual data. You can '
'use this to count the total amount of characters (as opposed to raw '
'bytes) in a file, if you pipe it through `wc -l`.')),
TW(_('This is version {} of unidump, using Unicode {} data.')
.format(VERSION, unidata_version)).lstrip() + '\n'
])
def force_stdout_to_utf8():
"""force stdout to be UTF-8 encoded, disregarding locale
Do not type-check this:
error: Incompatible types in assignment (expression has type
"StreamWriter", variable has type "TextIO")
error: "TextIO" has no attribute "detach"
\\o/
"""
sys.stdout = codecs.getwriter("utf-8")(sys.stdout.detach())
def main(args: List[str] = None) -> int:
"""entry-point for an unidump CLI call"""
force_stdout_to_utf8()
if args is None:
args = sys.argv[1:]
parser = argparse.ArgumentParser(
prog='unidump',
description=DESCRIPTION,
epilog=EPILOG,
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument('files', nargs='*', metavar='FILE', default=('-',),
help=_(
'input files. Use “-” or keep empty for stdin.'))
parser.add_argument('-n', '--length', type=int, default=16,
dest='linelength', metavar='LENGTH',
help=_(
'format output using this much input characters. '
'Default is %(default)s characters.'))
parser.add_argument('-c', '--encoding', type=str, default='utf-8',
metavar='ENC',
help=_(
'interpret input in this encoding. Default is '
'%(default)s. You can choose any encoding that '
'Python supports, e.g. “latin-1”.'))
parser.add_argument('-e', '--format', type=str, default=None,
dest='lineformat', metavar='FORMAT',
help=_(
'specify a custom format in Python’s {} notation. '
'Default is “%s”. '
'See examples below on how to use this option.'
) % Env.lineformat.replace('\n', '\\n'))
parser.add_argument('-v', '--version', action='version',
version=_('%(prog)s {} using Unicode {} data').format(
VERSION, unidata_version))
options = parser.parse_args(args)
try:
for filename in options.files:
infile = None # type: IO[bytes]
if filename == '-':
infile = sys.stdin.buffer
else:
try:
infile = open(filename, 'rb')
except FileNotFoundError:
sys.stdout.flush()
sys.stderr.write(_('File {} not found.\n')
.format(filename))
continue
except IsADirectoryError:
sys.stdout.flush()
sys.stderr.write(_('{} is a directory.\n')
.format(filename))
continue
unidump(
infile,
env=Env(
linelength=options.linelength,
encoding=options.encoding,
lineformat=options.lineformat,
output=sys.stdout))
except KeyboardInterrupt:
sys.stdout.flush()
return 1
else:
return 0
|
[
"sys.stdout.detach",
"argparse.ArgumentParser",
"unidump.env.Env.lineformat.replace",
"os.path.dirname",
"shutil.get_terminal_size",
"codecs.getwriter",
"unidump.env.Env",
"sys.stdout.flush"
] |
[((4000, 4138), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""unidump"""', 'description': 'DESCRIPTION', 'epilog': 'EPILOG', 'formatter_class': 'argparse.RawDescriptionHelpFormatter'}), "(prog='unidump', description=DESCRIPTION, epilog=\n EPILOG, formatter_class=argparse.RawDescriptionHelpFormatter)\n", (4023, 4138), False, 'import argparse\n'), ((3772, 3797), 'codecs.getwriter', 'codecs.getwriter', (['"""utf-8"""'], {}), "('utf-8')\n", (3788, 3797), False, 'import codecs\n'), ((3798, 3817), 'sys.stdout.detach', 'sys.stdout.detach', ([], {}), '()\n', (3815, 3817), False, 'import sys\n'), ((454, 471), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (461, 471), False, 'from os.path import dirname\n'), ((6696, 6714), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (6712, 6714), False, 'import sys\n'), ((5413, 5448), 'unidump.env.Env.lineformat.replace', 'Env.lineformat.replace', (['"""\n"""', '"""\\\\n"""'], {}), "('\\n', '\\\\n')\n", (5435, 5448), False, 'from unidump.env import Env\n'), ((577, 596), 'shutil.get_terminal_size', 'get_terminal_size', ([], {}), '()\n', (594, 596), False, 'from shutil import get_terminal_size\n'), ((6464, 6580), 'unidump.env.Env', 'Env', ([], {'linelength': 'options.linelength', 'encoding': 'options.encoding', 'lineformat': 'options.lineformat', 'output': 'sys.stdout'}), '(linelength=options.linelength, encoding=options.encoding, lineformat=\n options.lineformat, output=sys.stdout)\n', (6467, 6580), False, 'from unidump.env import Env\n'), ((6003, 6021), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (6019, 6021), False, 'import sys\n'), ((6232, 6250), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (6248, 6250), False, 'import sys\n')]
|
from rest_framework import serializers
from api.models import *
class ParliamentaryGroupSerializer(serializers.ModelSerializer):
class Meta:
model = ParliamentaryGroup
fields = ('id', 'name')
class ParliamentarySessionSerializer(serializers.ModelSerializer):
class Meta:
model = ParliamentarySession
fields = ('session_date',)
class CouncilPersonSerializer(serializers.ModelSerializer):
class Meta:
model = CouncilPerson
fields = ('name', 'academic_degree', 'email', 'parliamentary_group')
class FileSerializer(serializers.ModelSerializer):
class Meta:
model = File
fields = ('long_filename', 'short_filename', 'path')
class AnswerSerializer(serializers.ModelSerializer):
session = serializers.StringRelatedField()
proposer = CouncilPersonSerializer()
files = FileSerializer(many=True)
class Meta:
model = Motion
fields = ('id', 'motion_id', 'session', 'title', 'parliamentary_group',
'proposer', 'files')
class MotionSerializer(serializers.ModelSerializer):
session = serializers.StringRelatedField()
proposer = CouncilPersonSerializer()
files = FileSerializer(many=True)
answers = AnswerSerializer(many=True)
class Meta:
model = Motion
fields = ('id', 'motion_id', 'session', 'title', 'motion_type', 'parliamentary_group',
'proposer', 'files', 'answers')
|
[
"rest_framework.serializers.StringRelatedField"
] |
[((776, 808), 'rest_framework.serializers.StringRelatedField', 'serializers.StringRelatedField', ([], {}), '()\n', (806, 808), False, 'from rest_framework import serializers\n'), ((1116, 1148), 'rest_framework.serializers.StringRelatedField', 'serializers.StringRelatedField', ([], {}), '()\n', (1146, 1148), False, 'from rest_framework import serializers\n')]
|
from django.db import models
from pytz import country_names as c
from datetime import date
dict_choices = dict(c)
_choices = []
_keys = list(dict_choices.keys())
_value = list(dict_choices.values())
if len(_keys) == len(_value):
for i in range(len(_keys)):
a = [_keys[i], _value[i]]
_choices.append(tuple(a))
class StudentProfile(models.Model):
Name = models.CharField(max_length=300)
Application_Number = models.BigIntegerField()
Date_Of_Birth = models.DateField()
Gender = models.CharField(
max_length=30,
choices=[
("M", "Male"),
("F", "Female"),
("N", "Non-Binary"),
("W", "Would not like to reveal"),
],
)
HomeState = models.CharField(max_length=300)
Country = models.CharField(max_length=75, choices=_choices)
ContactNumber = models.BigIntegerField()
class ContactUs(models.Model):
Department_Name = models.CharField(max_length=300)
Department_Head = models.CharField(max_length=300)
Department_ContactDetails = models.IntegerField()
class Meta:
verbose_name_plural = "Contact Us"
class Events(models.Model):
Event_Name = models.CharField(max_length=50)
Event_Head = models.ForeignKey(StudentProfile, on_delete=models.DO_NOTHING)
Event_Duration = models.DurationField()
Event_Descripton = models.TextField(null=False, default="Empty Description")
class Meta:
verbose_name_plural = "Events and Notices"
|
[
"django.db.models.TextField",
"django.db.models.BigIntegerField",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.DurationField",
"django.db.models.IntegerField",
"django.db.models.DateField"
] |
[((396, 428), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(300)'}), '(max_length=300)\n', (412, 428), False, 'from django.db import models\n'), ((455, 479), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {}), '()\n', (477, 479), False, 'from django.db import models\n'), ((501, 519), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (517, 519), False, 'from django.db import models\n'), ((534, 668), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)', 'choices': "[('M', 'Male'), ('F', 'Female'), ('N', 'Non-Binary'), ('W',\n 'Would not like to reveal')]"}), "(max_length=30, choices=[('M', 'Male'), ('F', 'Female'), (\n 'N', 'Non-Binary'), ('W', 'Would not like to reveal')])\n", (550, 668), False, 'from django.db import models\n'), ((771, 803), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(300)'}), '(max_length=300)\n', (787, 803), False, 'from django.db import models\n'), ((819, 868), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(75)', 'choices': '_choices'}), '(max_length=75, choices=_choices)\n', (835, 868), False, 'from django.db import models\n'), ((890, 914), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {}), '()\n', (912, 914), False, 'from django.db import models\n'), ((974, 1006), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(300)'}), '(max_length=300)\n', (990, 1006), False, 'from django.db import models\n'), ((1030, 1062), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(300)'}), '(max_length=300)\n', (1046, 1062), False, 'from django.db import models\n'), ((1096, 1117), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (1115, 1117), False, 'from django.db import models\n'), ((1232, 1263), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (1248, 1263), False, 'from django.db import models\n'), ((1282, 1344), 'django.db.models.ForeignKey', 'models.ForeignKey', (['StudentProfile'], {'on_delete': 'models.DO_NOTHING'}), '(StudentProfile, on_delete=models.DO_NOTHING)\n', (1299, 1344), False, 'from django.db import models\n'), ((1367, 1389), 'django.db.models.DurationField', 'models.DurationField', ([], {}), '()\n', (1387, 1389), False, 'from django.db import models\n'), ((1414, 1471), 'django.db.models.TextField', 'models.TextField', ([], {'null': '(False)', 'default': '"""Empty Description"""'}), "(null=False, default='Empty Description')\n", (1430, 1471), False, 'from django.db import models\n')]
|