code | apis | extract_api
---|---|---|
import base64
import hashlib
from Crypto import Random
from Crypto.Cipher import AES
'''
Encryption
@description For arbitrary encryption and decryption of data
@author <NAME>
Usage:
e = Encryption()
encrypted_string = e.encrypt("Encrypt me!", "password")
decrypted = e.decrypt(encrypted_string, "password")
'''
class Encryption:
    def __init__(self):
        # AES block size in bytes; also used as the padding modulus
        self.bs = 16
    def encrypt(self, plaintext, password):
        plaintext = self.pad(plaintext).encode()
        iv = Random.new().read(self.bs)
        # Derive a 32-byte key: the first 32 hex characters of SHA-256(password)
        key = hashlib.sha256(password.encode()).hexdigest()[:32].encode()
        cipher = AES.new(key, AES.MODE_CBC, iv)
        return base64.b64encode(iv + cipher.encrypt(plaintext))
    def decrypt(self, ciphertext, password):
        key = hashlib.sha256(password.encode()).hexdigest()[:32].encode()
        ciphertext = base64.b64decode(ciphertext)
        iv = ciphertext[:16]
        cipher = AES.new(key, AES.MODE_CBC, iv)
        return self.unpad(cipher.decrypt(ciphertext[16:])).decode()
    def pad(self, s):
        # PKCS#7-style padding: append N copies of chr(N) so the length is a multiple of bs
        pad_len = self.bs - len(s) % self.bs
        return s + pad_len * chr(pad_len)
    def unpad(self, s):
        # The last byte encodes the padding length
        return s[:-ord(s[len(s)-1:])]
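# Illustrative round-trip check (hypothetical usage, consistent with the docstring above):
# encrypt() returns base64 text; decrypt() with the same password restores the input.
if __name__ == '__main__':
    e = Encryption()
    token = e.encrypt("Encrypt me!", "password")
    assert e.decrypt(token, "password") == "Encrypt me!"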
|
[
"hashlib.sha256",
"Crypto.Random.new",
"Crypto.Cipher.AES.new",
"base64.b64decode"
] |
[((569, 599), 'Crypto.Cipher.AES.new', 'AES.new', (['key', 'AES.MODE_CBC', 'iv'], {}), '(key, AES.MODE_CBC, iv)\n', (576, 599), False, 'from Crypto.Cipher import AES\n'), ((766, 794), 'base64.b64decode', 'base64.b64decode', (['ciphertext'], {}), '(ciphertext)\n', (782, 794), False, 'import base64\n'), ((829, 859), 'Crypto.Cipher.AES.new', 'AES.new', (['key', 'AES.MODE_CBC', 'iv'], {}), '(key, AES.MODE_CBC, iv)\n', (836, 859), False, 'from Crypto.Cipher import AES\n'), ((481, 493), 'Crypto.Random.new', 'Random.new', ([], {}), '()\n', (491, 493), False, 'from Crypto import Random\n'), ((516, 540), 'hashlib.sha256', 'hashlib.sha256', (['password'], {}), '(password)\n', (530, 540), False, 'import hashlib\n'), ((709, 733), 'hashlib.sha256', 'hashlib.sha256', (['password'], {}), '(password)\n', (723, 733), False, 'import hashlib\n')]
|
import pandas as pd
import os
def ensure_dir(file_path):
directory = os.path.dirname(file_path)
if directory and not os.path.exists(directory):
print("Creating new directory", directory)
os.makedirs(directory)
import json
def get_cfg_str(x):
# json.dumps(r.to_dict(), sort_keys=True, separators = (',', '~'))[1:-1]
# It seems DoIt does not allow equal (=) char in task name
return ",".join(['{}~{}'.format(k,v) for (k,v) in sorted(x.to_dict().items()) if k not in ['JUDI', 'name']])
def combine_csvs_base(params, infiles, outfile):
df = pd.DataFrame()
for indx, r in params.assign(infile = infiles).iterrows():
tmp = pd.read_csv(r['infile'])
for col in params.columns:
tmp[col] = r[col]
df = pd.concat([df, tmp], ignore_index=True)  # DataFrame.append was removed in pandas 2.0
df.to_csv(outfile, index=False)
def combine_csvs(big, small):
infiles = big['path'].tolist()
outfile = small['path'].tolist()[0]
params = big.drop(columns=['name', 'path'])
combine_csvs_base(params, infiles, outfile)
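# Minimal sketch (hypothetical frames and file names, assuming a.csv and b.csv exist on
# disk): 'big' lists one row per input CSV plus its parameter columns, while 'small'
# names the single merged output file, matching the column conventions used above.
if __name__ == '__main__':
    big = pd.DataFrame({'name': ['a', 'b'], 'path': ['a.csv', 'b.csv'], 'seed': [1, 2]})
    small = pd.DataFrame({'path': ['combined.csv']})
    combine_csvs(big, small)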
from PyPDF2 import PdfFileMerger
def merge_pdfs_base(infiles, outfile):
merger = PdfFileMerger()
for pdf in infiles:
merger.append(open(pdf, 'rb'))
with open(outfile, 'wb') as fout:
merger.write(fout)
def merge_pdfs(big, small):
infiles = big['path'].tolist()
outfile = small['path'].tolist()[0]
merge_pdfs_base(infiles, outfile)
|
[
"pandas.DataFrame",
"os.makedirs",
"pandas.read_csv",
"os.path.dirname",
"os.path.exists",
"PyPDF2.PdfFileMerger"
] |
[((72, 98), 'os.path.dirname', 'os.path.dirname', (['file_path'], {}), '(file_path)\n', (87, 98), False, 'import os\n'), ((562, 576), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (574, 576), True, 'import pandas as pd\n'), ((1086, 1101), 'PyPDF2.PdfFileMerger', 'PdfFileMerger', ([], {}), '()\n', (1099, 1101), False, 'from PyPDF2 import PdfFileMerger\n'), ((200, 222), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (211, 222), False, 'import os\n'), ((648, 672), 'pandas.read_csv', 'pd.read_csv', (["r['infile']"], {}), "(r['infile'])\n", (659, 672), True, 'import pandas as pd\n'), ((122, 147), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (136, 147), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: create_task.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='create_task.proto',
package='patch_task',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x11\x63reate_task.proto\x12\npatch_task\"\xa8\x01\n\x16\x43reatePatchTaskRequest\x12;\n\x07request\x18\x01 \x03(\x0b\x32*.patch_task.CreatePatchTaskRequest.Request\x12\x11\n\tgroupSize\x18\x02 \x01(\x05\x1a>\n\x07Request\x12\x0e\n\x06hostId\x18\x01 \x01(\t\x12\x0e\n\x06hostIp\x18\x02 \x01(\t\x12\x13\n\x0bpatchIdList\x18\x03 \x03(\t\")\n\x17\x43reatePatchTaskResponse\x12\x0e\n\x06taskId\x18\x01 \x01(\t\"\x85\x01\n\x1e\x43reatePatchTaskResponseWrapper\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x63odeExplain\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12\x31\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32#.patch_task.CreatePatchTaskResponseb\x06proto3')
)
_CREATEPATCHTASKREQUEST_REQUEST = _descriptor.Descriptor(
name='Request',
full_name='patch_task.CreatePatchTaskRequest.Request',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='hostId', full_name='patch_task.CreatePatchTaskRequest.Request.hostId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hostIp', full_name='patch_task.CreatePatchTaskRequest.Request.hostIp', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='patchIdList', full_name='patch_task.CreatePatchTaskRequest.Request.patchIdList', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=140,
serialized_end=202,
)
_CREATEPATCHTASKREQUEST = _descriptor.Descriptor(
name='CreatePatchTaskRequest',
full_name='patch_task.CreatePatchTaskRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='request', full_name='patch_task.CreatePatchTaskRequest.request', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='groupSize', full_name='patch_task.CreatePatchTaskRequest.groupSize', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_CREATEPATCHTASKREQUEST_REQUEST, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=34,
serialized_end=202,
)
_CREATEPATCHTASKRESPONSE = _descriptor.Descriptor(
name='CreatePatchTaskResponse',
full_name='patch_task.CreatePatchTaskResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='taskId', full_name='patch_task.CreatePatchTaskResponse.taskId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=204,
serialized_end=245,
)
_CREATEPATCHTASKRESPONSEWRAPPER = _descriptor.Descriptor(
name='CreatePatchTaskResponseWrapper',
full_name='patch_task.CreatePatchTaskResponseWrapper',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='patch_task.CreatePatchTaskResponseWrapper.code', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='codeExplain', full_name='patch_task.CreatePatchTaskResponseWrapper.codeExplain', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='patch_task.CreatePatchTaskResponseWrapper.error', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='patch_task.CreatePatchTaskResponseWrapper.data', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=248,
serialized_end=381,
)
_CREATEPATCHTASKREQUEST_REQUEST.containing_type = _CREATEPATCHTASKREQUEST
_CREATEPATCHTASKREQUEST.fields_by_name['request'].message_type = _CREATEPATCHTASKREQUEST_REQUEST
_CREATEPATCHTASKRESPONSEWRAPPER.fields_by_name['data'].message_type = _CREATEPATCHTASKRESPONSE
DESCRIPTOR.message_types_by_name['CreatePatchTaskRequest'] = _CREATEPATCHTASKREQUEST
DESCRIPTOR.message_types_by_name['CreatePatchTaskResponse'] = _CREATEPATCHTASKRESPONSE
DESCRIPTOR.message_types_by_name['CreatePatchTaskResponseWrapper'] = _CREATEPATCHTASKRESPONSEWRAPPER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
CreatePatchTaskRequest = _reflection.GeneratedProtocolMessageType('CreatePatchTaskRequest', (_message.Message,), {
'Request' : _reflection.GeneratedProtocolMessageType('Request', (_message.Message,), {
'DESCRIPTOR' : _CREATEPATCHTASKREQUEST_REQUEST,
'__module__' : 'create_task_pb2'
# @@protoc_insertion_point(class_scope:patch_task.CreatePatchTaskRequest.Request)
})
,
'DESCRIPTOR' : _CREATEPATCHTASKREQUEST,
'__module__' : 'create_task_pb2'
# @@protoc_insertion_point(class_scope:patch_task.CreatePatchTaskRequest)
})
_sym_db.RegisterMessage(CreatePatchTaskRequest)
_sym_db.RegisterMessage(CreatePatchTaskRequest.Request)
CreatePatchTaskResponse = _reflection.GeneratedProtocolMessageType('CreatePatchTaskResponse', (_message.Message,), {
'DESCRIPTOR' : _CREATEPATCHTASKRESPONSE,
'__module__' : 'create_task_pb2'
# @@protoc_insertion_point(class_scope:patch_task.CreatePatchTaskResponse)
})
_sym_db.RegisterMessage(CreatePatchTaskResponse)
CreatePatchTaskResponseWrapper = _reflection.GeneratedProtocolMessageType('CreatePatchTaskResponseWrapper', (_message.Message,), {
'DESCRIPTOR' : _CREATEPATCHTASKRESPONSEWRAPPER,
'__module__' : 'create_task_pb2'
# @@protoc_insertion_point(class_scope:patch_task.CreatePatchTaskResponseWrapper)
})
_sym_db.RegisterMessage(CreatePatchTaskResponseWrapper)
# @@protoc_insertion_point(module_scope)
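# Minimal usage sketch (hypothetical, assuming this generated module is imported as
# create_task_pb2): build a request, serialize it, and parse it back.
#
#   import create_task_pb2
#   req = create_task_pb2.CreatePatchTaskRequest(groupSize=10)
#   item = req.request.add(hostId='host-1', hostIp='10.0.0.1')
#   item.patchIdList.extend(['patch-001', 'patch-002'])
#   payload = req.SerializeToString()
#   parsed = create_task_pb2.CreatePatchTaskRequest.FromString(payload)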
|
[
"google.protobuf.symbol_database.Default",
"google.protobuf.descriptor.FieldDescriptor",
"google.protobuf.reflection.GeneratedProtocolMessageType"
] |
[((465, 491), 'google.protobuf.symbol_database.Default', '_symbol_database.Default', ([], {}), '()\n', (489, 491), True, 'from google.protobuf import symbol_database as _symbol_database\n'), ((8242, 8414), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', (['"""CreatePatchTaskResponse"""', '(_message.Message,)', "{'DESCRIPTOR': _CREATEPATCHTASKRESPONSE, '__module__': 'create_task_pb2'}"], {}), "('CreatePatchTaskResponse', (\n _message.Message,), {'DESCRIPTOR': _CREATEPATCHTASKRESPONSE,\n '__module__': 'create_task_pb2'})\n", (8282, 8414), True, 'from google.protobuf import reflection as _reflection\n'), ((8576, 8761), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', (['"""CreatePatchTaskResponseWrapper"""', '(_message.Message,)', "{'DESCRIPTOR': _CREATEPATCHTASKRESPONSEWRAPPER, '__module__': 'create_task_pb2'\n }"], {}), "('CreatePatchTaskResponseWrapper',\n (_message.Message,), {'DESCRIPTOR': _CREATEPATCHTASKRESPONSEWRAPPER,\n '__module__': 'create_task_pb2'})\n", (8616, 8761), True, 'from google.protobuf import reflection as _reflection\n'), ((7692, 7855), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', (['"""Request"""', '(_message.Message,)', "{'DESCRIPTOR': _CREATEPATCHTASKREQUEST_REQUEST, '__module__': 'create_task_pb2'\n }"], {}), "('Request', (_message.Message,), {\n 'DESCRIPTOR': _CREATEPATCHTASKREQUEST_REQUEST, '__module__':\n 'create_task_pb2'})\n", (7732, 7855), True, 'from google.protobuf import reflection as _reflection\n'), ((2338, 2706), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""patchIdList"""', 'full_name': '"""patch_task.CreatePatchTaskRequest.Request.patchIdList"""', 'index': '(2)', 'number': '(3)', 'type': '(9)', 'cpp_type': '(9)', 'label': '(3)', 'has_default_value': '(False)', 'default_value': '[]', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR'}), "(name='patchIdList', full_name=\n 'patch_task.CreatePatchTaskRequest.Request.patchIdList', index=2,\n number=3, type=9, cpp_type=9, label=3, has_default_value=False,\n default_value=[], message_type=None, enum_type=None, containing_type=\n None, is_extension=False, extension_scope=None, serialized_options=None,\n file=DESCRIPTOR)\n", (2365, 2706), True, 'from google.protobuf import descriptor as _descriptor\n'), ((3151, 3501), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""request"""', 'full_name': '"""patch_task.CreatePatchTaskRequest.request"""', 'index': '(0)', 'number': '(1)', 'type': '(11)', 'cpp_type': '(10)', 'label': '(3)', 'has_default_value': '(False)', 'default_value': '[]', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR'}), "(name='request', full_name=\n 'patch_task.CreatePatchTaskRequest.request', index=0, number=1, type=11,\n cpp_type=10, label=3, has_default_value=False, default_value=[],\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, serialized_options=None, file=DESCRIPTOR)\n", (3178, 3501), True, 'from google.protobuf import descriptor as _descriptor\n'), ((3526, 3878), 'google.protobuf.descriptor.FieldDescriptor', 
'_descriptor.FieldDescriptor', ([], {'name': '"""groupSize"""', 'full_name': '"""patch_task.CreatePatchTaskRequest.groupSize"""', 'index': '(1)', 'number': '(2)', 'type': '(5)', 'cpp_type': '(1)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': '(0)', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR'}), "(name='groupSize', full_name=\n 'patch_task.CreatePatchTaskRequest.groupSize', index=1, number=2, type=\n 5, cpp_type=1, label=1, has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, serialized_options=None, file=DESCRIPTOR)\n", (3553, 3878), True, 'from google.protobuf import descriptor as _descriptor\n'), ((5203, 5552), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""code"""', 'full_name': '"""patch_task.CreatePatchTaskResponseWrapper.code"""', 'index': '(0)', 'number': '(1)', 'type': '(5)', 'cpp_type': '(1)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': '(0)', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR'}), "(name='code', full_name=\n 'patch_task.CreatePatchTaskResponseWrapper.code', index=0, number=1,\n type=5, cpp_type=1, label=1, has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, serialized_options=None, file=DESCRIPTOR)\n", (5230, 5552), True, 'from google.protobuf import descriptor as _descriptor\n'), ((6383, 6742), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""data"""', 'full_name': '"""patch_task.CreatePatchTaskResponseWrapper.data"""', 'index': '(3)', 'number': '(4)', 'type': '(11)', 'cpp_type': '(10)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': 'None', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR'}), "(name='data', full_name=\n 'patch_task.CreatePatchTaskResponseWrapper.data', index=3, number=4,\n type=11, cpp_type=10, label=1, has_default_value=False, default_value=\n None, message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None, serialized_options=None, file\n =DESCRIPTOR)\n", (6410, 6742), True, 'from google.protobuf import descriptor as _descriptor\n')]
|
#!/usr/bin/python
import sys, json, os, datetime
import logging, logging.handlers
import splunk.entity as entity
import splunk
import requests
# Tower Connect
#
# This script is used as wrapper to connect to Ansible Tower API.
## Original from:
# __author__ = "<NAME>"
# __email__ = "<EMAIL>"
# __version__ = "1.0"
# Refactored By:
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__version__ = "1.0"
# Setup Logger
def setup_logging():
logger = logging.getLogger('splunk.tower_api')
SPLUNK_HOME = os.environ['SPLUNK_HOME']
LOGGING_DEFAULT_CONFIG_FILE = os.path.join(SPLUNK_HOME, 'etc', 'log.cfg')
LOGGING_LOCAL_CONFIG_FILE = os.path.join(SPLUNK_HOME, 'etc', 'log-local.cfg')
LOGGING_STANZA_NAME = 'python'
LOGGING_FILE_NAME = "tower_api.log"
BASE_LOG_PATH = os.path.join('var', 'log', 'splunk')
LOGGING_FORMAT = "%(asctime)s %(levelname)-s\t%(module)s:%(lineno)d - %(message)s"
splunk_log_handler = logging.handlers.RotatingFileHandler(os.path.join(SPLUNK_HOME, BASE_LOG_PATH, LOGGING_FILE_NAME), mode='a')
splunk_log_handler.setFormatter(logging.Formatter(LOGGING_FORMAT))
logger.addHandler(splunk_log_handler)
splunk.setupSplunkLogger(logger, LOGGING_DEFAULT_CONFIG_FILE, LOGGING_LOCAL_CONFIG_FILE, LOGGING_STANZA_NAME)
return logger
#Securely retrieve Ansible Tower Credentials from Splunk REST API password endpoint
def getCredentials(sessionKey,realm):
''' Get Tower Credentials from Splunk '''
myapp = 'splunk-alert_ansible-tower-master'
try:
# list all credentials
entities = entity.getEntities(['admin', 'passwords'], namespace=myapp,
owner='nobody', sessionKey=sessionKey)
except Exception as e:
logger.error("Could not get %s credentials from splunk. Error: %s"
% (myapp, str(e)))
raise e
# return the credentials whose realm matches the requested one
for i, c in entities.items():
if c.get('realm') == realm:
return c['username'], c['clear_password']
logger.error("ERROR: No credentials have been found")
def tower_get_job_launch_link(hostname,username,password,job_name):
''' Get Job Launch Link from Tower API based on Name '''
logger.info("Job Name: {}".format(job_name))
#Attempt to get launch link
try:
req = requests.get(
url = 'https://{}/api/v2/unified_job_templates/?name={}'.format(hostname,job_name),
headers = {
"Content-Type": "application/json",
},
verify = False,
auth = (username, password),
)
req.raise_for_status()
results = req.json()
logger.info("Unified Jobs Found: {}".format(results))
if results['count'] != 1:
logger.warning('There were {} templates found with the name {}'.format(results['count'], job_name))
launch_link = results['results'][0]['related']['launch']
logger.info("Launch Link: {}".format(launch_link))
return launch_link
except Exception as error:
logger.error(error)
raise error
def tower_launch(hostname,username,password,job_name,extra_vars):
''' Launch Tower Job '''
launch_link = tower_get_job_launch_link(hostname, username, password, job_name)
post_data = {
"url": "https://{}{}".format(hostname,launch_link),
"headers": {
"Content-Type": "application/json",
"Accept": "application/json",
},
"verify": False,
"auth": (username, password),
}
if extra_vars != None:
data = {}
data['extra_vars'] = json.loads(extra_vars)
post_data['data'] = json.dumps(data)
#Attempt to Launch Ansible Tower Job Template
try:
req = requests.post(**post_data)
results = req.json()
logger.info("Job Info: {}".format(results))
req.raise_for_status()
except Exception as error:
logger.error(error)
raise error
def main(payload):
#Setup Logger
global logger
#Retrieve session key from payload to authenticate to Splunk REST API for secure credential retrieval
sessionKey = payload.get('session_key')
#Retrieve Ansible Tower Hostname from Payload configuration
hostname = payload['configuration'].get('hostname')
#Retrieve Ansible Tower Job Template ID from Payload configuration
job_name = payload['configuration'].get('job_name')
#Retrieve realm from Payload configuration
realm = payload['configuration'].get('realm')
#Retrieve Ansible Tower Credentials from Splunk REST API
username, password = getCredentials(sessionKey,realm)
#Retrieve Extra Variables from Splunk REST API - future work: extra variable support
extra_vars = payload['configuration'].get('extra_var')
#Submit Ansible Tower Job
tower_launch(hostname,username,password,job_name,extra_vars)
if __name__ == "__main__":
logger = setup_logging()
# Check if script initiated with --execute
if len(sys.argv) < 2 or sys.argv[1] != "--execute":
#print >> sys.stderr, "FATAL Unsupported execution mode (expected --execute flag)"
sys.exit(1)
else:
#Get Payload
payload = json.loads(sys.stdin.read())
logger.info("Job Started")
#Pass Payload to main function
main(payload)
logger.info("Job Completed")
|
[
"sys.stdin.read",
"json.loads",
"splunk.setupSplunkLogger",
"sys.exit",
"splunk.entity.getEntities",
"json.dumps",
"logging.Formatter",
"requests.post",
"os.path.join",
"logging.getLogger"
] |
[((447, 484), 'logging.getLogger', 'logging.getLogger', (['"""splunk.tower_api"""'], {}), "('splunk.tower_api')\n", (464, 484), False, 'import logging, logging.handlers\n'), ((557, 600), 'os.path.join', 'os.path.join', (['SPLUNK_HOME', '"""etc"""', '"""log.cfg"""'], {}), "(SPLUNK_HOME, 'etc', 'log.cfg')\n", (569, 600), False, 'import sys, json, os, datetime\n'), ((630, 679), 'os.path.join', 'os.path.join', (['SPLUNK_HOME', '"""etc"""', '"""log-local.cfg"""'], {}), "(SPLUNK_HOME, 'etc', 'log-local.cfg')\n", (642, 679), False, 'import sys, json, os, datetime\n'), ((766, 802), 'os.path.join', 'os.path.join', (['"""var"""', '"""log"""', '"""splunk"""'], {}), "('var', 'log', 'splunk')\n", (778, 802), False, 'import sys, json, os, datetime\n'), ((1126, 1239), 'splunk.setupSplunkLogger', 'splunk.setupSplunkLogger', (['logger', 'LOGGING_DEFAULT_CONFIG_FILE', 'LOGGING_LOCAL_CONFIG_FILE', 'LOGGING_STANZA_NAME'], {}), '(logger, LOGGING_DEFAULT_CONFIG_FILE,\n LOGGING_LOCAL_CONFIG_FILE, LOGGING_STANZA_NAME)\n', (1150, 1239), False, 'import splunk\n'), ((946, 1005), 'os.path.join', 'os.path.join', (['SPLUNK_HOME', 'BASE_LOG_PATH', 'LOGGING_FILE_NAME'], {}), '(SPLUNK_HOME, BASE_LOG_PATH, LOGGING_FILE_NAME)\n', (958, 1005), False, 'import sys, json, os, datetime\n'), ((1051, 1084), 'logging.Formatter', 'logging.Formatter', (['LOGGING_FORMAT'], {}), '(LOGGING_FORMAT)\n', (1068, 1084), False, 'import logging, logging.handlers\n'), ((1506, 1608), 'splunk.entity.getEntities', 'entity.getEntities', (["['admin', 'passwords']"], {'namespace': 'myapp', 'owner': '"""nobody"""', 'sessionKey': 'sessionKey'}), "(['admin', 'passwords'], namespace=myapp, owner='nobody',\n sessionKey=sessionKey)\n", (1524, 1608), True, 'import splunk.entity as entity\n'), ((3253, 3275), 'json.loads', 'json.loads', (['extra_vars'], {}), '(extra_vars)\n', (3263, 3275), False, 'import sys, json, os, datetime\n'), ((3298, 3314), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (3308, 3314), False, 'import sys, json, os, datetime\n'), ((3377, 3403), 'requests.post', 'requests.post', ([], {}), '(**post_data)\n', (3390, 3403), False, 'import requests\n'), ((4677, 4688), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4685, 4688), False, 'import sys, json, os, datetime\n'), ((4734, 4750), 'sys.stdin.read', 'sys.stdin.read', ([], {}), '()\n', (4748, 4750), False, 'import sys, json, os, datetime\n')]
|
import os
from dotenv import load_dotenv
from sqlalchemy import create_engine, Column, String, Date, Numeric, ForeignKey
from sqlalchemy.ext.declarative import declarative_base
load_dotenv(verbose=True)
db_link = os.getenv("DB_LINK")
db = create_engine(db_link)
base = declarative_base()
class JS020(base):
__tablename__ = 'js020'
sno = Column(String(10), primary_key=True, nullable=False, index=True)
sname = Column(String(8), nullable=False)
sex = Column(String(3), nullable=False, default='男')
bdate = Column(Date, nullable=False, default='1970-01-01')
height = Column(Numeric(3, 2), nullable=False, default=0)
dorm = Column(String(15))
class JC020(base):
__tablename__ = 'jc020'
cno = Column(String(12), primary_key=True, nullable=False, index=True)
cname = Column(String(30), nullable=False)
period = Column(Numeric(4, 1), nullable=False, default=0)
credit = Column(Numeric(2, 1), nullable=False, default=0)
teacher = Column(String(10), nullable=False)
class JSC020(base):
__tablename__ = 'jsc020'
sno = Column(String(10), ForeignKey('js020.sno'), primary_key=True, nullable=False)
cno = Column(String(12), ForeignKey('jc020.cno'), primary_key=True, nullable=False)
grade = Column(Numeric(4, 1), nullable=True)
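# Minimal usage sketch (hypothetical values; the sessionmaker import and sample row
# are assumptions): create the tables on the configured engine and insert one student.
if __name__ == '__main__':
    import datetime
    from sqlalchemy.orm import sessionmaker
    base.metadata.create_all(db)
    Session = sessionmaker(bind=db)
    session = Session()
    session.add(JS020(sno='2020010101', sname='张三',
                      bdate=datetime.date(2000, 1, 1), height=1.75))
    session.commit()
    session.close()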
|
[
"sqlalchemy.String",
"sqlalchemy.ForeignKey",
"dotenv.load_dotenv",
"sqlalchemy.ext.declarative.declarative_base",
"sqlalchemy.Numeric",
"sqlalchemy.Column",
"sqlalchemy.create_engine",
"os.getenv"
] |
[((179, 204), 'dotenv.load_dotenv', 'load_dotenv', ([], {'verbose': '(True)'}), '(verbose=True)\n', (190, 204), False, 'from dotenv import load_dotenv\n'), ((216, 236), 'os.getenv', 'os.getenv', (['"""DB_LINK"""'], {}), "('DB_LINK')\n", (225, 236), False, 'import os\n'), ((242, 264), 'sqlalchemy.create_engine', 'create_engine', (['db_link'], {}), '(db_link)\n', (255, 264), False, 'from sqlalchemy import create_engine, Column, String, Date, Numeric, ForeignKey\n'), ((272, 290), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (288, 290), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((531, 581), 'sqlalchemy.Column', 'Column', (['Date'], {'nullable': '(False)', 'default': '"""1970-01-01"""'}), "(Date, nullable=False, default='1970-01-01')\n", (537, 581), False, 'from sqlalchemy import create_engine, Column, String, Date, Numeric, ForeignKey\n'), ((358, 368), 'sqlalchemy.String', 'String', (['(10)'], {}), '(10)\n', (364, 368), False, 'from sqlalchemy import create_engine, Column, String, Date, Numeric, ForeignKey\n'), ((435, 444), 'sqlalchemy.String', 'String', (['(8)'], {}), '(8)\n', (441, 444), False, 'from sqlalchemy import create_engine, Column, String, Date, Numeric, ForeignKey\n'), ((479, 488), 'sqlalchemy.String', 'String', (['(3)'], {}), '(3)\n', (485, 488), False, 'from sqlalchemy import create_engine, Column, String, Date, Numeric, ForeignKey\n'), ((602, 615), 'sqlalchemy.Numeric', 'Numeric', (['(3)', '(2)'], {}), '(3, 2)\n', (609, 615), False, 'from sqlalchemy import create_engine, Column, String, Date, Numeric, ForeignKey\n'), ((662, 672), 'sqlalchemy.String', 'String', (['(15)'], {}), '(15)\n', (668, 672), False, 'from sqlalchemy import create_engine, Column, String, Date, Numeric, ForeignKey\n'), ((741, 751), 'sqlalchemy.String', 'String', (['(12)'], {}), '(12)\n', (747, 751), False, 'from sqlalchemy import create_engine, Column, String, Date, Numeric, ForeignKey\n'), ((818, 828), 'sqlalchemy.String', 'String', (['(30)'], {}), '(30)\n', (824, 828), False, 'from sqlalchemy import create_engine, Column, String, Date, Numeric, ForeignKey\n'), ((866, 879), 'sqlalchemy.Numeric', 'Numeric', (['(4)', '(1)'], {}), '(4, 1)\n', (873, 879), False, 'from sqlalchemy import create_engine, Column, String, Date, Numeric, ForeignKey\n'), ((928, 941), 'sqlalchemy.Numeric', 'Numeric', (['(2)', '(1)'], {}), '(2, 1)\n', (935, 941), False, 'from sqlalchemy import create_engine, Column, String, Date, Numeric, ForeignKey\n'), ((991, 1001), 'sqlalchemy.String', 'String', (['(10)'], {}), '(10)\n', (997, 1001), False, 'from sqlalchemy import create_engine, Column, String, Date, Numeric, ForeignKey\n'), ((1088, 1098), 'sqlalchemy.String', 'String', (['(10)'], {}), '(10)\n', (1094, 1098), False, 'from sqlalchemy import create_engine, Column, String, Date, Numeric, ForeignKey\n'), ((1100, 1123), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""js020.sno"""'], {}), "('js020.sno')\n", (1110, 1123), False, 'from sqlalchemy import create_engine, Column, String, Date, Numeric, ForeignKey\n'), ((1176, 1186), 'sqlalchemy.String', 'String', (['(12)'], {}), '(12)\n', (1182, 1186), False, 'from sqlalchemy import create_engine, Column, String, Date, Numeric, ForeignKey\n'), ((1188, 1211), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""jc020.cno"""'], {}), "('jc020.cno')\n", (1198, 1211), False, 'from sqlalchemy import create_engine, Column, String, Date, Numeric, ForeignKey\n'), ((1266, 1279), 'sqlalchemy.Numeric', 'Numeric', (['(4)', '(1)'], {}), '(4, 1)\n', (1273, 
1279), False, 'from sqlalchemy import create_engine, Column, String, Date, Numeric, ForeignKey\n')]
|
from bilibili import bilibili
import requests
import asyncio
import time
class LotteryResult(bilibili):
async def query(self):
while 1:
print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())), "检查抽奖结果")
# print(self.activity_raffleid_list)
if self.activity_raffleid_list:
for i in range(0,len(self.activity_roomid_list)):
url = "http://api.live.bilibili.com/activity/v1/Raffle/notice?roomid="+str(self.activity_roomid_list[0])+"&raffleId="+str(self.activity_raffleid_list[0])
headers = {
'Accept': 'application/json, text/plain, */*',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36',
'Accept-Language': 'zh-CN,zh;q=0.9',
'accept-encoding': 'gzip, deflate',
'Host': 'api.live.bilibili.com',
'cookie': self.cookie,
}
response = requests.get(url, headers=headers)
try:
print("# 房间", str(self.activity_roomid_list[0]).center(9), "网页端活动抽奖结果:", response.json()['data']['gift_name']+"x"+str(response.json()['data']['gift_num']))
del self.activity_roomid_list[0]
del self.activity_raffleid_list[0]
del self.activity_time_list[0]
except:
pass
# print(self.TV_raffleid_list)
if self.TV_raffleid_list:
for i in range(0, len(self.TV_roomid_list)):
url="http://api.live.bilibili.com/gift/v2/smalltv/notice?roomid="+str(self.TV_roomid_list[0])+"&raffleId="+str(self.TV_raffleid_list[0])
headers = {
'Accept': 'application/json, text/plain, */*',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36',
'Accept-Language': 'zh-CN,zh;q=0.9',
'accept-encoding': 'gzip, deflate',
'Host': 'api.live.bilibili.com',
'cookie': self.cookie,
}
response = requests.get(url, headers=headers)
if response.json()['data']['gift_name'] != "":
try:
print("# 房间", str(self.TV_roomid_list[0]).center(9), "小电视道具抽奖结果:", (response.json()['data']['gift_name'])+"x"+str(response.json()['data']['gift_num']))
del self.TV_roomid_list[0]
del self.TV_raffleid_list[0]
del self.TV_time_list[0]
except:
pass
await asyncio.sleep(60)
|
[
"requests.get",
"asyncio.sleep",
"time.time"
] |
[((3016, 3033), 'asyncio.sleep', 'asyncio.sleep', (['(60)'], {}), '(60)\n', (3029, 3033), False, 'import asyncio\n'), ((1146, 1180), 'requests.get', 'requests.get', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (1158, 1180), False, 'import requests\n'), ((2457, 2491), 'requests.get', 'requests.get', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (2469, 2491), False, 'import requests\n'), ((230, 241), 'time.time', 'time.time', ([], {}), '()\n', (239, 241), False, 'import time\n')]
|
#!/usr/bin/env python
from telegram import TelegramObject
class Message(TelegramObject):
def __init__(self,
message_id,
from_user,
date,
chat,
forward_from=None,
forward_date=None,
reply_to_message=None,
text=None,
audio=None,
document=None,
photo=None,
sticker=None,
video=None,
contact=None,
location=None,
new_chat_participant=None,
left_chat_participant=None,
new_chat_title=None,
new_chat_photo=None,
delete_chat_photo=None,
group_chat_created=None):
self.message_id = message_id
self.from_user = from_user
self.date = date
self.chat = chat
self.forward_from = forward_from
self.forward_date = forward_date
self.reply_to_message = reply_to_message
self.text = text
self.audio = audio
self.document = document
self.photo = photo
self.sticker = sticker
self.video = video
self.contact = contact
self.location = location
self.new_chat_participant = new_chat_participant
self.left_chat_participant = left_chat_participant
self.new_chat_title = new_chat_title
self.new_chat_photo = new_chat_photo
self.delete_chat_photo = delete_chat_photo
self.group_chat_created = group_chat_created
@property
def chat_id(self):
return self.chat.id
@staticmethod
def de_json(data):
if 'from' in data: # from is a reserved word, use from_user instead.
from telegram import User
from_user = User.de_json(data['from'])
else:
from_user = None
if 'chat' in data:
if 'first_name' in data['chat']:
from telegram import User
chat = User.de_json(data['chat'])
if 'title' in data['chat']:
from telegram import GroupChat
chat = GroupChat.de_json(data['chat'])
else:
chat = None
if 'forward_from' in data:
from telegram import User
forward_from = User.de_json(data['forward_from'])
else:
forward_from = None
if 'reply_to_message' in data:
reply_to_message = Message.de_json(data['reply_to_message'])
else:
reply_to_message = None
if 'text' in data:
text = data['text']
else:
text = None
if 'audio' in data:
from telegram import Audio
audio = Audio.de_json(data['audio'])
else:
audio = None
if 'document' in data:
from telegram import Document
document = Document.de_json(data['document'])
else:
document = None
if 'photo' in data:
from telegram import PhotoSize
photo = [PhotoSize.de_json(x) for x in data['photo']]
else:
photo = None
if 'sticker' in data:
from telegram import Sticker
sticker = Sticker.de_json(data['sticker'])
else:
sticker = None
if 'video' in data:
from telegram import Video
video = Video.de_json(data['video'])
else:
video = None
if 'contact' in data:
from telegram import Contact
contact = Contact.de_json(data['contact'])
else:
contact = None
if 'location' in data:
from telegram import Location
location = Location.de_json(data['location'])
else:
location = None
if 'new_chat_participant' in data:
from telegram import User
new_chat_participant = User.de_json(data['new_chat_participant'])
else:
new_chat_participant = None
if 'left_chat_participant' in data:
from telegram import User
left_chat_participant = User.de_json(data['left_chat_participant'])
else:
left_chat_participant = None
return Message(message_id=data.get('message_id', None),
from_user=from_user,
date=data.get('date', None),
chat=chat,
forward_from=forward_from,
forward_date=data.get('forward_date', None),
reply_to_message=reply_to_message,
text=text,
audio=audio,
document=document,
photo=photo,
sticker=sticker,
video=video,
contact=contact,
location=location,
new_chat_participant=new_chat_participant,
left_chat_participant=left_chat_participant,
new_chat_title=data.get('new_chat_title', None),
new_chat_photo=data.get('new_chat_photo', None),
delete_chat_photo=data.get('delete_chat_photo', None),
group_chat_created=data.get('group_chat_created', None))
def to_dict(self):
data = {'message_id': self.message_id,
'from': self.from_user.to_dict(),
'date': self.date,
'chat': self.chat.to_dict()}
if self.forward_from:
data['forward_from'] = self.forward_from
if self.forward_date:
data['forward_date'] = self.forward_date
if self.reply_to_message:
data['reply_to_message'] = self.reply_to_message
if self.text:
data['text'] = self.text
if self.audio:
data['audio'] = self.audio.to_dict()
if self.document:
data['document'] = self.document.to_dict()
if self.photo:
data['photo'] = [p.to_dict() for p in self.photo]
if self.sticker:
data['sticker'] = self.sticker.to_dict()
if self.video:
data['video'] = self.video.to_dict()
if self.contact:
data['contact'] = self.contact.to_dict()
if self.location:
data['location'] = self.location.to_dict()
if self.new_chat_participant:
data['new_chat_participant'] = self.new_chat_participant
if self.left_chat_participant:
data['left_chat_participant'] = self.left_chat_participant
if self.new_chat_title:
data['new_chat_title'] = self.new_chat_title
if self.new_chat_photo:
data['new_chat_photo'] = self.new_chat_photo
if self.delete_chat_photo:
data['delete_chat_photo'] = self.delete_chat_photo
if self.group_chat_created:
data['group_chat_created'] = self.group_chat_created
return data
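# Minimal sketch (hypothetical values; assumes the matching telegram.User class of
# this library version is importable, as de_json above expects for 'from'/'chat'):
if __name__ == '__main__':
    sample = {
        'message_id': 1,
        'from': {'id': 42, 'first_name': 'Alice'},
        'chat': {'id': 42, 'first_name': 'Alice'},
        'date': 0,
        'text': 'hello',
    }
    msg = Message.de_json(sample)
    print(msg.chat_id, msg.text)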
|
[
"telegram.Contact.de_json",
"telegram.Audio.de_json",
"telegram.Video.de_json",
"telegram.GroupChat.de_json",
"telegram.Document.de_json",
"telegram.Location.de_json",
"telegram.PhotoSize.de_json",
"telegram.User.de_json",
"telegram.Sticker.de_json"
] |
[((1925, 1951), 'telegram.User.de_json', 'User.de_json', (["data['from']"], {}), "(data['from'])\n", (1937, 1951), False, 'from telegram import User\n'), ((2457, 2491), 'telegram.User.de_json', 'User.de_json', (["data['forward_from']"], {}), "(data['forward_from'])\n", (2469, 2491), False, 'from telegram import User\n'), ((2903, 2931), 'telegram.Audio.de_json', 'Audio.de_json', (["data['audio']"], {}), "(data['audio'])\n", (2916, 2931), False, 'from telegram import Audio\n'), ((3074, 3108), 'telegram.Document.de_json', 'Document.de_json', (["data['document']"], {}), "(data['document'])\n", (3090, 3108), False, 'from telegram import Document\n'), ((3434, 3466), 'telegram.Sticker.de_json', 'Sticker.de_json', (["data['sticker']"], {}), "(data['sticker'])\n", (3449, 3466), False, 'from telegram import Sticker\n'), ((3602, 3630), 'telegram.Video.de_json', 'Video.de_json', (["data['video']"], {}), "(data['video'])\n", (3615, 3630), False, 'from telegram import Video\n'), ((3770, 3802), 'telegram.Contact.de_json', 'Contact.de_json', (["data['contact']"], {}), "(data['contact'])\n", (3785, 3802), False, 'from telegram import Contact\n'), ((3947, 3981), 'telegram.Location.de_json', 'Location.de_json', (["data['location']"], {}), "(data['location'])\n", (3963, 3981), False, 'from telegram import Location\n'), ((4147, 4189), 'telegram.User.de_json', 'User.de_json', (["data['new_chat_participant']"], {}), "(data['new_chat_participant'])\n", (4159, 4189), False, 'from telegram import User\n'), ((4369, 4412), 'telegram.User.de_json', 'User.de_json', (["data['left_chat_participant']"], {}), "(data['left_chat_participant'])\n", (4381, 4412), False, 'from telegram import User\n'), ((2140, 2166), 'telegram.User.de_json', 'User.de_json', (["data['chat']"], {}), "(data['chat'])\n", (2152, 2166), False, 'from telegram import User\n'), ((2280, 2311), 'telegram.GroupChat.de_json', 'GroupChat.de_json', (["data['chat']"], {}), "(data['chat'])\n", (2297, 2311), False, 'from telegram import GroupChat\n'), ((3250, 3270), 'telegram.PhotoSize.de_json', 'PhotoSize.de_json', (['x'], {}), '(x)\n', (3267, 3270), False, 'from telegram import PhotoSize\n')]
|
from setuptools import setup
setup(
name='graphs',
packages=['graphs'],
include_package_data=True,
install_requires=[
'flask',
'matplotlib',
]
)
|
[
"setuptools.setup"
] |
[((30, 144), 'setuptools.setup', 'setup', ([], {'name': '"""graphs"""', 'packages': "['graphs']", 'include_package_data': '(True)', 'install_requires': "['flask', 'matplotlib']"}), "(name='graphs', packages=['graphs'], include_package_data=True,\n install_requires=['flask', 'matplotlib'])\n", (35, 144), False, 'from setuptools import setup\n')]
|
__author__ = "<NAME>"
__copyright__ = "Copyright (c) 2020 <NAME> (lullimat/idea.deploy), <EMAIL>"
__credits__ = ["<NAME>"]
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
'''
Provides classes for the computation of the thermodynamic quantities related
to the Shan-Chen model
'''
import scipy.integrate as integrate
from scipy.optimize import fsolve, bisect
from numpy import linspace
from sympy import Rational, diff, simplify
from sympy import lambdify as sp_lambdify
from sympy import symbols as sp_symbols
from sympy import exp as sympy_exp
from sympy.solvers.solveset import nonlinsolve
from sympy.solvers import solve
from functools import reduce
import math
from idpy.LBM.SCFStencils import SCFStencils
from idpy.Utils.ManageData import ManageData
def FindSingleZeroRange(func, x_init, delta_val):
old_val, new_val = func(x_init), func(x_init)
while old_val * new_val > 0:
old_val = new_val
x_init += delta_val
new_val = func(x_init)
return (x_init - delta_val, x_init)
def FindZeroRanges(func, n_range, n_bins, n_delta, debug_flag = False):
zero_ranges = []
old_val, new_val = 0, 0
# Here I can use linspace
for n_i in range(n_bins):
new_val = func(n_range[0] + n_delta * n_i)
if debug_flag:
print(n_bins, n_i, n_range[0] + n_delta * n_i, new_val, old_val)
print(n_i > 0, old_val * new_val < 0, n_i > 0 and old_val * new_val < 0)
print()
if n_i > 0 and old_val * new_val < 0:
zero_ranges.append((n_range[0] + n_delta * (n_i - 1),
n_range[0] + n_delta * n_i))
old_val = new_val
return zero_ranges
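# Minimal check (hypothetical example): bracket the root of x**2 - 2 on (0, 2) using
# 100 bins of width 0.02; a single interval around sqrt(2) ~ 1.414 is expected.
if __name__ == '__main__':
    print(FindZeroRanges(lambda x: x ** 2 - 2, (0.0, 2.0), 100, 0.02))
    # e.g. [(1.4, 1.42)]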
def FindExtrema(func, f_arg, arg_range = (0.01,3.), arg_bins = 256):
d_func = lambda f_arg_: diff(func,f_arg).subs(f_arg, f_arg_)
arg_delta = (arg_range[1] - arg_range[0])/arg_bins
zero_ranges = FindZeroRanges(d_func, arg_range, arg_bins, arg_delta)
print("zero_ranges: ", zero_ranges)
extrema = []
for z_range in zero_ranges:
# Initialization point from LEFT -> z_range[0] NOT z_range[1]
arg_swap = bisect(d_func, z_range[0], z_range[1])
f_swap = func.subs(f_arg, arg_swap)
extrema.append((arg_swap,f_swap))
return extrema
class ShanChen:
# Symbols should be safe here
n, G, theta, psi, d_psi, e2 = \
sp_symbols("n G \\theta \\psi \\psi' e_{2}")
P = theta*n + Rational('1/2')*G*e2*psi**2
def __init__(self,
psi_f = None,
G_val = -3.6, theta_val = 1., e2_val = 1.,
n_eps = 0.01):
# Variables Init
self.psi_f = sympy_exp(-1/self.n) if psi_f is None else psi_f
#print(self.psi_f)
self.G_val, self.theta_val, self.e2_val = G_val, theta_val, e2_val
self.n_eps = n_eps
self.d_psi_f = diff(self.psi_f, self.n)
self.P_subs = self.P.subs(self.psi, self.psi_f).subs(self.G, self.G_val)
self.P_subs = self.P_subs.subs(self.theta, self.theta_val).subs(self.e2, self.e2_val)
self.P_subs_lamb = sp_lambdify(self.n, self.P_subs)
# Find Critical Point
## This substitution leaves both n and G free
P_subs_swap = self.P.subs(self.psi, self.psi_f)
P_subs_swap = P_subs_swap.subs(self.theta, self.theta_val)
P_subs_swap = P_subs_swap.subs(self.e2, self.e2_val)
self.d_P = diff(P_subs_swap, self.n)
self.dd_P = diff(self.d_P, self.n)
#print([self.d_P, self.dd_P])
self.critical_point = solve([self.d_P, self.dd_P], [self.G, self.n])
self.G_c, self.n_c = float(self.critical_point[0][0]), float(self.critical_point[0][1])
self.P_c = P_subs_swap.subs(self.n, self.n_c).subs(self.G, self.G_c)
if self.G_val * self.e2_val > self.G_c * self.e2_val:
print("The value of G: %f is above the critical point G_c: %f for the chosen %s" % (self.G_val, self.G_c, str(self.psi) + " = " + str(self.psi_f)))
print("-> No phase separation")
else:
# Find Extrema
lambda_tmp = sp_lambdify(self.n, self.P_subs - self.P_c)
## Here I want to find the value of the density that corresponds to the critical
## pressure because by construction this value of the density is larger than
## any coexistence extreme, and there is only one
self.range_ext = FindSingleZeroRange(lambda_tmp, self.n_eps, self.n_eps)[1]
## Hence we can look for extrema starting from self.n_eps to self.range_ext
## Cannot begin from zero because for some choices of \psi the derivative
## might be singular
print("Extrema:", self.range_ext)
self.extrema = FindExtrema(self.P_subs, self.n,
arg_range = (self.n_eps, self.range_ext))
self.coexistence_range = self.FindCoexistenceRange()
print("Coexistence range (n, P): ", self.coexistence_range)
print()
### Init Ends
def PressureTensorInit(self, py_stencil):
self.PTensor = self.PressureTensor(py_stencil)
def FlatInterfaceProperties(self, which_sol = 0, eps_val = None):
self.FInterface = self.FlatInterface(self, self.PTensor, which_sol, eps_val)
def FindCoexistenceRange(self):
coexistence_range = []
'''
With this check we can manage values of the coupling for which one has
negative pressures
'''
if self.extrema[1][1] > 0:
func_f = lambda f_arg_: (self.P_subs.subs(self.n, f_arg_) - self.extrema[1][1])
# Looking for the LEFT limit starting from ZERO
# and ending after the first stationary point
arg_swap = bisect(func_f, self.n_eps, self.extrema[0][0])
p_swap = self.P_subs.subs(self.n, arg_swap)
coexistence_range.append((arg_swap, p_swap))
else:
coexistence_range.append((0, 0))
# Looking for the RIGHT limit starting from the RIGHT extremum
# that is certainly at the LEFT of the value we are looking for
func_f = lambda f_arg_: (self.P_subs.subs(self.n, f_arg_) - self.extrema[0][1])
arg_swap = bisect(func_f, self.extrema[1][0] + self.n_eps, self.range_ext + self.n_eps)
p_swap = self.P_subs.subs(self.n, arg_swap)
coexistence_range.append((arg_swap, p_swap))
return coexistence_range
####################################################################################
### Subclass: FlatInterface
####################################################################################
class FlatInterface:
def __init__(self, SC, PTensor, which_sol, eps_val):
self.SC, self.PTensor = SC, PTensor
# defining epsilon
if eps_val is None:
self.eps_val = \
PTensor.p_consts_wf['\epsilon'](self.PTensor.py_stencil.w_sol[which_sol])
else:
self.eps_val = eps_val
print("eps_val:", self.eps_val)
self.beta_val = self.PTensor.p_consts_wf['\beta'](self.PTensor.py_stencil.w_sol[which_sol])
self.sigma_c_val = self.PTensor.p_consts_wf['\sigma_c'](self.PTensor.py_stencil.w_sol[which_sol])
self.tolman_c_val = self.PTensor.p_consts_wf['t_c'](self.PTensor.py_stencil.w_sol[which_sol])
self.dndx = None
# defining symbols
self.p_0, self.n_g, self.n_l, self.n_p, self.d_n = \
sp_symbols("p_0 n_g n_l n' \\frac{dn}{dr}")
self.eps = self.PTensor.p_consts_sym['\epsilon']
self.beta = self.PTensor.p_consts_sym['\beta']
self.sigma_c = self.PTensor.p_consts_sym['\sigma_c']
self.tolman_c = self.PTensor.p_consts_sym['t_c']
# Defining the integrand
self.integrand = (self.p_0 - self.SC.P)*self.SC.d_psi_f/(self.SC.psi_f**(1 + self.eps))
# Substituting \theta and e_2 and psi and eps and G
self.integrand = self.integrand.subs(self.SC.theta, self.SC.theta_val)
self.integrand = self.integrand.subs(self.SC.e2, 1)
self.integrand = self.integrand.subs(self.SC.psi, self.SC.psi_f)
self.integrand = self.integrand.subs(self.eps, self.eps_val)
self.integrand = self.integrand.subs(self.SC.G, self.SC.G_val)
# Make a function of n and p_0
self.integrand_np = \
(lambda n_, p_ :
self.integrand.subs(self.SC.n, n_).subs(self.p_0, p_).evalf())
# Numerical value of the Maxwell Construction's Integral
self.maxwell_integral = \
(lambda target_values:
integrate.quad((lambda n_ : self.integrand_np(n_, target_values[0][1])),
target_values[0][0], target_values[1][0])[0])
# Numerical value as a function of the delta density
self.maxwell_integral_delta = \
(lambda delta_: self.maxwell_integral(self.GuessDensitiesFlat(delta_)))
def GuessDensitiesFlat(self, delta):
target_values = []
arg_init = self.SC.coexistence_range[0][0] + delta
func_init = self.SC.P_subs.subs(self.SC.n, arg_init)
target_values.append((arg_init, func_init))
arg_range, arg_bins = [arg_init, self.SC.coexistence_range[1][0]], 2 ** 10
arg_delta = (arg_range[1] - arg_range[0])/arg_bins
delta_func_f = (lambda arg_:
(self.SC.P_subs.subs(self.SC.n, arg_) -
self.SC.P_subs.subs(self.SC.n, arg_range[0])))
zero_ranges = FindZeroRanges(delta_func_f, arg_range, arg_bins, arg_delta,
debug_flag = False)
# Always pick the last range for the stable solution: -1
#print("zero_ranges:", zero_ranges)
#print(bisect(delta_func_f, zero_ranges[0][0], zero_ranges[0][1]))
#print(bisect(delta_func_f, zero_ranges[-1][0], zero_ranges[-1][1]))
solution = bisect(delta_func_f, zero_ranges[-1][0], zero_ranges[-1][1])
arg_swap = solution
func_swap = self.SC.P_subs.subs(self.SC.n, arg_swap)
target_values.append((arg_swap, func_swap))
return target_values
def MechanicEquilibrium(self, n_bins = 32):
# Need to find the zero of self.maxwell_integral_delta
# Delta can vary between (0, and the difference between the gas maximum
# and the beginning of the coexistence region
'''
search_range = \
[self.SC.n_eps,
self.SC.extrema[0][0] - self.SC.coexistence_range[0][0] - self.SC.n_eps]
'''
search_range = \
[self.SC.n_eps,
self.SC.extrema[0][0] - self.SC.coexistence_range[0][0]]
search_delta = (search_range[1] - search_range[0])/n_bins
mech_eq_range = FindZeroRanges(self.maxwell_integral_delta,
search_range, n_bins, search_delta,
debug_flag = False)
mech_eq_delta = bisect(self.maxwell_integral_delta,
mech_eq_range[0][0], mech_eq_range[0][1])
self.mech_eq_zero = self.maxwell_integral_delta(mech_eq_delta)
self.mech_eq_target = self.GuessDensitiesFlat(mech_eq_delta)
print(self.mech_eq_target)
def DNDXLambda(self, rho_g):
prefactor = 24 * ((self.SC.psi_f)**self.eps)/(self.beta * self.SC.G * (self.SC.d_psi_f)**2)
prefactor = prefactor.subs(self.beta, self.beta_val)
prefactor = prefactor.subs(self.SC.G, self.SC.G_val)
prefactor = prefactor.subs(self.eps, self.eps_val)
prefactor_n = lambda n_: prefactor.subs(self.SC.n, n_).evalf()
self.dndx = lambda n_: math.sqrt(prefactor_n(n_) * self.maxwell_integral([rho_g, [n_, rho_g[1]]]))
def SurfaceTension(self, mech_eq_target):
self.DNDXLambda(mech_eq_target[0])
prefactor = self.SC.G_val * self.sigma_c_val
integrand_n = lambda n_: self.dndx(n_) * (self.SC.d_psi_f**2).subs(self.SC.n, n_).evalf()
integral = integrate.quad(integrand_n, mech_eq_target[0][0], mech_eq_target[1][0])
self.sigma_f = prefactor * integral[0]
return self.sigma_f
####################################################################################
### Subclass: PressureTensor
####################################################################################
class PressureTensor:
def __init__(self, py_stencil):
# One stencil at the time
self.py_stencil = py_stencil
# Associating weights symbols
self.w_sym = self.py_stencil.w_sym
self.w_sym_list = self.py_stencil.w_sym_list
# Get e_expr
if not hasattr(self.py_stencil, 'e_expr'):
self.py_stencil.GetWolfEqs()
if not hasattr(self.py_stencil, 'typ_eq_s'):
self.py_stencil.GetTypEqs()
self.e_expr = self.py_stencil.e_expr
self.typ_eq_s = self.py_stencil.typ_eq_s
self.B2q_expr = self.py_stencil.B2q_expr
self.B2n_expr = self.py_stencil.B2n_expr
# Initializing Pressure Tensor symbols
self.PConstants()
self.InitPCoeff()
self.PExpressW()
def GetExprValues(self, w_sol = None):
## Need to add the new constants: Chi/Lambda
if w_sol is None:
w_sol = self.py_stencil.w_sol[0]
print(self.e_expr)
print("Isotropy constants")
for elem in self.e_expr:
w_i = 0
swap_expr = self.e_expr[elem]
for w in self.w_sym_list:
swap_expr = swap_expr.subs(w, w_sol[w_i])
w_i += 1
print(self.e_expr[elem], swap_expr)
print("\n")
print("Pressure Tensor Constants")
for elem in self.p_consts_sym:
w_i = 0
swap_expr = self.p_consts_w[elem]
for w in self.w_sym_list:
swap_expr = swap_expr.subs(w, w_sol[w_i])
w_i += 1
print(self.p_consts_sym[elem], swap_expr)
print("\n")
print("Typical Equations")
for elem in self.typ_eq_s:
for eq in self.typ_eq_s[elem]:
swap_expr = eq
w_i = 0
for w_sym in self.w_sym_list:
swap_expr = swap_expr.subs(w_sym, w_sol[w_i])
w_i += 1
print(elem, self.typ_eq_s[elem], swap_expr)
print("\n")
print("Wolfram Equations: B2q")
for elem in self.B2n_expr:
for eq in self.B2n_expr[elem]:
swap_expr = eq
w_i = 0
for w_sym in self.w_sym_list:
swap_expr = swap_expr.subs(w_sym, w_sol[w_i])
w_i += 1
print(elem, self.B2n_expr[elem], swap_expr)
print("\n")
print("Wolfram Equations: B2q")
for elem in self.B2q_expr:
for eq in self.B2q_expr[elem]:
swap_expr = eq
w_i = 0
for w_sym in self.w_sym_list:
swap_expr = swap_expr.subs(w_sym, w_sol[w_i])
w_i += 1
print(elem, self.B2q_expr[elem], swap_expr)
def InitPCoeff(self):
# List of coefficients for the pressure tensor constants
# Need to do this because each stencil can have a different
# number of groups: for now: no more than the first 5!
self.alpha_c, self.beta_c, self.gamma_c, self.eta_c, self.kappa_c, self.lambda_c = \
[0] * 25, [0] * 25, [0] * 25, [0] * 25, [0] * 25, [0] * 25
self.sigma_c_c, self.tolman_c_c = [0] * 25, [0] * 25
self.lambda_i_c, self.lambda_t_c, self.lambda_n_c = [0] * 25, [0] * 25, [0] * 25
self.chi_i_c, self.chi_t_c, self.chi_n_c = [0] * 25, [0] * 25, [0] * 25
# alpha
self.alpha_c[4], self.alpha_c[5], self.alpha_c[8] = 2, 4, 4
self.alpha_c[9], self.alpha_c[10] = 12, 24
self.alpha_c[13], self.alpha_c[16], self.alpha_c[17] = Rational(88, 3), 40, 80
# beta
self.beta_c[1], self.beta_c[2], self.beta_c[4], self.beta_c[5], self.beta_c[8] = \
Rational("1/2"), 1, 6, 13, 12
self.beta_c[9], self.beta_c[10] = Rational(57, 2), 58
self.beta_c[13], self.beta_c[16], self.beta_c[17] = Rational(203, 3), 88, 177
# gamma
self.gamma_c[5], self.gamma_c[8], self.gamma_c[10] = 1, 4, Rational(8, 3)
self.gamma_c[13], self.gamma_c[17] = Rational(68, 3), 5
# eta
self.eta_c[2], self.eta_c[5], self.eta_c[8], self.eta_c[10] = 1, 7, 12, Rational(46,3)
self.eta_c[13], self.eta_c[17] = Rational(148, 3), 27
# kappa
self.kappa_c[5], self.kappa_c[8] = 4, 8
# lambda
self.lambda_c[2], self.lambda_c[5], self.lambda_c[8] = 2, 12, 24
# sigma_c
self.sigma_c_c[1], self.sigma_c_c[4], self.sigma_c_c[5] = -6, -96, -108
self.sigma_c_c[9], self.sigma_c_c[10] = -486, -768
self.sigma_c_c[13], self.sigma_c_c[16], self.sigma_c_c[17] = -300, -1536, 2700
# tolman_c
self.tolman_c_c[1], self.tolman_c_c[4], self.tolman_c_c[5] = \
-Rational('1/2'), -6, -6
# Lambda_s
self.lambda_i_c[1], self.lambda_i_c[2], self.lambda_i_c[4] = Rational('1/2'), -2, 6
self.lambda_i_c[5], self.lambda_i_c[8] = -6, -24
self.lambda_t_c[2], self.lambda_t_c[5], self.lambda_t_c[8] = 2, 12, 24
self.lambda_n_c[2], self.lambda_n_c[5], self.lambda_n_c[8] = 1, 7, 12
# chi_s
self.chi_i_c[4], self.chi_i_c[5], self.chi_i_c[8] = 2, -1, -8
self.chi_t_c[5], self.chi_t_c[8] = 4, 8
self.chi_n_c[5], self.chi_n_c[8] = 1, 4
def PConstants(self):
# Defining symbols
self.p_consts_sym = {}
self.p_consts_sym['\alpha'] = sp_symbols('\\alpha')
self.p_consts_sym['\beta'] = sp_symbols('\\beta')
self.p_consts_sym['\gamma'] = sp_symbols('\\gamma')
self.p_consts_sym['\eta'] = sp_symbols('\\eta')
self.p_consts_sym['\kappa'] = sp_symbols('\\kappa')
self.p_consts_sym['\lambda'] = sp_symbols('\\lambda')
self.p_consts_sym['\epsilon'] = sp_symbols('\\epsilon')
self.p_consts_sym['\sigma_c'] = sp_symbols('\\sigma_c')
self.p_consts_sym['t_c'] = sp_symbols('t_c')
# These symbols are not good anymore for higher order expansions
self.p_consts_sym['\Lambda_{N}'] = sp_symbols('\\Lambda_{N}')
self.p_consts_sym['\Lambda_{T}'] = sp_symbols('\\Lambda_{T}')
self.p_consts_sym['\Lambda_{I}'] = sp_symbols('\\Lambda_{I}')
self.p_consts_sym['\chi_{N}'] = sp_symbols('\\chi_{N}')
self.p_consts_sym['\chi_{T}'] = sp_symbols('\\chi_{T}')
self.p_consts_sym['\chi_{I}'] = sp_symbols('\\chi_{I}')
def PExpressW(self):
# Defining expressions: e
# Should use a dictionary for the coefficients
self.p_consts_w = {}
self.p_consts_w['\alpha'] = 0
for len2 in self.py_stencil.len_2s:
self.p_consts_w['\alpha'] += -12*self.alpha_c[len2] * self.w_sym[len2]
self.p_consts_w['\beta'] = 0
for len2 in self.py_stencil.len_2s:
self.p_consts_w['\beta'] += 12*self.beta_c[len2] * self.w_sym[len2]
self.p_consts_w['\gamma'] = 0
for len2 in self.py_stencil.len_2s:
self.p_consts_w['\gamma'] += -4*self.gamma_c[len2] * self.w_sym[len2]
self.p_consts_w['\eta'] = 0
for len2 in self.py_stencil.len_2s:
self.p_consts_w['\eta'] += 4*self.eta_c[len2] * self.w_sym[len2]
self.p_consts_w['\kappa'] = 0
for len2 in self.py_stencil.len_2s:
self.p_consts_w['\kappa'] += self.kappa_c[len2] * self.w_sym[len2]
self.p_consts_w['\lambda'] = 0
for len2 in self.py_stencil.len_2s:
            self.p_consts_w['\lambda'] += self.lambda_c[len2] * self.w_sym[len2]
self.p_consts_w['\sigma_c'] = 0
for len2 in self.py_stencil.len_2s:
self.p_consts_w['\sigma_c'] += self.sigma_c_c[len2] * self.w_sym[len2]/12
self.p_consts_w['t_c'] = 0
for len2 in self.py_stencil.len_2s:
self.p_consts_w['t_c'] += self.tolman_c_c[len2] * self.w_sym[len2]
# Lambdas, Chis
self.p_consts_w['\Lambda_{I}'] = 0
for len2 in self.py_stencil.len_2s:
self.p_consts_w['\Lambda_{I}'] += self.lambda_i_c[len2] * self.w_sym[len2]
self.p_consts_w['\Lambda_{T}'] = 0
for len2 in self.py_stencil.len_2s:
self.p_consts_w['\Lambda_{T}'] += self.lambda_t_c[len2] * self.w_sym[len2]
self.p_consts_w['\Lambda_{N}'] = 0
for len2 in self.py_stencil.len_2s:
self.p_consts_w['\Lambda_{N}'] += self.lambda_n_c[len2] * self.w_sym[len2]
self.p_consts_w['\chi_{I}'] = 0
for len2 in self.py_stencil.len_2s:
self.p_consts_w['\chi_{I}'] += self.chi_i_c[len2] * self.w_sym[len2]
self.p_consts_w['\chi_{T}'] = 0
for len2 in self.py_stencil.len_2s:
self.p_consts_w['\chi_{T}'] += self.chi_t_c[len2] * self.w_sym[len2]
self.p_consts_w['\chi_{N}'] = 0
for len2 in self.py_stencil.len_2s:
self.p_consts_w['\chi_{N}'] += self.chi_n_c[len2] * self.w_sym[len2]
self.p_consts_w['\epsilon'] = -2*self.p_consts_w['\alpha']/self.p_consts_w['\beta']
# Defining Lambdas
self.p_consts_wf = {}
for elem in self.p_consts_w:
self.p_consts_wf[str(elem)] = \
sp_lambdify([self.w_sym_list],
self.p_consts_w[str(elem)])
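    # A minimal usage sketch: once PConstants, InitPCoeff and PExpressW have
    # run, every entry of p_consts_wf is a lambdified function mapping a full
    # weight list onto the numerical value of the corresponding pressure
    # tensor constant, e.g.
    #
    #     value = self.p_consts_wf['\alpha'](self.py_stencil.w_sol[0])
    #
    # which is the same substitution GetExprValues performs term by term with
    # subs(), evaluated numerically in a single call.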
class ShanChanEquilibriumCache(ManageData):
def __init__(self,
stencil = None,
G = None, c2 = None, psi_f = None,
dump_file = 'SCEqCache'):
ManageData.__init__(self, dump_file = dump_file)
if stencil is None:
raise Exception("Missing argument stencil")
if G is None:
raise Exception("Missing argument G")
if c2 is None:
raise Exception("Missing argument c2")
if psi_f is None:
raise Exception("Missing argument psi_f")
'''
Looking for the file and data
'''
self.is_file, self.is_key = ManageData.Read(self), False
self.dict_string = (str(psi_f) + "_" + str(float(G)) + "_" +
str(c2) + "_" + str(stencil.w_sol[0]))
if self.is_file:
if self.dict_string in ManageData.WhichData(self):
self.data = ManageData.PullData(self, self.dict_string)
self.is_key = True
if self.is_key is False:
'''
I need to do this until I write a new pressure tensor class
that also computes the Taylor expansion for the flat interface
and consequently the expression for \varepsilon
'''
w1, w2, w4, w5, w8 = sp_symbols("w(1) w(2) w(4) w(5) w(8)")
w9, w10, w13, w16, w17 = sp_symbols("w(9) w(10) w(13) w(16) w(17)")
w_sym_list = [w1, w2, w4, w5, w8, w9, w10, w13, w16, w17]
_eps_expr = (+ 48*w4 + 96*w5 + 96*w8
+ 288*w9 + 576*w10 + 704*w13 + 960*w16 + 1920*w17)
_eps_expr /= (+ 6*w1 + 12*w2 + 72*w4 + 156*w5 + 144*w8
+ 342*w9 + 696*w10 + 812*w13 + 1056*w16 + 2124*w17)
self.eps_lambda = sp_lambdify([w_sym_list], _eps_expr)
_e2_expr = stencil.e_expr[2]
self.e2_lambda = sp_lambdify([w_sym_list], _e2_expr)
_weights_list = None
if len(stencil.w_sol[0]) != 10:
len_diff = 10 - len(stencil.w_sol[0])
if len_diff < 0:
raise Exception("The number of weights must be 5 at most!")
_weights_list = stencil.w_sol[0] + [0 for i in range(len_diff)]
else:
_weights_list = stencil.w_sol[0]
_shan_chen = \
ShanChen(psi_f = psi_f, G_val = G,
theta_val = c2,
e2_val = self.e2_lambda(_weights_list))
_shan_chen.PressureTensorInit(stencil)
_shan_chen.FlatInterfaceProperties()
_shan_chen.FInterface.MechanicEquilibrium()
_mech_eq_target = _shan_chen.FInterface.mech_eq_target
_sigma_f = \
_shan_chen.FInterface.SurfaceTension(_mech_eq_target)
_n_g = _shan_chen.FInterface.mech_eq_target[0][0]
_n_l = _shan_chen.FInterface.mech_eq_target[1][0]
_p_0 = _shan_chen.FInterface.mech_eq_target[1][1]
_n_c = _shan_chen.n_c
_G_c = _shan_chen.G_c
_data_dict = {'G_c': _G_c, 'n_c': _n_c,
'n_l': _n_l, 'n_g': _n_g,
'p_0': _p_0, 'sigma_f': _sigma_f}
self.PushData(data = _data_dict,
key = self.dict_string)
self.Dump()
def GetFromCache(self):
return ManageData.PullData(self, key = self.dict_string)
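# A minimal usage sketch, assuming a stencil object exposing w_sol and e_expr
# (like the py_stencil used above) and a sympy expression psi_f for the
# pseudo-potential are already available; the numerical values below are
# purely illustrative:
#
#     cache = ShanChanEquilibriumCache(stencil=stencil, G=-3.6,
#                                      c2=Rational(1, 3), psi_f=psi_f,
#                                      dump_file='SCEqCache')
#     eq = cache.GetFromCache()
#     print(eq['n_l'], eq['n_g'], eq['sigma_f'])
#
# The returned dictionary holds the critical point ('G_c', 'n_c'), the
# coexisting bulk densities ('n_l', 'n_g'), the bulk pressure 'p_0' and the
# flat-interface surface tension 'sigma_f', exactly as pushed into the cache
# in __init__.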
|
[
"sympy.symbols",
"sympy.solvers.solve",
"idpy.Utils.ManageData.ManageData.__init__",
"sympy.Rational",
"scipy.integrate.quad",
"sympy.diff",
"sympy.lambdify",
"idpy.Utils.ManageData.ManageData.WhichData",
"sympy.exp",
"scipy.optimize.bisect",
"idpy.Utils.ManageData.ManageData.PullData",
"idpy.Utils.ManageData.ManageData.Read"
] |
[((3469, 3513), 'sympy.symbols', 'sp_symbols', (['"""n G \\\\theta \\\\psi \\\\psi\' e_{2}"""'], {}), '("n G \\\\theta \\\\psi \\\\psi\' e_{2}")\n', (3479, 3513), True, 'from sympy import symbols as sp_symbols\n'), ((3230, 3268), 'scipy.optimize.bisect', 'bisect', (['d_func', 'z_range[0]', 'z_range[1]'], {}), '(d_func, z_range[0], z_range[1])\n', (3236, 3268), False, 'from scipy.optimize import fsolve, bisect\n'), ((3956, 3980), 'sympy.diff', 'diff', (['self.psi_f', 'self.n'], {}), '(self.psi_f, self.n)\n', (3960, 3980), False, 'from sympy import Rational, diff, simplify\n'), ((4192, 4224), 'sympy.lambdify', 'sp_lambdify', (['self.n', 'self.P_subs'], {}), '(self.n, self.P_subs)\n', (4203, 4224), True, 'from sympy import lambdify as sp_lambdify\n'), ((4522, 4547), 'sympy.diff', 'diff', (['P_subs_swap', 'self.n'], {}), '(P_subs_swap, self.n)\n', (4526, 4547), False, 'from sympy import Rational, diff, simplify\n'), ((4568, 4590), 'sympy.diff', 'diff', (['self.d_P', 'self.n'], {}), '(self.d_P, self.n)\n', (4572, 4590), False, 'from sympy import Rational, diff, simplify\n'), ((4659, 4705), 'sympy.solvers.solve', 'solve', (['[self.d_P, self.dd_P]', '[self.G, self.n]'], {}), '([self.d_P, self.dd_P], [self.G, self.n])\n', (4664, 4705), False, 'from sympy.solvers import solve\n'), ((7408, 7484), 'scipy.optimize.bisect', 'bisect', (['func_f', '(self.extrema[1][0] + self.n_eps)', '(self.range_ext + self.n_eps)'], {}), '(func_f, self.extrema[1][0] + self.n_eps, self.range_ext + self.n_eps)\n', (7414, 7484), False, 'from scipy.optimize import fsolve, bisect\n'), ((24548, 24594), 'idpy.Utils.ManageData.ManageData.__init__', 'ManageData.__init__', (['self'], {'dump_file': 'dump_file'}), '(self, dump_file=dump_file)\n', (24567, 24594), False, 'from idpy.Utils.ManageData import ManageData\n'), ((27858, 27905), 'idpy.Utils.ManageData.ManageData.PullData', 'ManageData.PullData', (['self'], {'key': 'self.dict_string'}), '(self, key=self.dict_string)\n', (27877, 27905), False, 'from idpy.Utils.ManageData import ManageData\n'), ((3754, 3776), 'sympy.exp', 'sympy_exp', (['(-1 / self.n)'], {}), '(-1 / self.n)\n', (3763, 3776), True, 'from sympy import exp as sympy_exp\n'), ((5221, 5264), 'sympy.lambdify', 'sp_lambdify', (['self.n', '(self.P_subs - self.P_c)'], {}), '(self.n, self.P_subs - self.P_c)\n', (5232, 5264), True, 'from sympy import lambdify as sp_lambdify\n'), ((6926, 6972), 'scipy.optimize.bisect', 'bisect', (['func_f', 'self.n_eps', 'self.extrema[0][0]'], {}), '(func_f, self.n_eps, self.extrema[0][0])\n', (6932, 6972), False, 'from scipy.optimize import fsolve, bisect\n'), ((8738, 8781), 'sympy.symbols', 'sp_symbols', (['"""p_0 n_g n_l n\' \\\\frac{dn}{dr}"""'], {}), '("p_0 n_g n_l n\' \\\\frac{dn}{dr}")\n', (8748, 8781), True, 'from sympy import symbols as sp_symbols\n'), ((11431, 11491), 'scipy.optimize.bisect', 'bisect', (['delta_func_f', 'zero_ranges[-1][0]', 'zero_ranges[-1][1]'], {}), '(delta_func_f, zero_ranges[-1][0], zero_ranges[-1][1])\n', (11437, 11491), False, 'from scipy.optimize import fsolve, bisect\n'), ((12622, 12699), 'scipy.optimize.bisect', 'bisect', (['self.maxwell_integral_delta', 'mech_eq_range[0][0]', 'mech_eq_range[0][1]'], {}), '(self.maxwell_integral_delta, mech_eq_range[0][0], mech_eq_range[0][1])\n', (12628, 12699), False, 'from scipy.optimize import fsolve, bisect\n'), ((13752, 13823), 'scipy.integrate.quad', 'integrate.quad', (['integrand_n', 'mech_eq_target[0][0]', 'mech_eq_target[1][0]'], {}), '(integrand_n, mech_eq_target[0][0], mech_eq_target[1][0])\n', (13766, 
13823), True, 'import scipy.integrate as integrate\n'), ((20157, 20178), 'sympy.symbols', 'sp_symbols', (['"""\\\\alpha"""'], {}), "('\\\\alpha')\n", (20167, 20178), True, 'from sympy import symbols as sp_symbols\n'), ((20220, 20240), 'sympy.symbols', 'sp_symbols', (['"""\\\\beta"""'], {}), "('\\\\beta')\n", (20230, 20240), True, 'from sympy import symbols as sp_symbols\n'), ((20283, 20304), 'sympy.symbols', 'sp_symbols', (['"""\\\\gamma"""'], {}), "('\\\\gamma')\n", (20293, 20304), True, 'from sympy import symbols as sp_symbols\n'), ((20345, 20364), 'sympy.symbols', 'sp_symbols', (['"""\\\\eta"""'], {}), "('\\\\eta')\n", (20355, 20364), True, 'from sympy import symbols as sp_symbols\n'), ((20407, 20428), 'sympy.symbols', 'sp_symbols', (['"""\\\\kappa"""'], {}), "('\\\\kappa')\n", (20417, 20428), True, 'from sympy import symbols as sp_symbols\n'), ((20472, 20494), 'sympy.symbols', 'sp_symbols', (['"""\\\\lambda"""'], {}), "('\\\\lambda')\n", (20482, 20494), True, 'from sympy import symbols as sp_symbols\n'), ((20539, 20562), 'sympy.symbols', 'sp_symbols', (['"""\\\\epsilon"""'], {}), "('\\\\epsilon')\n", (20549, 20562), True, 'from sympy import symbols as sp_symbols\n'), ((20607, 20630), 'sympy.symbols', 'sp_symbols', (['"""\\\\sigma_c"""'], {}), "('\\\\sigma_c')\n", (20617, 20630), True, 'from sympy import symbols as sp_symbols\n'), ((20670, 20687), 'sympy.symbols', 'sp_symbols', (['"""t_c"""'], {}), "('t_c')\n", (20680, 20687), True, 'from sympy import symbols as sp_symbols\n'), ((20812, 20838), 'sympy.symbols', 'sp_symbols', (['"""\\\\Lambda_{N}"""'], {}), "('\\\\Lambda_{N}')\n", (20822, 20838), True, 'from sympy import symbols as sp_symbols\n'), ((20886, 20912), 'sympy.symbols', 'sp_symbols', (['"""\\\\Lambda_{T}"""'], {}), "('\\\\Lambda_{T}')\n", (20896, 20912), True, 'from sympy import symbols as sp_symbols\n'), ((20960, 20986), 'sympy.symbols', 'sp_symbols', (['"""\\\\Lambda_{I}"""'], {}), "('\\\\Lambda_{I}')\n", (20970, 20986), True, 'from sympy import symbols as sp_symbols\n'), ((21032, 21055), 'sympy.symbols', 'sp_symbols', (['"""\\\\chi_{N}"""'], {}), "('\\\\chi_{N}')\n", (21042, 21055), True, 'from sympy import symbols as sp_symbols\n'), ((21100, 21123), 'sympy.symbols', 'sp_symbols', (['"""\\\\chi_{T}"""'], {}), "('\\\\chi_{T}')\n", (21110, 21123), True, 'from sympy import symbols as sp_symbols\n'), ((21168, 21191), 'sympy.symbols', 'sp_symbols', (['"""\\\\chi_{I}"""'], {}), "('\\\\chi_{I}')\n", (21178, 21191), True, 'from sympy import symbols as sp_symbols\n'), ((25011, 25032), 'idpy.Utils.ManageData.ManageData.Read', 'ManageData.Read', (['self'], {}), '(self)\n', (25026, 25032), False, 'from idpy.Utils.ManageData import ManageData\n'), ((25677, 25715), 'sympy.symbols', 'sp_symbols', (['"""w(1) w(2) w(4) w(5) w(8)"""'], {}), "('w(1) w(2) w(4) w(5) w(8)')\n", (25687, 25715), True, 'from sympy import symbols as sp_symbols\n'), ((25753, 25795), 'sympy.symbols', 'sp_symbols', (['"""w(9) w(10) w(13) w(16) w(17)"""'], {}), "('w(9) w(10) w(13) w(16) w(17)')\n", (25763, 25795), True, 'from sympy import symbols as sp_symbols\n'), ((26193, 26229), 'sympy.lambdify', 'sp_lambdify', (['[w_sym_list]', '_eps_expr'], {}), '([w_sym_list], _eps_expr)\n', (26204, 26229), True, 'from sympy import lambdify as sp_lambdify\n'), ((26301, 26336), 'sympy.lambdify', 'sp_lambdify', (['[w_sym_list]', '_e2_expr'], {}), '([w_sym_list], _e2_expr)\n', (26312, 26336), True, 'from sympy import lambdify as sp_lambdify\n'), ((2881, 2898), 'sympy.diff', 'diff', (['func', 'f_arg'], {}), '(func, f_arg)\n', (2885, 
2898), False, 'from sympy import Rational, diff, simplify\n'), ((18184, 18199), 'sympy.Rational', 'Rational', (['(88)', '(3)'], {}), '(88, 3)\n', (18192, 18199), False, 'from sympy import Rational, diff, simplify\n'), ((18338, 18353), 'sympy.Rational', 'Rational', (['"""1/2"""'], {}), "('1/2')\n", (18346, 18353), False, 'from sympy import Rational, diff, simplify\n'), ((18414, 18429), 'sympy.Rational', 'Rational', (['(57)', '(2)'], {}), '(57, 2)\n', (18422, 18429), False, 'from sympy import Rational, diff, simplify\n'), ((18498, 18514), 'sympy.Rational', 'Rational', (['(203)', '(3)'], {}), '(203, 3)\n', (18506, 18514), False, 'from sympy import Rational, diff, simplify\n'), ((18615, 18629), 'sympy.Rational', 'Rational', (['(8)', '(3)'], {}), '(8, 3)\n', (18623, 18629), False, 'from sympy import Rational, diff, simplify\n'), ((18679, 18694), 'sympy.Rational', 'Rational', (['(68)', '(3)'], {}), '(68, 3)\n', (18687, 18694), False, 'from sympy import Rational, diff, simplify\n'), ((18800, 18815), 'sympy.Rational', 'Rational', (['(46)', '(3)'], {}), '(46, 3)\n', (18808, 18815), False, 'from sympy import Rational, diff, simplify\n'), ((18860, 18876), 'sympy.Rational', 'Rational', (['(148)', '(3)'], {}), '(148, 3)\n', (18868, 18876), False, 'from sympy import Rational, diff, simplify\n'), ((19558, 19573), 'sympy.Rational', 'Rational', (['"""1/2"""'], {}), "('1/2')\n", (19566, 19573), False, 'from sympy import Rational, diff, simplify\n'), ((25236, 25262), 'idpy.Utils.ManageData.ManageData.WhichData', 'ManageData.WhichData', (['self'], {}), '(self)\n', (25256, 25262), False, 'from idpy.Utils.ManageData import ManageData\n'), ((25292, 25335), 'idpy.Utils.ManageData.ManageData.PullData', 'ManageData.PullData', (['self', 'self.dict_string'], {}), '(self, self.dict_string)\n', (25311, 25335), False, 'from idpy.Utils.ManageData import ManageData\n'), ((3532, 3547), 'sympy.Rational', 'Rational', (['"""1/2"""'], {}), "('1/2')\n", (3540, 3547), False, 'from sympy import Rational, diff, simplify\n'), ((19438, 19453), 'sympy.Rational', 'Rational', (['"""1/2"""'], {}), "('1/2')\n", (19446, 19453), False, 'from sympy import Rational, diff, simplify\n')]
|
from flask import render_template,request,redirect,url_for
from . import main
from ..requests import get_news, get_articles
from ..models import Source, Article
# Views
@main.route('/')
def index():
'''
Function that returns the index page and its data
'''
general_list = get_news('general')
health_list = get_news('health')
business_list = get_news('business')
technology_list = get_news('technology')
sports_list = get_news('sports')
entertainment_list = get_news('entertainment')
return render_template('index.html', general=general_list, health=health_list, business=business_list, sports=sports_list, technology=technology_list, entertainment=entertainment_list)
@main.route('/news/<id>')
def news(id):
'''
Returns the news article from a highlight
'''
news_args = get_articles(id)
return render_template("articles.html", news=news_args)
|
[
"flask.render_template"
] |
[((533, 718), 'flask.render_template', 'render_template', (['"""index.html"""'], {'general': 'general_list', 'health': 'health_list', 'business': 'business_list', 'sports': 'sports_list', 'technology': 'technology_list', 'entertainment': 'entertainment_list'}), "('index.html', general=general_list, health=health_list,\n business=business_list, sports=sports_list, technology=technology_list,\n entertainment=entertainment_list)\n", (548, 718), False, 'from flask import render_template, request, redirect, url_for\n'), ((930, 978), 'flask.render_template', 'render_template', (['"""articles.html"""'], {'news': 'news_args'}), "('articles.html', news=news_args)\n", (945, 978), False, 'from flask import render_template, request, redirect, url_for\n')]
|
import sys
import types
import inspect
def isstring(s):
# if we use Python 3
if (sys.version_info[0] >= 3):
return isinstance(s, str)
# we use Python 2
return isinstance(s, basestring)
def normalize_func(func):
# return None for builtins
if (inspect.isbuiltin(func)):
return None
return func
def get_doc(func):
doc = inspect.getdoc(func)
if doc is None:
func = normalize_func(func)
if func is None:
return None
else:
doc = inspect.getdoc(func)
return doc
def get_property_doc(target, prop):
for name, obj in inspect.getmembers(type(target), inspect.isdatadescriptor):
if (isinstance(obj, property) and name == prop):
return inspect.getdoc(obj.fget)
return None
def get_argspec(func):
try:
if sys.version_info[0] >= 3:
return inspect.getfullargspec(func)
else:
return inspect.getargspec(func)
except TypeError:
return None
def get_arguments(func):
func = normalize_func(func)
if func is None:
return None
argspec = get_argspec(func)
if argspec is None:
return None
args = argspec.args
if 'self' in args:
args.remove('self')
return args
def get_r_representation(default):
if callable(default) and hasattr(default, '__name__'):
arg_value = default.__name__
else:
if default is None:
arg_value = "NULL"
elif type(default) == type(True):
if default == True:
arg_value = "TRUE"
else:
arg_value = "FALSE"
elif isstring(default):
arg_value = "\"%s\"" % default
elif isinstance(default, int):
arg_value = "%rL" % default
elif isinstance(default, float):
arg_value = "%r" % default
elif isinstance(default, list):
arg_value = "c("
for i, item in enumerate(default):
                if i == (len(default) - 1):
arg_value += "%s)" % get_r_representation(item)
else:
arg_value += "%s, " % get_r_representation(item)
elif isinstance(default, (tuple, set)):
arg_value = "list("
for i, item in enumerate(default):
                if i == (len(default) - 1):
arg_value += "%s)" % get_r_representation(item)
else:
arg_value += "%s, " % get_r_representation(item)
elif isinstance(default, dict):
arg_value = "list("
for i in range(len(default)):
i_arg_value = "%s = %s" % \
(default.keys()[i], get_r_representation(default.values()[i]))
if i is (len(default) - 1):
arg_value += "%s)" % i_arg_value
else:
arg_value += "%s, " % i_arg_value
else:
arg_value = "%r" % default
# if the value starts with "tf." then convert to $ usage
if (arg_value.startswith("tf.")):
arg_value = arg_value.replace(".", "$")
return(arg_value)
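# Illustrative conversions implied by the branches above (R-style literals):
#
#     get_r_representation(None)      -> "NULL"
#     get_r_representation(True)      -> "TRUE"
#     get_r_representation(3)         -> "3L"
#     get_r_representation(2.5)       -> "2.5"
#     get_r_representation("abc")     -> '"abc"'
#     get_r_representation([1, 2])    -> "c(1L, 2L)"
#     get_r_representation({"a": 1})  -> "list(a = 1L)"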
def generate_signature_for_function(func):
"""Given a function, returns a string representing its args."""
func = normalize_func(func)
if func is None:
return None
args_list = []
argspec = get_argspec(func)
if argspec is None:
return None
first_arg_with_default = (
len(argspec.args or []) - len(argspec.defaults or []))
for arg in argspec.args[:first_arg_with_default]:
if arg == "self":
# Python documentation typically skips `self` when printing method
# signatures.
continue
args_list.append(arg)
if argspec.varargs == "args" and hasattr(argspec, 'keywords') and argspec.keywords == "kwds":
original_func = func.__closure__[0].cell_contents
return generate_signature_for_function(original_func)
if argspec.defaults:
for arg, default in zip(
argspec.args[first_arg_with_default:], argspec.defaults):
arg_value = get_r_representation(default)
args_list.append("%s = %s" % (arg, arg_value))
if argspec.varargs:
args_list.append("...")
if hasattr(argspec, 'keywords') and argspec.keywords:
args_list.append("...")
return "(" + ", ".join(args_list) + ")"
|
[
"inspect.getargspec",
"inspect.isbuiltin",
"inspect.getdoc",
"inspect.getfullargspec"
] |
[((278, 301), 'inspect.isbuiltin', 'inspect.isbuiltin', (['func'], {}), '(func)\n', (295, 301), False, 'import inspect\n'), ((368, 388), 'inspect.getdoc', 'inspect.getdoc', (['func'], {}), '(func)\n', (382, 388), False, 'import inspect\n'), ((500, 520), 'inspect.getdoc', 'inspect.getdoc', (['func'], {}), '(func)\n', (514, 520), False, 'import inspect\n'), ((719, 743), 'inspect.getdoc', 'inspect.getdoc', (['obj.fget'], {}), '(obj.fget)\n', (733, 743), False, 'import inspect\n'), ((835, 863), 'inspect.getfullargspec', 'inspect.getfullargspec', (['func'], {}), '(func)\n', (857, 863), False, 'import inspect\n'), ((887, 911), 'inspect.getargspec', 'inspect.getargspec', (['func'], {}), '(func)\n', (905, 911), False, 'import inspect\n')]
|
# -*- coding: utf-8 -*-
# *****************************************************************************
# ufit, a universal scattering fitting suite
#
# Copyright (c) 2013-2019, <NAME> and contributors. All rights reserved.
# Licensed under a 2-clause BSD license, see LICENSE.
# *****************************************************************************
"""Models for different peak shapes."""
from numpy import exp, log, sqrt, sin, cos, pi
from scipy.special import wofz
from ufit.models import Model
__all__ = ['Gauss', 'GaussInt', 'Lorentz', 'LorentzInt',
'Voigt', 'PseudoVoigt', 'DHO']
class Gauss(Model):
"""Gaussian peak
Parameters:
* `pos` - Peak center position
* `ampl` - Amplitude at center
* `fwhm` - Full width at half maximum
"""
param_names = ['pos', 'ampl', 'fwhm']
def __init__(self, name='', pos=None, ampl=None, fwhm=None):
pp, pa, pf = self._init_params(name, self.param_names, locals())
# amplitude and fwhm should be positive
self.params[1].finalize = abs
self.params[2].finalize = abs
self.fcn = lambda p, x: \
abs(p[pa]) * exp(-(x - p[pp])**2/p[pf]**2 * 4*log(2))
pick_points = ['peak', 'width']
def convert_pick(self, p, w):
return {
self.params[0].name: p[0], # position
self.params[1].name: p[1], # peak amplitude
self.params[2].name: 2*abs(w[0] - p[0]), # FWHM
}
class GaussInt(Model):
"""Gaussian peak with integrated intensity parameter
Parameters:
* `pos` - Peak center position
* `int` - Integrated intensity
* `fwhm` - Full width at half maximum
"""
param_names = ['pos', 'int', 'fwhm']
def __init__(self, name='', pos=None, int=None, fwhm=None):
pp, pint, pf = self._init_params(name, self.param_names, locals())
# integration and fwhm should be positive
self.params[1].finalize = abs
self.params[2].finalize = abs
self.fcn = lambda p, x: \
abs(p[pint]) / (abs(p[pf]) * sqrt(pi/(4 * log(2)))) * \
exp(-(x - p[pp])**2/p[pf]**2 * 4*log(2))
pick_points = ['peak', 'width']
def convert_pick(self, p, w):
fwhm = 2*abs(w[0] - p[0])
return {
self.params[0].name: p[0], # position
self.params[1].name: p[1] * fwhm * sqrt(2*pi), # peak intensity (integrated)
self.params[2].name: fwhm, # FWHM
}
class Lorentz(Model):
"""Lorentzian peak
Parameters:
* `pos` - Peak center position
* `ampl` - Amplitude at center
* `fwhm` - Full width at half maximum
"""
param_names = ['pos', 'ampl', 'fwhm']
def __init__(self, name='', pos=None, ampl=None, fwhm=None):
pp, pa, pf = self._init_params(name, self.param_names, locals())
# amplitude and fwhm should be positive
self.params[1].finalize = abs
self.params[2].finalize = abs
self.fcn = lambda p, x: abs(p[pa]) / (1 + 4*(x - p[pp])**2/p[pf]**2)
pick_points = ['peak', 'width']
def convert_pick(self, p, w):
return {
self.params[0].name: p[0], # position
self.params[1].name: p[1], # peak amplitude
self.params[2].name: 2*abs(w[0] - p[0]), # FWHM
}
class LorentzInt(Model):
"""Lorentzian peak with integrated intensity parameter
Parameters:
* `pos` - Peak center position
* `int` - Integrated intensity
* `fwhm` - Full width at half maximum
"""
param_names = ['pos', 'int', 'fwhm']
def __init__(self, name='', pos=None, int=None, fwhm=None):
pp, pint, pf = self._init_params(name, self.param_names, locals())
# integration and fwhm should be positive
self.params[1].finalize = abs
self.params[2].finalize = abs
self.fcn = lambda p, x: 2 * abs(p[pint]) / (pi * p[pf]) / (1 + 4*(x - p[pp])**2/p[pf]**2)
pick_points = ['peak', 'width']
def convert_pick(self, p, w):
fwhm = 2*abs(w[0] - p[0])
return {
self.params[0].name: p[0], # position
self.params[1].name: p[1] * fwhm * pi/2, # integrated intensity
self.params[2].name: fwhm, # FWHM
}
class Voigt(Model):
"""Voigt peak
A convolution of a Gaussian and a Lorentzian.
Parameters:
* `pos` - Peak center position
* `ampl` - Amplitude at center
* `fwhm` - Full width at half maximum of the Gauss part
* `shape` - Lorentz contribution
"""
param_names = ['pos', 'ampl', 'fwhm', 'shape']
def __init__(self, name='', pos=None, ampl=None, fwhm=None, shape=None):
pp, pa, pf, psh = self._init_params(name, self.param_names, locals())
# amplitude and fwhms should be positive
self.params[1].finalize = abs
self.params[2].finalize = abs
self.params[3].finalize = abs
self.fcn = lambda p, x: \
p[pa] / wofz(1j*sqrt(log(2))*p[psh]).real * \
wofz(2*sqrt(log(2)) * (x-p[pp])/p[pf] + 1j*sqrt(log(2))*p[psh]).real
pick_points = ['peak', 'width']
def convert_pick(self, p, w):
return {
self.params[0].name: p[0], # position
self.params[1].name: p[1], # peak amplitude
self.params[2].name: 2*abs(w[0] - p[0]), # FWHM of Gauss
self.params[3].name: 0,
}
class PseudoVoigt(Model):
"""Pseudo-Voigt peak
A pseudo-convolution of a Gaussian and a Lorentzian.
Parameters:
* `pos` - Peak center position
* `ampl` - Amplitude at center
* `fwhm` - Full width at half maximum
* `eta` - Lorentzicity
"""
param_names = ['pos', 'ampl', 'fwhm', 'eta']
def __init__(self, name='', pos=None, ampl=None, fwhm=None, eta=0.5):
pp, pa, pf, pe = self._init_params(name, self.param_names, locals())
# amplitude and fwhm should be positive
self.params[1].finalize = abs
self.params[2].finalize = abs
# eta should be between 0 and 1
self.params[3].finalize = lambda e: e % 1.0
self.fcn = lambda p, x: abs(p[pa]) * \
((p[pe] % 1.0) / (1 + 4*(x - p[pp])**2/p[pf]**2) +
(1-(p[pe] % 1.0)) * exp(-(x - p[pp])**2/p[pf]**2 * 4*log(2)))
pick_points = ['peak', 'width']
def convert_pick(self, p, w):
return {
self.params[0].name: p[0], # position
self.params[1].name: p[1], # peak amplitude
self.params[2].name: 2*abs(w[0] - p[0]), # FWHM
}
class DHO(Model):
"""Damped Harmonic Oscillator
Two Lorentzians centered around zero with a common width and amplitude,
respecting the Bose factor.
Parameters:
* `center` - Energy zero
* `pos` - omega_0
* `ampl` - Amplitude
* `gamma` - Damping
* `tt` - Temperature in K
"""
param_names = ['center', 'pos', 'ampl', 'gamma', 'tt']
def __init__(self, name='',
center=0, pos=None, ampl=None, gamma=None, tt=None):
pc, pp, pa, pg, ptt = self._init_params(name, self.param_names,
locals())
# pos, amplitude and gamma should be positive
self.params[1].finalize = abs
self.params[2].finalize = abs
self.params[3].finalize = abs
self.fcn = lambda p, x: x / (1. - exp(-11.6045*(x+0.00001) / p[ptt])) * \
abs(p[pa]) * abs(p[pg]) / \
((p[pp]**2 - (x - p[pc])**2)**2 + (p[pg]*(x - p[pc]))**2)
pick_points = ['left peak', 'width of left peak', 'right peak']
def convert_pick(self, p1, w, p2):
return {
self.params[0].name: 0.5*(p1[0] + p2[0]), # center
self.params[1].name: 0.5*abs(p1[0] - p2[0]), # position
self.params[2].name: p1[1] * 0.01, # peak amplitude
self.params[3].name: 2*abs(w[0] - p1[0]), # gamma
}
class Gauss2D(Model):
"""Gaussian peak in two dimensions
Parameters:
* `bkgd` - Background
* `pos_x` - X center position
* `pos_y` - Y center position
* `ampl` - amplitude
* `fwhm_x` - Full width in X direction
* `fwhm_y` - Full width in Y direction
* `theta` - rotation of Gaussian in radians
"""
param_names = ['bkgd', 'pos_x', 'pos_y', 'ampl', 'fwhm_x', 'fwhm_y', 'theta']
def __init__(self, name='', bkgd=None, pos_x=None, pos_y=None, ampl=None,
fwhm_x=None, fwhm_y=None, theta=None):
pb, ppx, ppy, pa, pfx, pfy, pth = self._init_params(
name, self.param_names, locals())
self.params[3].finalize = abs
self.params[4].finalize = abs
self.params[5].finalize = abs
def fcn(p, x):
# rotate coordinate system by theta
c, s = cos(p[pth]), sin(p[pth])
x1 = (x[:, 0] - p[ppx])*c - (x[:, 1] - p[ppy])*s
y1 = (x[:, 0] - p[ppx])*s + (x[:, 1] - p[ppy])*c
return abs(p[pb]) + abs(p[pa]) * \
exp(-x1**2/p[pfx]**2 * 4*log(2)) * \
exp(-y1**2/p[pfy]**2 * 4*log(2))
self.fcn = fcn
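# A quick numerical sanity check of the parametrization used above: the Gauss
# profile is written directly in terms of the FWHM,
#     f(x) = ampl * exp(-(x - pos)**2 / fwhm**2 * 4*log(2)),
# so it must drop to exactly half of its peak value at x = pos +/- fwhm/2.
if __name__ == '__main__':
    pos, ampl, fwhm = 2.0, 3.0, 0.5

    def profile(x):
        return ampl * exp(-(x - pos)**2 / fwhm**2 * 4 * log(2))

    assert abs(profile(pos + fwhm / 2) - ampl / 2) < 1e-12
    assert abs(profile(pos - fwhm / 2) - ampl / 2) < 1e-12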
|
[
"numpy.log",
"numpy.sin",
"numpy.exp",
"numpy.cos",
"numpy.sqrt"
] |
[((2370, 2382), 'numpy.sqrt', 'sqrt', (['(2 * pi)'], {}), '(2 * pi)\n', (2374, 2382), False, 'from numpy import exp, log, sqrt, sin, cos, pi\n'), ((8778, 8789), 'numpy.cos', 'cos', (['p[pth]'], {}), '(p[pth])\n', (8781, 8789), False, 'from numpy import exp, log, sqrt, sin, cos, pi\n'), ((8791, 8802), 'numpy.sin', 'sin', (['p[pth]'], {}), '(p[pth])\n', (8794, 8802), False, 'from numpy import exp, log, sqrt, sin, cos, pi\n'), ((1190, 1196), 'numpy.log', 'log', (['(2)'], {}), '(2)\n', (1193, 1196), False, 'from numpy import exp, log, sqrt, sin, cos, pi\n'), ((2141, 2147), 'numpy.log', 'log', (['(2)'], {}), '(2)\n', (2144, 2147), False, 'from numpy import exp, log, sqrt, sin, cos, pi\n'), ((9066, 9072), 'numpy.log', 'log', (['(2)'], {}), '(2)\n', (9069, 9072), False, 'from numpy import exp, log, sqrt, sin, cos, pi\n'), ((6255, 6261), 'numpy.log', 'log', (['(2)'], {}), '(2)\n', (6258, 6261), False, 'from numpy import exp, log, sqrt, sin, cos, pi\n'), ((7353, 7389), 'numpy.exp', 'exp', (['(-11.6045 * (x + 1e-05) / p[ptt])'], {}), '(-11.6045 * (x + 1e-05) / p[ptt])\n', (7356, 7389), False, 'from numpy import exp, log, sqrt, sin, cos, pi\n'), ((9013, 9019), 'numpy.log', 'log', (['(2)'], {}), '(2)\n', (9016, 9019), False, 'from numpy import exp, log, sqrt, sin, cos, pi\n'), ((2082, 2088), 'numpy.log', 'log', (['(2)'], {}), '(2)\n', (2085, 2088), False, 'from numpy import exp, log, sqrt, sin, cos, pi\n'), ((4966, 4972), 'numpy.log', 'log', (['(2)'], {}), '(2)\n', (4969, 4972), False, 'from numpy import exp, log, sqrt, sin, cos, pi\n'), ((5051, 5057), 'numpy.log', 'log', (['(2)'], {}), '(2)\n', (5054, 5057), False, 'from numpy import exp, log, sqrt, sin, cos, pi\n'), ((5015, 5021), 'numpy.log', 'log', (['(2)'], {}), '(2)\n', (5018, 5021), False, 'from numpy import exp, log, sqrt, sin, cos, pi\n')]
|
# Generated by Django 2.1 on 2020-03-12 15:05
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('myCarApp', '0006_auto_20200312_1502'),
]
operations = [
migrations.RemoveField(
model_name='user',
name='groups',
),
migrations.RemoveField(
model_name='user',
name='user_permissions',
),
migrations.RenameField(
model_name='lesseeprofile',
old_name='User',
new_name='user',
),
migrations.RenameField(
model_name='lessorprofile',
old_name='User',
new_name='user',
),
migrations.DeleteModel(
name='User',
),
]
|
[
"django.db.migrations.RemoveField",
"django.db.migrations.DeleteModel",
"django.db.migrations.RenameField"
] |
[((226, 282), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""user"""', 'name': '"""groups"""'}), "(model_name='user', name='groups')\n", (248, 282), False, 'from django.db import migrations\n'), ((327, 393), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""user"""', 'name': '"""user_permissions"""'}), "(model_name='user', name='user_permissions')\n", (349, 393), False, 'from django.db import migrations\n'), ((438, 526), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""lesseeprofile"""', 'old_name': '"""User"""', 'new_name': '"""user"""'}), "(model_name='lesseeprofile', old_name='User',\n new_name='user')\n", (460, 526), False, 'from django.db import migrations\n'), ((579, 667), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""lessorprofile"""', 'old_name': '"""User"""', 'new_name': '"""user"""'}), "(model_name='lessorprofile', old_name='User',\n new_name='user')\n", (601, 667), False, 'from django.db import migrations\n'), ((720, 755), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""User"""'}), "(name='User')\n", (742, 755), False, 'from django.db import migrations\n')]
|
import numpy as np
from skopt.space import Space
from skopt.sampler import Grid
import matplotlib.pyplot as plt
import seaborn as sns
def plot_teacher_action():
space = Space([(-1., 1.), (-1., 1.)])
grid = Grid(border="include", use_full_layout=False)
action_manipulated = grid.generate(space.dimensions, 160)
action_manipulated = np.array(action_manipulated)
action_manipulated2 = \
np.append(action_manipulated[(action_manipulated[:, 0] < -0.3) * (action_manipulated[:, 1] < -0.3), :],
action_manipulated[(action_manipulated[:, 0] > 0.3) * (action_manipulated[:, 1] > 0.3), :],
axis=0)
action_manipulated2 = \
np.append(action_manipulated2,
action_manipulated[(action_manipulated[:, 0] > 0.3) * (action_manipulated[:, 1] < -0.3), :],
axis=0)
action_manipulated2 = \
np.append(action_manipulated2,
action_manipulated[(action_manipulated[:, 0] < -0.3) * (action_manipulated[:, 1] > 0.3), :],
axis=0)
action_manipulated = np.array([[-0.1, 0],
[0.1, 0],
[0, 0.1],
[0, -0.1],
[-0.25, 0],
[0.25, 0],
[0, 0.25],
[0, -0.25],
[-0.1, 0.1],
[0.1, 0.1],
[-0.1, -0.1],
[0.1, -0.1],
[-0.25, 0.25],
[0.25, 0.25],
[-0.25, -0.25],
[0.25, -0.25],
[0.1, 0.05],
[0.05, 0.1],
[0.05, -0.1],
[-0.25, 0.1],
[0.25, 0.8],
[0.6, 0.25],
[0.3, -0.25],
[-0.1, 0.7],
[0.9, 0.1],
[-0.1, -1],
[1, -0.1],
[-0.2, 0.75],
[0.5, 0.5],
[-0.5, -0.5],
[0.75, 0],
[0.15, 0.05],
[0.6, 0.1],
[0.4, -0.1],
[-0.25, 0.15],
[0.25, 0.9],
[-0.35, 0.25],
[0.5, -0.25],
[-0.19, 0.19],
[1, 1],
[-1, -1],
[0, 1],
[-1, 0],
[0.2, 0.75],
[-0.8, 0],
[0, -0.58]])
plt.figure()
sns.set_style("whitegrid", {'axes.grid': True, 'axes.edgecolor': 'black'})
sns.scatterplot(data=action_manipulated, x=action_manipulated[:, 0], y=action_manipulated[:, 1])
plt.xlabel('velocity x')
plt.ylabel('velocity y')
plt.ylim(bottom=-1.05, top=1.05)
plt.xlim(-1.05, 1.05)
plt.savefig("art/plots/teacher_action_random.png", dpi=100, transparent=True)
plt.show()
plt.figure()
sns.set_style("whitegrid", {'axes.grid': True, 'axes.edgecolor': 'black'})
sns.scatterplot(data=action_manipulated2, x=action_manipulated2[:, 0], y=action_manipulated2[:, 1])
plt.xlabel('velocity x')
plt.ylabel('velocity y')
plt.ylim(bottom=-1.05, top=1.05)
plt.xlim(-1.05, 1.05)
plt.savefig("art/plots/teacher_action_grid.png", dpi=100, transparent=True)
plt.show()
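# Hypothetical entry point so the two figures above can be produced by running
# this file directly; plot_teacher_action() expects the art/plots/ output
# directory to exist beforehand.
if __name__ == "__main__":
    plot_teacher_action()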
|
[
"seaborn.set_style",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"seaborn.scatterplot",
"matplotlib.pyplot.ylim",
"skopt.sampler.Grid",
"skopt.space.Space",
"numpy.append",
"matplotlib.pyplot.figure",
"numpy.array",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((175, 208), 'skopt.space.Space', 'Space', (['[(-1.0, 1.0), (-1.0, 1.0)]'], {}), '([(-1.0, 1.0), (-1.0, 1.0)])\n', (180, 208), False, 'from skopt.space import Space\n'), ((216, 261), 'skopt.sampler.Grid', 'Grid', ([], {'border': '"""include"""', 'use_full_layout': '(False)'}), "(border='include', use_full_layout=False)\n", (220, 261), False, 'from skopt.sampler import Grid\n'), ((349, 377), 'numpy.array', 'np.array', (['action_manipulated'], {}), '(action_manipulated)\n', (357, 377), True, 'import numpy as np\n'), ((414, 631), 'numpy.append', 'np.append', (['action_manipulated[(action_manipulated[:, 0] < -0.3) * (action_manipulated[\n :, 1] < -0.3), :]', 'action_manipulated[(action_manipulated[:, 0] > 0.3) * (action_manipulated[:,\n 1] > 0.3), :]'], {'axis': '(0)'}), '(action_manipulated[(action_manipulated[:, 0] < -0.3) * (\n action_manipulated[:, 1] < -0.3), :], action_manipulated[(\n action_manipulated[:, 0] > 0.3) * (action_manipulated[:, 1] > 0.3), :],\n axis=0)\n', (423, 631), True, 'import numpy as np\n'), ((690, 825), 'numpy.append', 'np.append', (['action_manipulated2', 'action_manipulated[(action_manipulated[:, 0] > 0.3) * (action_manipulated[:,\n 1] < -0.3), :]'], {'axis': '(0)'}), '(action_manipulated2, action_manipulated[(action_manipulated[:, 0] >\n 0.3) * (action_manipulated[:, 1] < -0.3), :], axis=0)\n', (699, 825), True, 'import numpy as np\n'), ((894, 1029), 'numpy.append', 'np.append', (['action_manipulated2', 'action_manipulated[(action_manipulated[:, 0] < -0.3) * (action_manipulated[\n :, 1] > 0.3), :]'], {'axis': '(0)'}), '(action_manipulated2, action_manipulated[(action_manipulated[:, 0] <\n -0.3) * (action_manipulated[:, 1] > 0.3), :], axis=0)\n', (903, 1029), True, 'import numpy as np\n'), ((1089, 1710), 'numpy.array', 'np.array', (['[[-0.1, 0], [0.1, 0], [0, 0.1], [0, -0.1], [-0.25, 0], [0.25, 0], [0, 0.25],\n [0, -0.25], [-0.1, 0.1], [0.1, 0.1], [-0.1, -0.1], [0.1, -0.1], [-0.25,\n 0.25], [0.25, 0.25], [-0.25, -0.25], [0.25, -0.25], [0.1, 0.05], [0.05,\n 0.1], [0.05, -0.1], [-0.25, 0.1], [0.25, 0.8], [0.6, 0.25], [0.3, -0.25\n ], [-0.1, 0.7], [0.9, 0.1], [-0.1, -1], [1, -0.1], [-0.2, 0.75], [0.5, \n 0.5], [-0.5, -0.5], [0.75, 0], [0.15, 0.05], [0.6, 0.1], [0.4, -0.1], [\n -0.25, 0.15], [0.25, 0.9], [-0.35, 0.25], [0.5, -0.25], [-0.19, 0.19],\n [1, 1], [-1, -1], [0, 1], [-1, 0], [0.2, 0.75], [-0.8, 0], [0, -0.58]]'], {}), '([[-0.1, 0], [0.1, 0], [0, 0.1], [0, -0.1], [-0.25, 0], [0.25, 0],\n [0, 0.25], [0, -0.25], [-0.1, 0.1], [0.1, 0.1], [-0.1, -0.1], [0.1, -\n 0.1], [-0.25, 0.25], [0.25, 0.25], [-0.25, -0.25], [0.25, -0.25], [0.1,\n 0.05], [0.05, 0.1], [0.05, -0.1], [-0.25, 0.1], [0.25, 0.8], [0.6, 0.25\n ], [0.3, -0.25], [-0.1, 0.7], [0.9, 0.1], [-0.1, -1], [1, -0.1], [-0.2,\n 0.75], [0.5, 0.5], [-0.5, -0.5], [0.75, 0], [0.15, 0.05], [0.6, 0.1], [\n 0.4, -0.1], [-0.25, 0.15], [0.25, 0.9], [-0.35, 0.25], [0.5, -0.25], [-\n 0.19, 0.19], [1, 1], [-1, -1], [0, 1], [-1, 0], [0.2, 0.75], [-0.8, 0],\n [0, -0.58]])\n', (1097, 1710), True, 'import numpy as np\n'), ((3255, 3267), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3265, 3267), True, 'import matplotlib.pyplot as plt\n'), ((3272, 3346), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""', "{'axes.grid': True, 'axes.edgecolor': 'black'}"], {}), "('whitegrid', {'axes.grid': True, 'axes.edgecolor': 'black'})\n", (3285, 3346), True, 'import seaborn as sns\n'), ((3351, 3452), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'data': 'action_manipulated', 'x': 'action_manipulated[:, 0]', 'y': 
'action_manipulated[:, 1]'}), '(data=action_manipulated, x=action_manipulated[:, 0], y=\n action_manipulated[:, 1])\n', (3366, 3452), True, 'import seaborn as sns\n'), ((3452, 3476), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""velocity x"""'], {}), "('velocity x')\n", (3462, 3476), True, 'import matplotlib.pyplot as plt\n'), ((3481, 3505), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""velocity y"""'], {}), "('velocity y')\n", (3491, 3505), True, 'import matplotlib.pyplot as plt\n'), ((3510, 3542), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'bottom': '(-1.05)', 'top': '(1.05)'}), '(bottom=-1.05, top=1.05)\n', (3518, 3542), True, 'import matplotlib.pyplot as plt\n'), ((3547, 3568), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-1.05)', '(1.05)'], {}), '(-1.05, 1.05)\n', (3555, 3568), True, 'import matplotlib.pyplot as plt\n'), ((3573, 3650), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""art/plots/teacher_action_random.png"""'], {'dpi': '(100)', 'transparent': '(True)'}), "('art/plots/teacher_action_random.png', dpi=100, transparent=True)\n", (3584, 3650), True, 'import matplotlib.pyplot as plt\n'), ((3655, 3665), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3663, 3665), True, 'import matplotlib.pyplot as plt\n'), ((3671, 3683), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3681, 3683), True, 'import matplotlib.pyplot as plt\n'), ((3688, 3762), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""', "{'axes.grid': True, 'axes.edgecolor': 'black'}"], {}), "('whitegrid', {'axes.grid': True, 'axes.edgecolor': 'black'})\n", (3701, 3762), True, 'import seaborn as sns\n'), ((3767, 3871), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'data': 'action_manipulated2', 'x': 'action_manipulated2[:, 0]', 'y': 'action_manipulated2[:, 1]'}), '(data=action_manipulated2, x=action_manipulated2[:, 0], y=\n action_manipulated2[:, 1])\n', (3782, 3871), True, 'import seaborn as sns\n'), ((3871, 3895), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""velocity x"""'], {}), "('velocity x')\n", (3881, 3895), True, 'import matplotlib.pyplot as plt\n'), ((3900, 3924), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""velocity y"""'], {}), "('velocity y')\n", (3910, 3924), True, 'import matplotlib.pyplot as plt\n'), ((3929, 3961), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'bottom': '(-1.05)', 'top': '(1.05)'}), '(bottom=-1.05, top=1.05)\n', (3937, 3961), True, 'import matplotlib.pyplot as plt\n'), ((3966, 3987), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-1.05)', '(1.05)'], {}), '(-1.05, 1.05)\n', (3974, 3987), True, 'import matplotlib.pyplot as plt\n'), ((3992, 4067), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""art/plots/teacher_action_grid.png"""'], {'dpi': '(100)', 'transparent': '(True)'}), "('art/plots/teacher_action_grid.png', dpi=100, transparent=True)\n", (4003, 4067), True, 'import matplotlib.pyplot as plt\n'), ((4072, 4082), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4080, 4082), True, 'import matplotlib.pyplot as plt\n')]
|
import json
import os
import torch
from torch import nn
from root import from_root
from src.adversaries.adversary import Adversary, AdversaryOutput
from src.experiments.config import create_resnet
from src.misc.collection_object import DictObject
from src.misc.utils import model_device
class MomentumFgsmTransfer(Adversary):
"""
Implements the Momentum Iterative FGSM method for generating adversarial examples in the context of black-box
transfer-based attack, i.e. perturbations are generated on the surrogate model passed in the constructor.
"""
surrogate_model: nn.Module
epsilon: float
n_iters: int
decay_factor: float
def __init__(self, surrogate_cfg_filepath: str, epsilon: float, n_iters: int, decay_factor: float):
self.init_surrogate_model(surrogate_cfg_filepath)
self.epsilon = epsilon
self.n_iters = n_iters
self.decay_factor = decay_factor
def init_surrogate_model(self, surrogate_cfg_filepath: str) -> None:
with open(from_root(surrogate_cfg_filepath), "r") as file:
cfg = DictObject(json.load(file))
self.surrogate_model = create_resnet(cfg)
self.surrogate_model = self.surrogate_model.to(cfg.model.device)
best_epoch_filepath = os.path.join(from_root(cfg.out_dirpath), "checkpoints/best_epoch.txt")
with open(best_epoch_filepath, "r") as file:
epoch = int(file.read())
checkpoint_filepath = os.path.join(from_root(cfg.out_dirpath), f"checkpoints/checkpoint_{epoch}.pth")
checkpoint = torch.load(checkpoint_filepath, map_location=model_device(self.surrogate_model))
self.surrogate_model.load_state_dict(checkpoint["model_state_dict"])
self.surrogate_model.eval()
for param in self.surrogate_model.parameters():
param.requires_grad = False
def __call__(self, model: nn.Module, images: torch.Tensor, labels: torch.Tensor) -> AdversaryOutput:
step_size = self.epsilon / self.n_iters
velocity = torch.zeros_like(images)
result = images.clone()
result.requires_grad = True
for _ in range(self.n_iters):
if result.grad is not None:
result.grad.detach_()
result.grad.zero_()
loss = self.compute_objective(self.surrogate_model, result, labels, "mean")
loss.backward()
velocity = self.decay_factor * velocity + result.grad / torch.norm(result.grad, p=1)
with torch.no_grad():
result += step_size * torch.sign(velocity)
result.clamp_(0, 1)
result.requires_grad = False
return AdversaryOutput(result, result - images)
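# A minimal usage sketch; the config path, data tensors and target model are
# illustrative assumptions, and the output fields follow whatever
# AdversaryOutput defines in src.adversaries.adversary:
#
#     adversary = MomentumFgsmTransfer(
#         surrogate_cfg_filepath="configs/surrogate_resnet.json",  # hypothetical path
#         epsilon=8 / 255, n_iters=10, decay_factor=1.0)
#     output = adversary(target_model, images, labels)  # images expected in [0, 1]
#
# Note that the target model passed to __call__ is never used to compute
# gradients here: the perturbation comes entirely from the surrogate model,
# which is what makes this a transfer-based black-box attack.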
|
[
"src.misc.utils.model_device",
"json.load",
"root.from_root",
"torch.zeros_like",
"torch.norm",
"src.adversaries.adversary.AdversaryOutput",
"torch.sign",
"src.experiments.config.create_resnet",
"torch.no_grad"
] |
[((1146, 1164), 'src.experiments.config.create_resnet', 'create_resnet', (['cfg'], {}), '(cfg)\n', (1159, 1164), False, 'from src.experiments.config import create_resnet\n'), ((2026, 2050), 'torch.zeros_like', 'torch.zeros_like', (['images'], {}), '(images)\n', (2042, 2050), False, 'import torch\n'), ((2670, 2710), 'src.adversaries.adversary.AdversaryOutput', 'AdversaryOutput', (['result', '(result - images)'], {}), '(result, result - images)\n', (2685, 2710), False, 'from src.adversaries.adversary import Adversary, AdversaryOutput\n'), ((1282, 1308), 'root.from_root', 'from_root', (['cfg.out_dirpath'], {}), '(cfg.out_dirpath)\n', (1291, 1308), False, 'from root import from_root\n'), ((1474, 1500), 'root.from_root', 'from_root', (['cfg.out_dirpath'], {}), '(cfg.out_dirpath)\n', (1483, 1500), False, 'from root import from_root\n'), ((1019, 1052), 'root.from_root', 'from_root', (['surrogate_cfg_filepath'], {}), '(surrogate_cfg_filepath)\n', (1028, 1052), False, 'from root import from_root\n'), ((1097, 1112), 'json.load', 'json.load', (['file'], {}), '(file)\n', (1106, 1112), False, 'import json\n'), ((1607, 1641), 'src.misc.utils.model_device', 'model_device', (['self.surrogate_model'], {}), '(self.surrogate_model)\n', (1619, 1641), False, 'from src.misc.utils import model_device\n'), ((2505, 2520), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2518, 2520), False, 'import torch\n'), ((2459, 2487), 'torch.norm', 'torch.norm', (['result.grad'], {'p': '(1)'}), '(result.grad, p=1)\n', (2469, 2487), False, 'import torch\n'), ((2560, 2580), 'torch.sign', 'torch.sign', (['velocity'], {}), '(velocity)\n', (2570, 2580), False, 'import torch\n')]
|
from random import randint
from time import sleep
itens = ('Pedra', 'Papel','Tesoura')
computador = randint(0, 2)
print('''\033[1;31mSuas opções\033[m:
[ 0 ] PEDRA
[ 1 ] PAPEL
[ 2 ] TESOURA''')
jogador = int(input('\033[1;34mQual é a sua Jogada?\033[m '))
print('\033[1;30mJO\033[m')
sleep(1)
print('\033[1;34mKEN\033[m')
sleep(1)
print('\033[1;33mPO!!\033[m')
sleep(1)
print('\033[35m-=\033[m' * 11)
print('\033[1;32mComputador jogou\033[m ' ' \033[1;35m{}\033[m'.format(itens[computador]))
print('\033[1;36mJogador jogou\033[m ' ' \033[1;32m{}\033[m'. format(itens[jogador]))
print('\033[35m-=\033[m' * 11)
if computador == 0:  # computer played ROCK (PEDRA)
if jogador == 0:
print('\033[1;37mEMPATE\033[m')
elif jogador == 1:
print('\033[1;43mJOGADOR VENCEU\033[m')
elif jogador == 2:
print('\033[1;31mCOMPUTADOR VENCEU\033[m')
else:
print('\033[4;33;40mJOGADA INVÁLIDA\033[m!')
elif computador == 1:  # computer played PAPER (PAPEL)
if jogador == 0:
print('\033[1;31mCOMPUTADOR VENCEU\033[m')
elif jogador == 1:
print('\033[1;37mEMPATE\033[m')
elif jogador == 2:
print('\033[1;34mJOGADOR VENCEU\033[m')
else:
        print('\033[4;33;40mJOGADA INVÁLIDA\033[m!')
elif computador == 2:  # computer played SCISSORS (TESOURA)
if jogador == 0:
print('\033[1;34mJOGADOR VENCEU\033[m')
elif jogador == 1:
print('\033[1;31mCOMPUTADOR VENCEU\033[m')
elif jogador == 2:
print('\033[1;37mEMPATE\033[m')
else:
        print('\033[4;33;40mJOGADA INVÁLIDA\033[m!')
|
[
"random.randint",
"time.sleep"
] |
[((100, 113), 'random.randint', 'randint', (['(0)', '(2)'], {}), '(0, 2)\n', (107, 113), False, 'from random import randint\n'), ((284, 292), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (289, 292), False, 'from time import sleep\n'), ((322, 330), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (327, 330), False, 'from time import sleep\n'), ((361, 369), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (366, 369), False, 'from time import sleep\n')]
|
# from distutils.core import setup
from setuptools import setup
import os
from docpie import __version__
setup(
name="docpie",
packages=["docpie"],
package_data={
'': [
'README.rst',
'LICENSE',
'CHANGELOG.md'
],
'docpie': [
'example/*.py',
'example/git/*.py'
],
},
version=__version__,
author="TylerTemp",
author_email="<EMAIL>",
url="http://docpie.comes.today/",
download_url="https://github.com/TylerTemp/docpie/tarball/%s/" % __version__,
license='MIT',
description=("An easy and Pythonic way to create "
"your POSIX command line interface"),
keywords='option arguments parsing optparse argparse getopt docopt',
long_description=open(
os.path.join(os.path.dirname(__file__), 'README.rst')).read(),
platforms='any',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'License :: OSI Approved :: MIT License',
'Topic :: Utilities',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: PyPy',
],
)
|
[
"os.path.dirname"
] |
[((822, 847), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (837, 847), False, 'import os\n')]
|
# Test convolving to different resolutions
# Test the effect of convolving straight to 20000 and convolving first to an intermediate resolution say 80000.
import matplotlib.pyplot as plt
import numpy as np
from IP_multi_Convolution import ip_convolution, unitary_Gauss
def main():
# fwhm = lambda/R
fwhm = 2046 / 100000
# Starting spectrum
wav = np.linspace(2040, 2050, 20000)
flux = (np.ones_like(wav) - unitary_Gauss(wav, 2045, fwhm) -
unitary_Gauss(wav, 2047, fwhm))
# range in which to have the convoled values. Be careful of the edges!
chip_limits = [2042, 2049]
# Convolution to 80k
R = 80000
wav_80k, flux_80k = ip_convolution(wav, flux, chip_limits, R,
fwhm_lim=5.0, plot=False, verbose=True)
# Convolution to 50k
R = 50000
wav_50k, flux_50k = ip_convolution(wav, flux, chip_limits, R,
fwhm_lim=5.0, plot=False, verbose=True)
wav_80k_50k, flux_80k_50k = ip_convolution(wav_80k, flux_80k, chip_limits, R,
fwhm_lim=5.0, plot=False, verbose=True)
# Convolution to 20k
R = 20000
wav_80k_20k, flux_80k_20k = ip_convolution(wav_80k, flux_80k, chip_limits, R,
fwhm_lim=5.0, plot=False, verbose=True)
wav_50k_20k, flux_50k_20k = ip_convolution(wav_50k, flux_50k, chip_limits, R,
fwhm_lim=5.0, plot=False, verbose=True)
wav_80k_50k_20k, flux_80k_50k_20k = ip_convolution(wav_80k_50k, flux_80k_50k,
chip_limits, R, fwhm_lim=5.0,
plot=False, verbose=True)
# Convolution straight to 20000
wav_20k, flux_20k = ip_convolution(wav, flux, chip_limits, R, fwhm_lim=5.0,
plot=False, verbose=True)
# Plot the results
plt.figure(1)
plt.xlabel(r"wavelength [nm])")
plt.ylabel(r"flux [counts] ")
plt.plot(wav, flux / np.max(flux), color='k',
linestyle="-", label="Original spectra")
    plt.plot(wav_80k, flux_80k / np.max(flux_80k), color='r', linestyle="-.", label="R=80k")
plt.plot(wav_50k, flux_50k / np.max(flux_50k), color='b', linestyle="--", label="R=50k")
plt.plot(wav_80k_20k, flux_80k_20k / np.max(flux_80k_20k), color='r',
linestyle="-", label="R=80k-20k")
plt.plot(wav_50k_20k, flux_50k_20k / np.max(flux_50k_20k), color='b',
linestyle="-", label="R=50k20k")
plt.plot(wav_80k_50k_20k, flux_80k_50k_20k / np.max(flux_80k_50k_20k), color='m',
linestyle="-", label="R=80k-50k-20k")
plt.plot(wav_20k, flux_20k / np.max(flux_20k), color='c', linestyle="-", label="R=20k")
plt.legend(loc='best')
plt.title(r"Convolution by different Instrument Profiles")
plt.show()
if __name__ == "__main__":
    # The IP convolution fails if it is not run inside __name__ == "__main__"
main()
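# Since the instrument profile used here is Gaussian, the widths of successive
# convolutions add in quadrature, fwhm_eff**2 = fwhm_1**2 + fwhm_2**2.
# With fwhm = lambda / R, convolving first to R1 = 80000 and then to
# R2 = 20000 is therefore equivalent to a single convolution at
#     R_eff = 1 / sqrt(1 / R1**2 + 1 / R2**2) ~ 19400,
# so the two-step curves are expected to come out slightly broader than the
# direct R = 20000 convolution plotted above.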
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"numpy.ones_like",
"matplotlib.pyplot.legend",
"IP_multi_Convolution.unitary_Gauss",
"matplotlib.pyplot.figure",
"numpy.max",
"IP_multi_Convolution.ip_convolution",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((367, 397), 'numpy.linspace', 'np.linspace', (['(2040)', '(2050)', '(20000)'], {}), '(2040, 2050, 20000)\n', (378, 397), True, 'import numpy as np\n'), ((678, 764), 'IP_multi_Convolution.ip_convolution', 'ip_convolution', (['wav', 'flux', 'chip_limits', 'R'], {'fwhm_lim': '(5.0)', 'plot': '(False)', 'verbose': '(True)'}), '(wav, flux, chip_limits, R, fwhm_lim=5.0, plot=False, verbose\n =True)\n', (692, 764), False, 'from IP_multi_Convolution import ip_convolution, unitary_Gauss\n'), ((863, 949), 'IP_multi_Convolution.ip_convolution', 'ip_convolution', (['wav', 'flux', 'chip_limits', 'R'], {'fwhm_lim': '(5.0)', 'plot': '(False)', 'verbose': '(True)'}), '(wav, flux, chip_limits, R, fwhm_lim=5.0, plot=False, verbose\n =True)\n', (877, 949), False, 'from IP_multi_Convolution import ip_convolution, unitary_Gauss\n'), ((1017, 1110), 'IP_multi_Convolution.ip_convolution', 'ip_convolution', (['wav_80k', 'flux_80k', 'chip_limits', 'R'], {'fwhm_lim': '(5.0)', 'plot': '(False)', 'verbose': '(True)'}), '(wav_80k, flux_80k, chip_limits, R, fwhm_lim=5.0, plot=False,\n verbose=True)\n', (1031, 1110), False, 'from IP_multi_Convolution import ip_convolution, unitary_Gauss\n'), ((1226, 1319), 'IP_multi_Convolution.ip_convolution', 'ip_convolution', (['wav_80k', 'flux_80k', 'chip_limits', 'R'], {'fwhm_lim': '(5.0)', 'plot': '(False)', 'verbose': '(True)'}), '(wav_80k, flux_80k, chip_limits, R, fwhm_lim=5.0, plot=False,\n verbose=True)\n', (1240, 1319), False, 'from IP_multi_Convolution import ip_convolution, unitary_Gauss\n'), ((1396, 1489), 'IP_multi_Convolution.ip_convolution', 'ip_convolution', (['wav_50k', 'flux_50k', 'chip_limits', 'R'], {'fwhm_lim': '(5.0)', 'plot': '(False)', 'verbose': '(True)'}), '(wav_50k, flux_50k, chip_limits, R, fwhm_lim=5.0, plot=False,\n verbose=True)\n', (1410, 1489), False, 'from IP_multi_Convolution import ip_convolution, unitary_Gauss\n'), ((1574, 1675), 'IP_multi_Convolution.ip_convolution', 'ip_convolution', (['wav_80k_50k', 'flux_80k_50k', 'chip_limits', 'R'], {'fwhm_lim': '(5.0)', 'plot': '(False)', 'verbose': '(True)'}), '(wav_80k_50k, flux_80k_50k, chip_limits, R, fwhm_lim=5.0,\n plot=False, verbose=True)\n', (1588, 1675), False, 'from IP_multi_Convolution import ip_convolution, unitary_Gauss\n'), ((1843, 1929), 'IP_multi_Convolution.ip_convolution', 'ip_convolution', (['wav', 'flux', 'chip_limits', 'R'], {'fwhm_lim': '(5.0)', 'plot': '(False)', 'verbose': '(True)'}), '(wav, flux, chip_limits, R, fwhm_lim=5.0, plot=False, verbose\n =True)\n', (1857, 1929), False, 'from IP_multi_Convolution import ip_convolution, unitary_Gauss\n'), ((1993, 2006), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (2003, 2006), True, 'import matplotlib.pyplot as plt\n'), ((2011, 2041), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""wavelength [nm])"""'], {}), "('wavelength [nm])')\n", (2021, 2041), True, 'import matplotlib.pyplot as plt\n'), ((2047, 2075), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""flux [counts] """'], {}), "('flux [counts] ')\n", (2057, 2075), True, 'import matplotlib.pyplot as plt\n'), ((2845, 2867), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (2855, 2867), True, 'import matplotlib.pyplot as plt\n'), ((2872, 2929), 'matplotlib.pyplot.title', 'plt.title', (['"""Convolution by different Instrument Profiles"""'], {}), "('Convolution by different Instrument Profiles')\n", (2881, 2929), True, 'import matplotlib.pyplot as plt\n'), ((2935, 2945), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', 
(2943, 2945), True, 'import matplotlib.pyplot as plt\n'), ((475, 505), 'IP_multi_Convolution.unitary_Gauss', 'unitary_Gauss', (['wav', '(2047)', 'fwhm'], {}), '(wav, 2047, fwhm)\n', (488, 505), False, 'from IP_multi_Convolution import ip_convolution, unitary_Gauss\n'), ((410, 427), 'numpy.ones_like', 'np.ones_like', (['wav'], {}), '(wav)\n', (422, 427), True, 'import numpy as np\n'), ((430, 460), 'IP_multi_Convolution.unitary_Gauss', 'unitary_Gauss', (['wav', '(2045)', 'fwhm'], {}), '(wav, 2045, fwhm)\n', (443, 460), False, 'from IP_multi_Convolution import ip_convolution, unitary_Gauss\n'), ((2102, 2114), 'numpy.max', 'np.max', (['flux'], {}), '(flux)\n', (2108, 2114), True, 'import numpy as np\n'), ((2214, 2230), 'numpy.max', 'np.max', (['flux_80k'], {}), '(flux_80k)\n', (2220, 2230), True, 'import numpy as np\n'), ((2311, 2327), 'numpy.max', 'np.max', (['flux_50k'], {}), '(flux_50k)\n', (2317, 2327), True, 'import numpy as np\n'), ((2412, 2432), 'numpy.max', 'np.max', (['flux_80k_20k'], {}), '(flux_80k_20k)\n', (2418, 2432), True, 'import numpy as np\n'), ((2533, 2553), 'numpy.max', 'np.max', (['flux_50k_20k'], {}), '(flux_50k_20k)\n', (2539, 2553), True, 'import numpy as np\n'), ((2661, 2685), 'numpy.max', 'np.max', (['flux_80k_50k_20k'], {}), '(flux_80k_50k_20k)\n', (2667, 2685), True, 'import numpy as np\n'), ((2782, 2798), 'numpy.max', 'np.max', (['flux_20k'], {}), '(flux_20k)\n', (2788, 2798), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
""" Module to define API routes
"""
from api.v1.views import app_views
from flask import jsonify, request, abort, make_response
from engine.score_engine import score_engine
import requests
@app_views.route('/status', methods=['GET'], strict_slashes=False)
def status():
""" Status of API """
return jsonify({"status": "OK"})
@app_views.route('/<user_id>', methods=['GET'], strict_slashes=False)
def user_fetch(user_id):
""" Takes Torre username, fetches API and returns points """
req_string = "https://bio.torre.co/api/bios/" + str(user_id)
response = requests.get(req_string)
if response.status_code > 399:
return jsonify({"status": "error"})
my_response = response.json()
my_dict = my_response.get('stats')
my_user = my_response.get('person').get('name')
my_headline = my_response.get('person').get('professionalHeadline')
result = score_engine(**my_dict)
result['name'] = my_user
result['headline'] = my_headline
return jsonify(result)
|
[
"engine.score_engine.score_engine",
"flask.jsonify",
"api.v1.views.app_views.route",
"requests.get"
] |
[((215, 280), 'api.v1.views.app_views.route', 'app_views.route', (['"""/status"""'], {'methods': "['GET']", 'strict_slashes': '(False)'}), "('/status', methods=['GET'], strict_slashes=False)\n", (230, 280), False, 'from api.v1.views import app_views\n'), ((360, 428), 'api.v1.views.app_views.route', 'app_views.route', (['"""/<user_id>"""'], {'methods': "['GET']", 'strict_slashes': '(False)'}), "('/<user_id>', methods=['GET'], strict_slashes=False)\n", (375, 428), False, 'from api.v1.views import app_views\n'), ((332, 357), 'flask.jsonify', 'jsonify', (["{'status': 'OK'}"], {}), "({'status': 'OK'})\n", (339, 357), False, 'from flask import jsonify, request, abort, make_response\n'), ((599, 623), 'requests.get', 'requests.get', (['req_string'], {}), '(req_string)\n', (611, 623), False, 'import requests\n'), ((923, 946), 'engine.score_engine.score_engine', 'score_engine', ([], {}), '(**my_dict)\n', (935, 946), False, 'from engine.score_engine import score_engine\n'), ((1025, 1040), 'flask.jsonify', 'jsonify', (['result'], {}), '(result)\n', (1032, 1040), False, 'from flask import jsonify, request, abort, make_response\n'), ((679, 707), 'flask.jsonify', 'jsonify', (["{'status': 'error'}"], {}), "({'status': 'error'})\n", (686, 707), False, 'from flask import jsonify, request, abort, make_response\n')]
|
import random
import time
random.seed(time.time())
def create_zero(count):
char_list = '0Oo***.\、.----。、~!O@0o/L#$%0/LOg/Lo^./L**&00.00*()0。g/L、、--/L---+|/0Oo[]#%$¥0~-/L--!/L@#oo*~~~¥0O%&*OO。[]0Oog/L'
lines = ''
for i in range(count):
# print("{} random {}".format(i, random.randint(3, 10)))
line_length = random.randint(3, 10)
line = ''
for j in range(line_length):
start = random.randint(0, len(char_list) -2)
line += char_list[start: start+2]
#print(line)
lines += (line +'\n')
return lines
def create_char(count):
start_char = '#%!~*-^*/+#%*、,。.*.。*'
char_list = 'ABCDEFGHIJKLMNOPQRSTUVWXYZA'
lines = ''
for i in range(count):
line_length = random.randint(3, 8)
line = start_char[random.randint(0, len(start_char)-1)]
for j in range(line_length):
line += char_list[random.randint(0, len(char_list)-1)]
#print(line)
lines += (line +'\n')
return lines
def create_method_1(count):
char_split = ['--', '~', '--', '%', '/L', 'g/L', 'mg/L', 'L/L', '^', '=>', '<=', '*', '、', '。']
lines = ''
for i in range(count):
a = random.randint(10, 100000) / 1000
b = random.randint(10, 100000) / 1000
lines += "{}{}{}\n".format(a, char_split[random.randint(0, len(char_split)-1)], b)
return lines
def create_number_1(count):
char_list = '.。,壹贰叁肆伍陆柒捌玖拾佰仟.。,一二三四五六七八九十元百千万亿.。/,1234567890.。,、**%~##'
lines = ''
for i in range(count):
line_length = random.randint(3, 8)
line = ''
for j in range(line_length):
line += char_list[random.randint(0, len(char_list)-1)]
#print(line)
lines += (line +'\n')
return lines
def create_number_2(count):
char_list = '+-*/%¥¥¥$$$***... 。。。、、、~~~***--%%%***、~~=@#'
lines = ''
for i in range(count):
line = '{}{}{}'.format(random.randint(0,100000)/1000.0,
char_list[random.randint(0, len(char_list)-1)],
random.randint(0,100000)/1000.0)
lines += (line +'\n')
return lines
if __name__ == "__main__":
labels_file = '../output/spec_chars_02.txt'
total_lines = ''
#total_lines += create_number_2(200)
total_lines += create_zero(3000)
#total_lines += create_char(200)
total_lines += create_method_1(2000)
# print(total_lines)
lines = total_lines.split('\n')
print("length : {} ".format(len(lines)))
line_list = []
for line in lines:
if len(line) < 1:
continue
line_list.append(line)
line_list = list(set(line_list))
random.shuffle(line_list)
lines = '\n'.join(line_list)
#print(lines)
with open(labels_file, "w") as f:
f.write(lines)
    print('[Output] Generated file: output path {}, number of items {}.'.format(labels_file, len(line_list)))
|
[
"random.shuffle",
"random.randint",
"time.time"
] |
[((39, 50), 'time.time', 'time.time', ([], {}), '()\n', (48, 50), False, 'import time\n'), ((2698, 2723), 'random.shuffle', 'random.shuffle', (['line_list'], {}), '(line_list)\n', (2712, 2723), False, 'import random\n'), ((339, 360), 'random.randint', 'random.randint', (['(3)', '(10)'], {}), '(3, 10)\n', (353, 360), False, 'import random\n'), ((766, 786), 'random.randint', 'random.randint', (['(3)', '(8)'], {}), '(3, 8)\n', (780, 786), False, 'import random\n'), ((1567, 1587), 'random.randint', 'random.randint', (['(3)', '(8)'], {}), '(3, 8)\n', (1581, 1587), False, 'import random\n'), ((1208, 1234), 'random.randint', 'random.randint', (['(10)', '(100000)'], {}), '(10, 100000)\n', (1222, 1234), False, 'import random\n'), ((1254, 1280), 'random.randint', 'random.randint', (['(10)', '(100000)'], {}), '(10, 100000)\n', (1268, 1280), False, 'import random\n'), ((1944, 1969), 'random.randint', 'random.randint', (['(0)', '(100000)'], {}), '(0, 100000)\n', (1958, 1969), False, 'import random\n'), ((2087, 2112), 'random.randint', 'random.randint', (['(0)', '(100000)'], {}), '(0, 100000)\n', (2101, 2112), False, 'import random\n')]
|
import os
import random
import re
import ssl
import tempfile
from urllib import request
import cv2
import imageio
import numpy as np
import tensorflow as tf
import tensorflow_hub as tfhub
UCF_ROOT = 'https://www.crcv.ucf.edu/THUMOS14/UCF101/UCF101/'
KINETICS_URL = ('https://raw.githubusercontent.com/deepmind/'
'kinetics-i3d/master/data/label_map.txt')
CACHE_DIR = tempfile.mkdtemp()
UNVERIFIED_CONTEXT = ssl._create_unverified_context()
def fetch_ucf_videos():
index = \
(request
.urlopen(UCF_ROOT, context=UNVERIFIED_CONTEXT)
.read()
.decode('utf-8'))
videos = re.findall('(v_[\w]+\.avi)', index)
return sorted(set(videos))
def fetch_kinetics_labels():
with request.urlopen(KINETICS_URL) as f:
labels = [line.decode('utf-8').strip()
for line in f.readlines()]
return labels
def fetch_random_video(videos_list):
video_name = random.choice(videos_list)
cache_path = os.path.join(CACHE_DIR, video_name)
if not os.path.exists(cache_path):
url = request.urljoin(UCF_ROOT, video_name)
response = (request
.urlopen(url,
context=UNVERIFIED_CONTEXT)
.read())
with open(cache_path, 'wb') as f:
f.write(response)
return cache_path
def crop_center(frame):
height, width = frame.shape[:2]
smallest_dimension = min(width, height)
x_start = (width // 2) - (smallest_dimension // 2)
x_end = x_start + smallest_dimension
y_start = (height // 2) - (smallest_dimension // 2)
y_end = y_start + smallest_dimension
roi = frame[y_start:y_end, x_start:x_end]
return roi
def read_video(path, max_frames=32, resize=(224, 224)):
capture = cv2.VideoCapture(path)
frames = []
while len(frames) <= max_frames:
frame_read, frame = capture.read()
if not frame_read:
break
frame = crop_center(frame)
frame = cv2.resize(frame, resize)
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frames.append(frame)
capture.release()
frames = np.array(frames)
return frames / 255.
def predict(model, labels, sample_video):
model_input = tf.constant(sample_video,
dtype=tf.float32)
model_input = model_input[tf.newaxis, ...]
logits = model(model_input)['default'][0]
probabilities = tf.nn.softmax(logits)
print('Top 5 actions:')
for i in np.argsort(probabilities)[::-1][:5]:
print(f'{labels[i]}: {probabilities[i] * 100:5.2f}%')
def save_as_gif(images, video_name):
converted_images = np.clip(images * 255, 0, 255)
converted_images = converted_images.astype(np.uint8)
imageio.mimsave(f'./{video_name}.gif',
converted_images,
fps=25)
VIDEO_LIST = fetch_ucf_videos()
LABELS = fetch_kinetics_labels()
video_path = fetch_random_video(VIDEO_LIST)
sample_video = read_video(video_path)
model_path = 'https://tfhub.dev/deepmind/i3d-kinetics-400/1'
model = tfhub.load(model_path)
model = model.signatures['default']
predict(model, LABELS, sample_video)
video_name = video_path.rsplit('/', maxsplit=1)[1][:-4]
save_as_gif(sample_video, video_name)
|
[
"cv2.resize",
"tensorflow.nn.softmax",
"tensorflow_hub.load",
"os.path.join",
"cv2.cvtColor",
"os.path.exists",
"random.choice",
"tensorflow.constant",
"numpy.clip",
"cv2.VideoCapture",
"urllib.request.urlopen",
"numpy.argsort",
"tempfile.mkdtemp",
"re.findall",
"numpy.array",
"ssl._create_unverified_context",
"imageio.mimsave",
"urllib.request.urljoin"
] |
[((385, 403), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (401, 403), False, 'import tempfile\n'), ((425, 457), 'ssl._create_unverified_context', 'ssl._create_unverified_context', ([], {}), '()\n', (455, 457), False, 'import ssl\n'), ((3091, 3113), 'tensorflow_hub.load', 'tfhub.load', (['model_path'], {}), '(model_path)\n', (3101, 3113), True, 'import tensorflow_hub as tfhub\n'), ((628, 665), 're.findall', 're.findall', (['"""(v_[\\\\w]+\\\\.avi)"""', 'index'], {}), "('(v_[\\\\w]+\\\\.avi)', index)\n", (638, 665), False, 'import re\n'), ((938, 964), 'random.choice', 'random.choice', (['videos_list'], {}), '(videos_list)\n', (951, 964), False, 'import random\n'), ((982, 1017), 'os.path.join', 'os.path.join', (['CACHE_DIR', 'video_name'], {}), '(CACHE_DIR, video_name)\n', (994, 1017), False, 'import os\n'), ((1789, 1811), 'cv2.VideoCapture', 'cv2.VideoCapture', (['path'], {}), '(path)\n', (1805, 1811), False, 'import cv2\n'), ((2155, 2171), 'numpy.array', 'np.array', (['frames'], {}), '(frames)\n', (2163, 2171), True, 'import numpy as np\n'), ((2260, 2303), 'tensorflow.constant', 'tf.constant', (['sample_video'], {'dtype': 'tf.float32'}), '(sample_video, dtype=tf.float32)\n', (2271, 2303), True, 'import tensorflow as tf\n'), ((2448, 2469), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), '(logits)\n', (2461, 2469), True, 'import tensorflow as tf\n'), ((2674, 2703), 'numpy.clip', 'np.clip', (['(images * 255)', '(0)', '(255)'], {}), '(images * 255, 0, 255)\n', (2681, 2703), True, 'import numpy as np\n'), ((2766, 2830), 'imageio.mimsave', 'imageio.mimsave', (['f"""./{video_name}.gif"""', 'converted_images'], {'fps': '(25)'}), "(f'./{video_name}.gif', converted_images, fps=25)\n", (2781, 2830), False, 'import imageio\n'), ((735, 764), 'urllib.request.urlopen', 'request.urlopen', (['KINETICS_URL'], {}), '(KINETICS_URL)\n', (750, 764), False, 'from urllib import request\n'), ((1030, 1056), 'os.path.exists', 'os.path.exists', (['cache_path'], {}), '(cache_path)\n', (1044, 1056), False, 'import os\n'), ((1072, 1109), 'urllib.request.urljoin', 'request.urljoin', (['UCF_ROOT', 'video_name'], {}), '(UCF_ROOT, video_name)\n', (1087, 1109), False, 'from urllib import request\n'), ((2008, 2033), 'cv2.resize', 'cv2.resize', (['frame', 'resize'], {}), '(frame, resize)\n', (2018, 2033), False, 'import cv2\n'), ((2050, 2088), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2RGB'], {}), '(frame, cv2.COLOR_BGR2RGB)\n', (2062, 2088), False, 'import cv2\n'), ((2512, 2537), 'numpy.argsort', 'np.argsort', (['probabilities'], {}), '(probabilities)\n', (2522, 2537), True, 'import numpy as np\n'), ((1131, 1179), 'urllib.request.urlopen', 'request.urlopen', (['url'], {'context': 'UNVERIFIED_CONTEXT'}), '(url, context=UNVERIFIED_CONTEXT)\n', (1146, 1179), False, 'from urllib import request\n'), ((507, 560), 'urllib.request.urlopen', 'request.urlopen', (['UCF_ROOT'], {'context': 'UNVERIFIED_CONTEXT'}), '(UCF_ROOT, context=UNVERIFIED_CONTEXT)\n', (522, 560), False, 'from urllib import request\n')]
|
import io
import json
from collections import defaultdict
from unittest.mock import patch
import responses
from httmock import HTTMock, response, urlmatch
from tests.helpers import DummyHttpResponse
from filestack import Client
from filestack.config import MULTIPART_START_URL
from filestack.uploads.multipart import upload_chunk, Chunk
APIKEY = 'APIKEY'
HANDLE = 'SOMEHANDLE'
URL = 'https://cdn.filestackcontent.com/{}'.format(HANDLE)
def chunk_put_callback(request):
body = {'url': URL}
return 200, {'ETag': 'someetags'}, json.dumps(body)
@responses.activate
def test_upload_filepath():
client = Client(APIKEY)
# add the different HTTP responses that are called during the multipart upload
responses.add(
responses.POST, MULTIPART_START_URL, status=200, content_type='application/json',
json={'region': 'us-east-1', 'upload_id': 'someuuid', 'uri': 'someuri', 'location_url': 'fs-uploads.com'}
)
responses.add(
responses.POST, 'https://fs-uploads.com/multipart/upload',
status=200, content_type='application/json', json={'url': URL, 'headers': {}}
)
responses.add_callback(responses.PUT, URL, callback=chunk_put_callback)
responses.add(
responses.POST, 'https://fs-uploads.com/multipart/complete', status=200,
content_type='application/json', json={'url': URL, 'handle': HANDLE}
)
new_filelink = client.upload(filepath='tests/data/doom.mp4')
assert new_filelink.handle == HANDLE
@patch('filestack.uploads.multipart.requests.put')
@patch('filestack.uploads.multipart.requests.post')
def test_upload_file_obj(post_mock, put_mock):
start_response = defaultdict(str)
start_response['location_url'] = 'fs.api'
post_mock.side_effect = [
DummyHttpResponse(json_dict=start_response),
DummyHttpResponse(json_dict=defaultdict(str)),
DummyHttpResponse(json_dict={'handle': 'bytesHandle'})
]
put_mock.return_value = DummyHttpResponse(
json_dict=defaultdict(str), headers={'ETag': 'etag-1'}
)
file_content = b'file bytes'
filelink = Client(APIKEY).upload(file_obj=io.BytesIO(file_content))
assert filelink.handle == 'bytesHandle'
put_args, put_kwargs = put_mock.call_args
assert put_kwargs['data'] == file_content
def test_upload_chunk():
@urlmatch(netloc=r'fsuploads\.com', path='/multipart/upload', method='post', scheme='https')
def fs_backend_mock(url, request):
return {
'status_code': 200,
'content': json.dumps({
'url': 'https://amazon.com/upload', 'headers': {'one': 'two'}
})
}
@urlmatch(netloc=r'amazon\.com', path='/upload', method='put', scheme='https')
def amazon_mock(url, request):
return response(200, b'', {'ETag': 'etagX'}, reason=None, elapsed=0, request=request)
chunk = Chunk(num=123, seek_point=0, filepath='tests/data/doom.mp4')
start_response = defaultdict(str)
start_response['location_url'] = 'fsuploads.com'
with HTTMock(fs_backend_mock), HTTMock(amazon_mock):
upload_result = upload_chunk('apikey', 'filename', 's3', start_response, chunk)
assert upload_result == {'part_number': 123, 'etag': 'etagX'}
|
[
"io.BytesIO",
"httmock.response",
"httmock.urlmatch",
"responses.add",
"tests.helpers.DummyHttpResponse",
"filestack.Client",
"json.dumps",
"unittest.mock.patch",
"collections.defaultdict",
"httmock.HTTMock",
"responses.add_callback",
"filestack.uploads.multipart.upload_chunk",
"filestack.uploads.multipart.Chunk"
] |
[((1492, 1541), 'unittest.mock.patch', 'patch', (['"""filestack.uploads.multipart.requests.put"""'], {}), "('filestack.uploads.multipart.requests.put')\n", (1497, 1541), False, 'from unittest.mock import patch\n'), ((1543, 1593), 'unittest.mock.patch', 'patch', (['"""filestack.uploads.multipart.requests.post"""'], {}), "('filestack.uploads.multipart.requests.post')\n", (1548, 1593), False, 'from unittest.mock import patch\n'), ((617, 631), 'filestack.Client', 'Client', (['APIKEY'], {}), '(APIKEY)\n', (623, 631), False, 'from filestack import Client\n'), ((720, 931), 'responses.add', 'responses.add', (['responses.POST', 'MULTIPART_START_URL'], {'status': '(200)', 'content_type': '"""application/json"""', 'json': "{'region': 'us-east-1', 'upload_id': 'someuuid', 'uri': 'someuri',\n 'location_url': 'fs-uploads.com'}"}), "(responses.POST, MULTIPART_START_URL, status=200, content_type\n ='application/json', json={'region': 'us-east-1', 'upload_id':\n 'someuuid', 'uri': 'someuri', 'location_url': 'fs-uploads.com'})\n", (733, 931), False, 'import responses\n'), ((949, 1108), 'responses.add', 'responses.add', (['responses.POST', '"""https://fs-uploads.com/multipart/upload"""'], {'status': '(200)', 'content_type': '"""application/json"""', 'json': "{'url': URL, 'headers': {}}"}), "(responses.POST, 'https://fs-uploads.com/multipart/upload',\n status=200, content_type='application/json', json={'url': URL,\n 'headers': {}})\n", (962, 1108), False, 'import responses\n'), ((1127, 1198), 'responses.add_callback', 'responses.add_callback', (['responses.PUT', 'URL'], {'callback': 'chunk_put_callback'}), '(responses.PUT, URL, callback=chunk_put_callback)\n', (1149, 1198), False, 'import responses\n'), ((1203, 1367), 'responses.add', 'responses.add', (['responses.POST', '"""https://fs-uploads.com/multipart/complete"""'], {'status': '(200)', 'content_type': '"""application/json"""', 'json': "{'url': URL, 'handle': HANDLE}"}), "(responses.POST, 'https://fs-uploads.com/multipart/complete',\n status=200, content_type='application/json', json={'url': URL, 'handle':\n HANDLE})\n", (1216, 1367), False, 'import responses\n'), ((1662, 1678), 'collections.defaultdict', 'defaultdict', (['str'], {}), '(str)\n', (1673, 1678), False, 'from collections import defaultdict\n'), ((2321, 2416), 'httmock.urlmatch', 'urlmatch', ([], {'netloc': '"""fsuploads\\\\.com"""', 'path': '"""/multipart/upload"""', 'method': '"""post"""', 'scheme': '"""https"""'}), "(netloc='fsuploads\\\\.com', path='/multipart/upload', method='post',\n scheme='https')\n", (2329, 2416), False, 'from httmock import HTTMock, response, urlmatch\n'), ((2646, 2723), 'httmock.urlmatch', 'urlmatch', ([], {'netloc': '"""amazon\\\\.com"""', 'path': '"""/upload"""', 'method': '"""put"""', 'scheme': '"""https"""'}), "(netloc='amazon\\\\.com', path='/upload', method='put', scheme='https')\n", (2654, 2723), False, 'from httmock import HTTMock, response, urlmatch\n'), ((2866, 2926), 'filestack.uploads.multipart.Chunk', 'Chunk', ([], {'num': '(123)', 'seek_point': '(0)', 'filepath': '"""tests/data/doom.mp4"""'}), "(num=123, seek_point=0, filepath='tests/data/doom.mp4')\n", (2871, 2926), False, 'from filestack.uploads.multipart import upload_chunk, Chunk\n'), ((2948, 2964), 'collections.defaultdict', 'defaultdict', (['str'], {}), '(str)\n', (2959, 2964), False, 'from collections import defaultdict\n'), ((537, 553), 'json.dumps', 'json.dumps', (['body'], {}), '(body)\n', (547, 553), False, 'import json\n'), ((1763, 1806), 'tests.helpers.DummyHttpResponse', 
'DummyHttpResponse', ([], {'json_dict': 'start_response'}), '(json_dict=start_response)\n', (1780, 1806), False, 'from tests.helpers import DummyHttpResponse\n'), ((1871, 1925), 'tests.helpers.DummyHttpResponse', 'DummyHttpResponse', ([], {'json_dict': "{'handle': 'bytesHandle'}"}), "(json_dict={'handle': 'bytesHandle'})\n", (1888, 1925), False, 'from tests.helpers import DummyHttpResponse\n'), ((2774, 2852), 'httmock.response', 'response', (['(200)', "b''", "{'ETag': 'etagX'}"], {'reason': 'None', 'elapsed': '(0)', 'request': 'request'}), "(200, b'', {'ETag': 'etagX'}, reason=None, elapsed=0, request=request)\n", (2782, 2852), False, 'from httmock import HTTMock, response, urlmatch\n'), ((3027, 3051), 'httmock.HTTMock', 'HTTMock', (['fs_backend_mock'], {}), '(fs_backend_mock)\n', (3034, 3051), False, 'from httmock import HTTMock, response, urlmatch\n'), ((3053, 3073), 'httmock.HTTMock', 'HTTMock', (['amazon_mock'], {}), '(amazon_mock)\n', (3060, 3073), False, 'from httmock import HTTMock, response, urlmatch\n'), ((3099, 3162), 'filestack.uploads.multipart.upload_chunk', 'upload_chunk', (['"""apikey"""', '"""filename"""', '"""s3"""', 'start_response', 'chunk'], {}), "('apikey', 'filename', 's3', start_response, chunk)\n", (3111, 3162), False, 'from filestack.uploads.multipart import upload_chunk, Chunk\n'), ((1997, 2013), 'collections.defaultdict', 'defaultdict', (['str'], {}), '(str)\n', (2008, 2013), False, 'from collections import defaultdict\n'), ((2096, 2110), 'filestack.Client', 'Client', (['APIKEY'], {}), '(APIKEY)\n', (2102, 2110), False, 'from filestack import Client\n'), ((2127, 2151), 'io.BytesIO', 'io.BytesIO', (['file_content'], {}), '(file_content)\n', (2137, 2151), False, 'import io\n'), ((2524, 2599), 'json.dumps', 'json.dumps', (["{'url': 'https://amazon.com/upload', 'headers': {'one': 'two'}}"], {}), "({'url': 'https://amazon.com/upload', 'headers': {'one': 'two'}})\n", (2534, 2599), False, 'import json\n'), ((1844, 1860), 'collections.defaultdict', 'defaultdict', (['str'], {}), '(str)\n', (1855, 1860), False, 'from collections import defaultdict\n')]
|
from construct import (
Float32l,
Float64l,
If,
Int8sl,
Int16sl,
Int16ul,
Int32sl,
Int32ul,
PaddedString,
Padding,
PrefixedArray,
Struct,
len_,
this,
)
def array(subcon):
return PrefixedArray(Int32ul, subcon)
Bend = Struct("time" / Float32l, "step" / Float32l, Padding(3), "UNK" / Int8sl)
Beat = Struct(
"time" / Float32l,
"measure" / Int16ul,
"beat" / Int16ul,
"phraseIteration" / Int32ul,
"mask" / Int32ul,
)
Phrase = Struct(
"solo" / Int8sl,
"disparity" / Int8sl,
"ignore" / Int8sl,
Padding(1),
"maxDifficulty" / Int32ul,
"phraseIterationLinks" / Int32ul,
"name" / PaddedString(32, encoding="utf8"),
)
ChordTemplate = Struct(
"mask" / Int32ul,
"frets" / Int8sl[6],
"fingers" / Int8sl[6],
"notes" / Int32sl[6],
"name" / PaddedString(32, encoding="utf8"),
)
ChordNote = Struct(
"mask" / Int32ul[6],
"bends" / Struct("bendValues" / Bend[32], "count" / Int32ul)[6],
"slideTo" / Int8sl[6],
"slideUnpitchTo" / Int8sl[6],
"vibrato" / Int16sl[6],
)
Vocal = Struct(
"time" / Float32l,
"note" / Int32sl,
"length" / Float32l,
"lyrics" / PaddedString(48, encoding="utf8"),
)
Texture = Struct(
"fontpath" / PaddedString(128, encoding="ascii"),
"fontpathLength" / Int32ul,
Padding(4),
"width" / Int32ul,
"height" / Int32ul,
)
BoundingBox = Struct("y0" / Float32l, "x0" / Float32l, "y1" / Float32l, "x1" / Float32l)
SymbolDef = Struct(
"name" / PaddedString(12, encoding="utf8"),
"outerRect" / BoundingBox,
"innerRect" / BoundingBox,
)
Symbols = Struct(
"header" / array(Int32sl[8]),
"texture" / array(Texture),
"definition" / array(SymbolDef),
)
PhraseIteration = Struct(
"phraseId" / Int32ul,
"time" / Float32l,
"endTime" / Float32l,
"difficulty" / Int32ul[3],
)
PhraseExtraInfo = Struct(
"phraseId" / Int32ul,
"difficulty" / Int32ul,
"empty" / Int32ul,
"levelJump" / Int8sl,
"redundant" / Int16sl,
Padding(1),
)
LinkedDiff = Struct("levelBreak" / Int32sl, "nld_phrase" / array(Int32ul))
Action = Struct("time" / Float32l, "name" / PaddedString(256, encoding="ascii"))
Event = Struct("time" / Float32l, "name" / PaddedString(256, encoding="ascii"))
Tone = Struct("time" / Float32l, "id" / Int32ul)
DNA = Struct("time" / Float32l, "id" / Int32ul)
Section = Struct(
"name" / PaddedString(32, encoding="utf8"),
"number" / Int32ul,
"startTime" / Float32l,
"endTime" / Float32l,
"startPhraseIterationId" / Int32ul,
"endPhraseIterationId" / Int32ul,
"stringMask" / Int8sl[36],
)
Anchor = Struct(
"time" / Float32l,
"endTime" / Float32l,
"UNK_time" / Float32l,
"UNK_time2" / Float32l,
"fret" / Int32sl,
"width" / Int32sl,
"phraseIterationId" / Int32ul,
)
AnchorExtension = Struct("time" / Float32l, "fret" / Int8sl, Padding(7))
FingerPrint = Struct(
"chordId" / Int32ul,
"startTime" / Float32l,
"endTime" / Float32l,
"UNK_startTime" / Float32l,
"UNK_endTime" / Float32l,
)
Note = Struct(
"mask" / Int32ul,
"flags" / Int32ul,
"hash" / Int32ul,
"time" / Float32l,
"string" / Int8sl,
"fret" / Int8sl,
"anchorFret" / Int8sl,
"anchorWidth" / Int8sl,
"chordId" / Int32ul,
"chordNoteId" / Int32ul,
"phraseId" / Int32ul,
"phraseIterationId" / Int32ul,
"fingerPrintId" / Int16ul[2],
"nextIterNote" / Int16ul,
"prevIterNote" / Int16ul,
"parentPrevNote" / Int16ul,
"slideTo" / Int8sl,
"slideUnpitchTo" / Int8sl,
"leftHand" / Int8sl,
"tap" / Int8sl,
"pickDirection" / Int8sl,
"slap" / Int8sl,
"pluck" / Int8sl,
"vibrato" / Int16sl,
"sustain" / Float32l,
"bend_time" / Float32l,
"bends" / array(Bend),
)
Level = Struct(
"difficulty" / Int32ul,
"anchors" / array(Anchor),
"anchor_extensions" / array(AnchorExtension),
"fingerprints" / array(FingerPrint)[2],
"notes" / array(Note),
"averageNotesPerIter" / array(Float32l),
"notesInIterCountNoIgnored" / array(Int32ul),
"notesInIterCount" / array(Int32ul),
)
Metadata = Struct(
"maxScores" / Float64l,
"maxNotes" / Float64l,
"maxNotesNoIgnored" / Float64l,
"pointsPerNote" / Float64l,
"firstBeatLength" / Float32l,
"startTime" / Float32l,
"capo" / Int8sl,
"lastConversionDateTime" / PaddedString(32, encoding="ascii"),
"part" / Int16sl,
"songLength" / Float32l,
"tuning" / array(Int16sl),
"firstNoteTime" / Float32l,
"firstNoteTime2" / Float32l,
"maxDifficulty" / Int32sl,
)
Song = Struct(
"beats" / array(Beat),
"phrases" / array(Phrase),
"chordTemplates" / array(ChordTemplate),
"chordNotes" / array(ChordNote),
"vocals" / array(Vocal),
"symbols" / If(len_(this.vocals) > 0, Symbols),
"phraseIterations" / array(PhraseIteration),
"phraseExtraInfos" / array(PhraseExtraInfo),
"newLinkedDiffs" / array(LinkedDiff),
"actions" / array(Action),
"events" / array(Event),
"tones" / array(Tone),
"dna" / array(DNA),
"sections" / array(Section),
"levels" / array(Level),
"metadata" / Metadata,
)
|
[
"construct.len_",
"construct.PaddedString",
"construct.PrefixedArray",
"construct.Struct",
"construct.Padding"
] |
[((361, 477), 'construct.Struct', 'Struct', (["('time' / Float32l)", "('measure' / Int16ul)", "('beat' / Int16ul)", "('phraseIteration' / Int32ul)", "('mask' / Int32ul)"], {}), "('time' / Float32l, 'measure' / Int16ul, 'beat' / Int16ul, \n 'phraseIteration' / Int32ul, 'mask' / Int32ul)\n", (367, 477), False, 'from construct import Float32l, Float64l, If, Int8sl, Int16sl, Int16ul, Int32sl, Int32ul, PaddedString, Padding, PrefixedArray, Struct, len_, this\n'), ((1424, 1498), 'construct.Struct', 'Struct', (["('y0' / Float32l)", "('x0' / Float32l)", "('y1' / Float32l)", "('x1' / Float32l)"], {}), "('y0' / Float32l, 'x0' / Float32l, 'y1' / Float32l, 'x1' / Float32l)\n", (1430, 1498), False, 'from construct import Float32l, Float64l, If, Int8sl, Int16sl, Int16ul, Int32sl, Int32ul, PaddedString, Padding, PrefixedArray, Struct, len_, this\n'), ((1775, 1876), 'construct.Struct', 'Struct', (["('phraseId' / Int32ul)", "('time' / Float32l)", "('endTime' / Float32l)", "('difficulty' / Int32ul[3])"], {}), "('phraseId' / Int32ul, 'time' / Float32l, 'endTime' / Float32l, \n 'difficulty' / Int32ul[3])\n", (1781, 1876), False, 'from construct import Float32l, Float64l, If, Int8sl, Int16sl, Int16ul, Int32sl, Int32ul, PaddedString, Padding, PrefixedArray, Struct, len_, this\n'), ((2313, 2354), 'construct.Struct', 'Struct', (["('time' / Float32l)", "('id' / Int32ul)"], {}), "('time' / Float32l, 'id' / Int32ul)\n", (2319, 2354), False, 'from construct import Float32l, Float64l, If, Int8sl, Int16sl, Int16ul, Int32sl, Int32ul, PaddedString, Padding, PrefixedArray, Struct, len_, this\n'), ((2362, 2403), 'construct.Struct', 'Struct', (["('time' / Float32l)", "('id' / Int32ul)"], {}), "('time' / Float32l, 'id' / Int32ul)\n", (2368, 2403), False, 'from construct import Float32l, Float64l, If, Int8sl, Int16sl, Int16ul, Int32sl, Int32ul, PaddedString, Padding, PrefixedArray, Struct, len_, this\n'), ((2670, 2842), 'construct.Struct', 'Struct', (["('time' / Float32l)", "('endTime' / Float32l)", "('UNK_time' / Float32l)", "('UNK_time2' / Float32l)", "('fret' / Int32sl)", "('width' / Int32sl)", "('phraseIterationId' / Int32ul)"], {}), "('time' / Float32l, 'endTime' / Float32l, 'UNK_time' / Float32l, \n 'UNK_time2' / Float32l, 'fret' / Int32sl, 'width' / Int32sl, \n 'phraseIterationId' / Int32ul)\n", (2676, 2842), False, 'from construct import Float32l, Float64l, If, Int8sl, Int16sl, Int16ul, Int32sl, Int32ul, PaddedString, Padding, PrefixedArray, Struct, len_, this\n'), ((2953, 3085), 'construct.Struct', 'Struct', (["('chordId' / Int32ul)", "('startTime' / Float32l)", "('endTime' / Float32l)", "('UNK_startTime' / Float32l)", "('UNK_endTime' / Float32l)"], {}), "('chordId' / Int32ul, 'startTime' / Float32l, 'endTime' / Float32l, \n 'UNK_startTime' / Float32l, 'UNK_endTime' / Float32l)\n", (2959, 3085), False, 'from construct import Float32l, Float64l, If, Int8sl, Int16sl, Int16ul, Int32sl, Int32ul, PaddedString, Padding, PrefixedArray, Struct, len_, this\n'), ((240, 270), 'construct.PrefixedArray', 'PrefixedArray', (['Int32ul', 'subcon'], {}), '(Int32ul, subcon)\n', (253, 270), False, 'from construct import Float32l, Float64l, If, Int8sl, Int16sl, Int16ul, Int32sl, Int32ul, PaddedString, Padding, PrefixedArray, Struct, len_, this\n'), ((325, 335), 'construct.Padding', 'Padding', (['(3)'], {}), '(3)\n', (332, 335), False, 'from construct import Float32l, Float64l, If, Int8sl, Int16sl, Int16ul, Int32sl, Int32ul, PaddedString, Padding, PrefixedArray, Struct, len_, this\n'), ((588, 598), 'construct.Padding', 'Padding', 
(['(1)'], {}), '(1)\n', (595, 598), False, 'from construct import Float32l, Float64l, If, Int8sl, Int16sl, Int16ul, Int32sl, Int32ul, PaddedString, Padding, PrefixedArray, Struct, len_, this\n'), ((1348, 1358), 'construct.Padding', 'Padding', (['(4)'], {}), '(4)\n', (1355, 1358), False, 'from construct import Float32l, Float64l, If, Int8sl, Int16sl, Int16ul, Int32sl, Int32ul, PaddedString, Padding, PrefixedArray, Struct, len_, this\n'), ((2052, 2062), 'construct.Padding', 'Padding', (['(1)'], {}), '(1)\n', (2059, 2062), False, 'from construct import Float32l, Float64l, If, Int8sl, Int16sl, Int16ul, Int32sl, Int32ul, PaddedString, Padding, PrefixedArray, Struct, len_, this\n'), ((2926, 2936), 'construct.Padding', 'Padding', (['(7)'], {}), '(7)\n', (2933, 2936), False, 'from construct import Float32l, Float64l, If, Int8sl, Int16sl, Int16ul, Int32sl, Int32ul, PaddedString, Padding, PrefixedArray, Struct, len_, this\n'), ((682, 715), 'construct.PaddedString', 'PaddedString', (['(32)'], {'encoding': '"""utf8"""'}), "(32, encoding='utf8')\n", (694, 715), False, 'from construct import Float32l, Float64l, If, Int8sl, Int16sl, Int16ul, Int32sl, Int32ul, PaddedString, Padding, PrefixedArray, Struct, len_, this\n'), ((857, 890), 'construct.PaddedString', 'PaddedString', (['(32)'], {'encoding': '"""utf8"""'}), "(32, encoding='utf8')\n", (869, 890), False, 'from construct import Float32l, Float64l, If, Int8sl, Int16sl, Int16ul, Int32sl, Int32ul, PaddedString, Padding, PrefixedArray, Struct, len_, this\n'), ((1202, 1235), 'construct.PaddedString', 'PaddedString', (['(48)'], {'encoding': '"""utf8"""'}), "(48, encoding='utf8')\n", (1214, 1235), False, 'from construct import Float32l, Float64l, If, Int8sl, Int16sl, Int16ul, Int32sl, Int32ul, PaddedString, Padding, PrefixedArray, Struct, len_, this\n'), ((1275, 1310), 'construct.PaddedString', 'PaddedString', (['(128)'], {'encoding': '"""ascii"""'}), "(128, encoding='ascii')\n", (1287, 1310), False, 'from construct import Float32l, Float64l, If, Int8sl, Int16sl, Int16ul, Int32sl, Int32ul, PaddedString, Padding, PrefixedArray, Struct, len_, this\n'), ((1533, 1566), 'construct.PaddedString', 'PaddedString', (['(12)'], {'encoding': '"""utf8"""'}), "(12, encoding='utf8')\n", (1545, 1566), False, 'from construct import Float32l, Float64l, If, Int8sl, Int16sl, Int16ul, Int32sl, Int32ul, PaddedString, Padding, PrefixedArray, Struct, len_, this\n'), ((2187, 2222), 'construct.PaddedString', 'PaddedString', (['(256)'], {'encoding': '"""ascii"""'}), "(256, encoding='ascii')\n", (2199, 2222), False, 'from construct import Float32l, Float64l, If, Int8sl, Int16sl, Int16ul, Int32sl, Int32ul, PaddedString, Padding, PrefixedArray, Struct, len_, this\n'), ((2268, 2303), 'construct.PaddedString', 'PaddedString', (['(256)'], {'encoding': '"""ascii"""'}), "(256, encoding='ascii')\n", (2280, 2303), False, 'from construct import Float32l, Float64l, If, Int8sl, Int16sl, Int16ul, Int32sl, Int32ul, PaddedString, Padding, PrefixedArray, Struct, len_, this\n'), ((2436, 2469), 'construct.PaddedString', 'PaddedString', (['(32)'], {'encoding': '"""utf8"""'}), "(32, encoding='utf8')\n", (2448, 2469), False, 'from construct import Float32l, Float64l, If, Int8sl, Int16sl, Int16ul, Int32sl, Int32ul, PaddedString, Padding, PrefixedArray, Struct, len_, this\n'), ((4423, 4457), 'construct.PaddedString', 'PaddedString', (['(32)'], {'encoding': '"""ascii"""'}), "(32, encoding='ascii')\n", (4435, 4457), False, 'from construct import Float32l, Float64l, If, Int8sl, Int16sl, Int16ul, Int32sl, 
Int32ul, PaddedString, Padding, PrefixedArray, Struct, len_, this\n'), ((954, 1004), 'construct.Struct', 'Struct', (["('bendValues' / Bend[32])", "('count' / Int32ul)"], {}), "('bendValues' / Bend[32], 'count' / Int32ul)\n", (960, 1004), False, 'from construct import Float32l, Float64l, If, Int8sl, Int16sl, Int16ul, Int32sl, Int32ul, PaddedString, Padding, PrefixedArray, Struct, len_, this\n'), ((4843, 4860), 'construct.len_', 'len_', (['this.vocals'], {}), '(this.vocals)\n', (4847, 4860), False, 'from construct import Float32l, Float64l, If, Int8sl, Int16sl, Int16ul, Int32sl, Int32ul, PaddedString, Padding, PrefixedArray, Struct, len_, this\n')]
|
# coding=utf-8
"""Given the dataset object, make a multithread enqueuer"""
import os
import queue
import threading
import contextlib
import multiprocessing
import time
import random
import sys
import utils
import traceback
import cv2
# modified from keras
class DatasetEnqueuer(object):
def __init__(
self,
dataset,
prefetch=5,
num_workers=1,
start=True, # start the dataset get thread when init
shuffle=False,
# whether to break down each mini-batch for each gpu
is_multi_gpu=False,
last_full_batch=False, # make sure the last batch is full
):
self.dataset = dataset
self.prefetch = prefetch # how many batch to save in queue
self.max_queue_size = int(self.prefetch * dataset.batch_size)
self.is_multi_gpu = is_multi_gpu
self.last_full_batch = last_full_batch
self.workers = num_workers
self.queue = None
self.run_thread = None # the thread to spawn others
self.stop_signal = None
self.cur_batch_count = 0
self.shuffle = shuffle
if start:
self.start()
def is_running(self):
return self.stop_signal is not None and not self.stop_signal.is_set()
def start(self):
self.queue = queue.Queue(self.max_queue_size)
self.stop_signal = threading.Event()
self.run_thread = threading.Thread(target=self._run)
self.run_thread.daemon = True
self.run_thread.start()
def stop(self):
# print("stop called")
if self.is_running():
self._stop()
def _stop(self):
# print("_stop called")
self.stop_signal.set()
with self.queue.mutex:
self.queue.queue.clear()
self.queue.unfinished_tasks = 0
self.queue.not_full.notify()
self.run_thread.join(0)
def __del__(self):
if self.is_running():
self._stop()
# thread to start getting batches into queue
def _run(self):
batch_idxs = list(self.dataset.valid_idxs) * self.dataset.num_epochs
if self.shuffle:
batch_idxs = random.sample(batch_idxs, len(batch_idxs))
batch_idxs = random.sample(batch_idxs, len(batch_idxs))
if self.last_full_batch:
# make sure the batch_idxs are multiplier of batch_size
batch_idxs += [
batch_idxs[-1]
for _ in range(
self.dataset.batch_size - len(batch_idxs) % self.dataset.batch_size
)
]
while True:
with contextlib.closing(
multiprocessing.pool.ThreadPool(self.workers)
) as executor:
for idx in batch_idxs:
if self.stop_signal.is_set():
return
# block until not full
self.queue.put(
executor.apply_async(self.dataset.get_sample, (idx,)),
block=True,
)
self._wait_queue()
if self.stop_signal.is_set():
# We're done
return
# iterator to get batch from the queue
def get(self):
if not self.is_running():
self.start()
try:
while self.is_running():
if self.cur_batch_count == self.dataset.num_batches:
self._stop()
return
samples = []
for i in range(self.dataset.batch_size):
# first get got the ApplyResult object,
# then second get to get the actual thing (block till get)
sample = self.queue.get(block=True).get()
self.queue.task_done()
samples.append(sample)
# break the mini-batch into mini-batches for multi-gpu
if self.is_multi_gpu:
batches = []
# a list of [frames, boxes, labels_arr, ori_boxes, box_keys]
this_batch_idxs = range(len(samples))
# pack these batches for each gpu
this_batch_idxs_gpus = utils.grouper(
this_batch_idxs, self.dataset.batch_size_per_gpu
)
for this_batch_idxs_per_gpu in this_batch_idxs_gpus:
batches.append(
self.dataset.collect_batch(samples, this_batch_idxs_per_gpu)
)
batch = batches
else:
batch = self.dataset.collect_batch(samples)
self.cur_batch_count += 1
yield batch
except Exception as e: # pylint: disable=broad-except
self._stop()
_type, _value, _traceback = sys.exc_info()
print("Exception in enqueuer.get: %s" % e)
traceback.print_tb(_traceback)
raise Exception
def _wait_queue(self):
"""Wait for the queue to be empty."""
while True:
time.sleep(0.1)
if self.queue.unfinished_tasks == 0 or self.stop_signal.is_set():
return
def count_frame_get(total_frame, frame_gap):
count = 0
cur_frame = 0
while cur_frame < total_frame:
if cur_frame % frame_gap != 0:
cur_frame += 1
continue
count += 1
cur_frame += 1
return count
class VideoEnqueuer(object):
def __init__(
self,
cfg,
vcap,
num_frame,
frame_gap=1,
prefetch=5,
start=True, # start the dataset get thread when init
is_moviepy=False,
batch_size=4,
):
self.cfg = cfg
self.vcap = vcap
self.num_frame = num_frame
self.frame_gap = frame_gap
self.is_moviepy = is_moviepy
self.batch_size = batch_size
self.prefetch = prefetch # how many batch to save in queue
self.max_queue_size = int(self.prefetch * batch_size)
self.queue = None
self.run_thread = None # the thread to spawn others
self.stop_signal = None
# how many frames we are actually gonna get due to frame gap
self.get_num_frame = count_frame_get(self.num_frame, self.frame_gap)
# compute the number of batches we gonna get so we know when to stop and exit
# last batch is not enough batch_size
self.num_batches = self.get_num_frame // batch_size + int(
self.get_num_frame % batch_size != 0
)
self.cur_batch_count = 0
if start:
self.start()
def is_running(self):
return self.stop_signal is not None and not self.stop_signal.is_set()
def start(self):
self.queue = queue.Queue(self.max_queue_size)
self.stop_signal = threading.Event()
self.run_thread = threading.Thread(target=self._run)
self.run_thread.daemon = True
self.run_thread.start()
def stop(self):
# print("stop called")
if self.is_running():
self._stop()
def _stop(self):
# print("_stop called")
self.stop_signal.set()
with self.queue.mutex:
self.queue.queue.clear()
self.queue.unfinished_tasks = 0
self.queue.not_full.notify()
self.run_thread.join(0)
def __del__(self):
if self.is_running():
self._stop()
# thread to start getting batches into queue
def _run(self):
cfg = self.cfg
frame_count = 0
while frame_count < self.num_frame:
if self.stop_signal.is_set():
return
if self.is_moviepy:
suc = True
frame = next(self.vcap)
else:
suc, frame = self.vcap.read()
if not suc:
frame_count += 1
continue
if frame_count % self.frame_gap != 0:
frame_count += 1
continue
# frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
im = frame.astype("float32")
resized_image = cv2.resize(im, (cfg.short_edge_size, cfg.max_size))
scale = (
resized_image.shape[0] * 1.0 / im.shape[0]
+ resized_image.shape[1] * 1.0 / im.shape[1]
) / 2.0
self.queue.put((resized_image, scale, frame_count), block=True)
frame_count += 1
self._wait_queue()
if self.stop_signal.is_set():
# We're done
return
# iterator to get batch from the queue
def get(self):
if not self.is_running():
self.start()
try:
while self.is_running():
if self.cur_batch_count == self.num_batches:
self._stop()
return
batch_size = self.batch_size
# last batch
if (self.cur_batch_count == self.num_batches - 1) and (
self.get_num_frame % batch_size != 0
):
batch_size = self.get_num_frame % batch_size
samples = []
for i in range(batch_size):
sample = self.queue.get(block=True)
self.queue.task_done()
samples.append(sample)
batch = samples
self.cur_batch_count += 1
yield batch
except Exception as e: # pylint: disable=broad-except
self._stop()
_type, _value, _traceback = sys.exc_info()
print("Exception in enqueuer.get: %s" % e)
traceback.print_tb(_traceback)
raise Exception
def _wait_queue(self):
"""Wait for the queue to be empty."""
while True:
time.sleep(0.1)
if self.queue.unfinished_tasks == 0 or self.stop_signal.is_set():
return
|
[
"threading.Thread",
"multiprocessing.pool.ThreadPool",
"traceback.print_tb",
"utils.grouper",
"time.sleep",
"threading.Event",
"sys.exc_info",
"queue.Queue",
"cv2.resize"
] |
[((1300, 1332), 'queue.Queue', 'queue.Queue', (['self.max_queue_size'], {}), '(self.max_queue_size)\n', (1311, 1332), False, 'import queue\n'), ((1360, 1377), 'threading.Event', 'threading.Event', ([], {}), '()\n', (1375, 1377), False, 'import threading\n'), ((1405, 1439), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._run'}), '(target=self._run)\n', (1421, 1439), False, 'import threading\n'), ((6894, 6926), 'queue.Queue', 'queue.Queue', (['self.max_queue_size'], {}), '(self.max_queue_size)\n', (6905, 6926), False, 'import queue\n'), ((6954, 6971), 'threading.Event', 'threading.Event', ([], {}), '()\n', (6969, 6971), False, 'import threading\n'), ((6999, 7033), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._run'}), '(target=self._run)\n', (7015, 7033), False, 'import threading\n'), ((5176, 5191), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (5186, 5191), False, 'import time\n'), ((8275, 8326), 'cv2.resize', 'cv2.resize', (['im', '(cfg.short_edge_size, cfg.max_size)'], {}), '(im, (cfg.short_edge_size, cfg.max_size))\n', (8285, 8326), False, 'import cv2\n'), ((9985, 10000), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (9995, 10000), False, 'import time\n'), ((4929, 4943), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (4941, 4943), False, 'import sys\n'), ((5011, 5041), 'traceback.print_tb', 'traceback.print_tb', (['_traceback'], {}), '(_traceback)\n', (5029, 5041), False, 'import traceback\n'), ((9738, 9752), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (9750, 9752), False, 'import sys\n'), ((9820, 9850), 'traceback.print_tb', 'traceback.print_tb', (['_traceback'], {}), '(_traceback)\n', (9838, 9850), False, 'import traceback\n'), ((2663, 2708), 'multiprocessing.pool.ThreadPool', 'multiprocessing.pool.ThreadPool', (['self.workers'], {}), '(self.workers)\n', (2694, 2708), False, 'import multiprocessing\n'), ((4268, 4331), 'utils.grouper', 'utils.grouper', (['this_batch_idxs', 'self.dataset.batch_size_per_gpu'], {}), '(this_batch_idxs, self.dataset.batch_size_per_gpu)\n', (4281, 4331), False, 'import utils\n')]
|
import unittest
import events
from tests.testing_utils import is_valid_response, ValidResponseObjectTester
class TestEvents(unittest.TestCase):
def test_on_launch(self):
response = events.on_launch()
self.assertTrue(is_valid_response(response))
response_tester = ValidResponseObjectTester(response)
self.assertEqual(response_tester.get_speech_plain(),
('I can read you the Bible passage for a service or play you a past sermon. What would you '
'like? '))
self.assertFalse(response_tester.is_session_ending())
self.assertEqual(response_tester.get_card_text(), ('Hello! Ask me for the bible reading '
'for a service or a past sermon.'))
self.assertEqual(response_tester.get_card_title(), 'Christ Church Mayfair')
|
[
"events.on_launch",
"tests.testing_utils.ValidResponseObjectTester",
"tests.testing_utils.is_valid_response"
] |
[((196, 214), 'events.on_launch', 'events.on_launch', ([], {}), '()\n', (212, 214), False, 'import events\n'), ((294, 329), 'tests.testing_utils.ValidResponseObjectTester', 'ValidResponseObjectTester', (['response'], {}), '(response)\n', (319, 329), False, 'from tests.testing_utils import is_valid_response, ValidResponseObjectTester\n'), ((239, 266), 'tests.testing_utils.is_valid_response', 'is_valid_response', (['response'], {}), '(response)\n', (256, 266), False, 'from tests.testing_utils import is_valid_response, ValidResponseObjectTester\n')]
|
import imp
import os
import sys
from corsproxy.wsgi import application
sys.path.insert(0, os.path.dirname(__file__))
# wsgi = imp.load_source('wsgi', 'passenger_wsgi.py')
# application = wsgi.application
|
[
"os.path.dirname"
] |
[((91, 116), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (106, 116), False, 'import os\n')]
|
#!/usr/bin/env python
from dataclasses import dataclass
from math import log2
from typing import Callable, Optional, Tuple, Union
import click
import PIL.Image
import PIL.ImageMath
from PIL.Image import Image
from PIL.ImageChops import darker, lighter
def _normalize_offset(offset: int, size: int) -> int:
return offset if offset >= 0 else size - 1
@dataclass
class ImageSlicer:
image: Image
def _get_absolute_range(
self, item: Union[slice, int], axis: int
) -> Tuple[int, int]:
size = self.image.size[axis]
if item is None:
return 0, size
if isinstance(item, slice):
assert item.step is None
return (
0 if item.start is None else _normalize_offset(item.start, size),
size if item.stop is None else _normalize_offset(item.stop, size),
)
offset = _normalize_offset(item, size)
return offset, offset + 1
def __getitem__(
self, item: Tuple[Union[slice, int, None], Union[slice, int, None]]
) -> Image:
x, y = item
x1, x2 = self._get_absolute_range(x, 0)
y1, y2 = self._get_absolute_range(y, 1)
return self.image.crop((x1, y1, x2, y2))
def get_brightest_neighbor(image: Image, shift: int, aggregate=lighter) -> Image:
slicer = ImageSlicer(image)
orig = slicer[:-shift, :-shift]
down = slicer[:-shift, shift:]
right = slicer[shift:, :-shift]
diag = slicer[shift:, shift:]
return aggregate(aggregate(orig, down), aggregate(right, diag))
def fill(image: Image, direction: int, x: int = None, y: int = None) -> None:
def get_filler_dimension(offset: Optional[int], size: int) -> int:
if offset is None:
return size
return offset if direction == -1 else size - offset - 1
def get_filler_offset(offset: Optional[int]) -> int:
return 0 if offset is None or direction == -1 else offset + 1
slicer = ImageSlicer(image)
filler = slicer[x, y].resize(
(get_filler_dimension(x, image.width), get_filler_dimension(y, image.height))
)
image.paste(filler, (get_filler_offset(x), get_filler_offset(y)))
def get_extreme(
image: Image, steps: int, mode: Callable[[Image, Image], Image]
) -> Image:
out = PIL.Image.new(image.mode, image.size)
assert steps > 0
for step in range(steps):
shift = 2 ** step
image = get_brightest_neighbor(image, shift, mode)
out.paste(image, (shift, shift))
fill(out, direction=-1, y=shift)
fill(out, direction=1, y=out.height - shift)
fill(out, direction=-1, x=shift)
fill(out, direction=1, x=out.width - shift)
return out
@click.command()
@click.argument("input_path")
@click.argument("output_path")
@click.option("-b", "--block-size", default=0)
@click.option("-w", "--white-level", default=192)
def handle_image(input_path, output_path, block_size, white_level):
image = PIL.Image.open(input_path).convert("L")
if not block_size:
block_size = int(log2(min(image.size))) - 1
adjusted_image = PIL.ImageMath.eval(
"255 * float(image - darkest) / float(brightest - darkest) / gain",
image=image,
darkest=get_extreme(image, block_size, PIL.ImageChops.darker),
brightest=get_extreme(image, block_size, PIL.ImageChops.lighter),
gain=white_level / 255.0,
)
adjusted_image.convert("L").save(output_path)
if __name__ == "__main__":
handle_image()
|
[
"click.option",
"click.argument",
"click.command"
] |
[((2690, 2705), 'click.command', 'click.command', ([], {}), '()\n', (2703, 2705), False, 'import click\n'), ((2707, 2735), 'click.argument', 'click.argument', (['"""input_path"""'], {}), "('input_path')\n", (2721, 2735), False, 'import click\n'), ((2737, 2766), 'click.argument', 'click.argument', (['"""output_path"""'], {}), "('output_path')\n", (2751, 2766), False, 'import click\n'), ((2768, 2813), 'click.option', 'click.option', (['"""-b"""', '"""--block-size"""'], {'default': '(0)'}), "('-b', '--block-size', default=0)\n", (2780, 2813), False, 'import click\n'), ((2815, 2863), 'click.option', 'click.option', (['"""-w"""', '"""--white-level"""'], {'default': '(192)'}), "('-w', '--white-level', default=192)\n", (2827, 2863), False, 'import click\n')]
|
from flask_task import Task
from models import *
import time
task = Task()
print(task)
@task.decorator
def proceso():
users = Users.query.all()
print(users)
time.sleep(7)
|
[
"flask_task.Task",
"time.sleep"
] |
[((69, 75), 'flask_task.Task', 'Task', ([], {}), '()\n', (73, 75), False, 'from flask_task import Task\n'), ((171, 184), 'time.sleep', 'time.sleep', (['(7)'], {}), '(7)\n', (181, 184), False, 'import time\n')]
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from django.core.exceptions import ValidationError
from django.forms.models import model_to_dict, modelform_factory
from model_mommy.mommy import Mommy
import pytest
from fakeapp.models import GenericIPAddressFieldModel
from strictmodels import MODEL_MOMMY_MAPPING
def test_StrictGenericIPAddressField_no_args():
value = GenericIPAddressFieldModel()
@pytest.mark.django_db
def test_StrictGenericIPAddressField_save():
x = GenericIPAddressFieldModel(field='127.0.0.1')
x.save()
assert model_to_dict(x) == model_to_dict(GenericIPAddressFieldModel.objects.get(pk=x.pk))
@pytest.mark.django_db
def test_StrictGenericIPAddressField_mommy():
mommy = Mommy(model=GenericIPAddressFieldModel)
mommy.type_mapping.update(MODEL_MOMMY_MAPPING)
mommy.prepare()
mommy.make()
@pytest.mark.django_db
def test_StrictGenericIPAddressField_form_with_instance_valid():
x = GenericIPAddressFieldModel(field='127.0.0.1')
form_class = modelform_factory(model=GenericIPAddressFieldModel, fields=['field'])
form = form_class(data={'field': '255.255.255.255'}, instance=x)
assert form.is_valid() is True
assert form.errors == {}
assert form.save().field == '255.255.255.255'
def test_StrictGenericIPAddressField_form_with_instance_invalid():
x = GenericIPAddressFieldModel(field='127.0.0.1')
form_class = modelform_factory(model=GenericIPAddressFieldModel, fields=['field'])
form = form_class(data={'field': 'ghost'}, instance=x)
assert form.is_valid() is False
assert form.errors == {'field': ['Enter a valid IPv4 or IPv6 address.']}
@pytest.mark.django_db
def test_StrictGenericIPAddressField_form_without_instance_valid():
form_class = modelform_factory(model=GenericIPAddressFieldModel, fields=['field'])
form = form_class(data={'field': '255.255.255.255'})
assert form.is_valid() is True
assert form.errors == {}
assert form.save().field == '255.255.255.255'
def test_StrictGenericIPAddressField_form_without_instance_invalid():
form_class = modelform_factory(model=GenericIPAddressFieldModel, fields=['field'])
form = form_class(data={'field': 'shark'})
assert form.is_valid() is False
assert form.errors == {'field': ['Enter a valid IPv4 or IPv6 address.']}
def test_StrictGenericIPAddressField_descriptor_doesnt_disappear():
"""
don't clobber the descriptor
"""
value = GenericIPAddressFieldModel(field='127.0.0.1')
assert value.field == '127.0.0.1'
value.field = '127.0.0.2'
assert value.field == '127.0.0.2'
with pytest.raises(ValidationError):
value.field = 'v'*256
assert value.field == '127.0.0.2'
value.field = '192.168.0.1'
assert value.field == '192.168.0.1'
value.field = None
def test_StrictGenericIPAddressField_values_error_length():
"""
Once an input is too long, error loudly.
ValidationError: Ensure this value has at most 255 characters (it has 256)
"""
ok = '2001:0db8:85a3:0042:1000:8a2e:0370:7334'
notok = '2001:0db8:85a3:0042:1000:8a2e:0370:7334a'
assert GenericIPAddressFieldModel(field=ok).field == '2001:db8:85a3:42:1000:8a2e:370:7334' # noqa
with pytest.raises(ValidationError):
GenericIPAddressFieldModel(field=notok)
def test_StrictGenericIPAddressField_null_skips_cleaning():
GenericIPAddressFieldModel(field=None)
def test_StrictGenericIPAddressField_ok_until_changed():
"""
Ensure this value cannot change to an invalid state after being set
"""
model = GenericIPAddressFieldModel(field='2001:0::0:01')
with pytest.raises(ValidationError):
model.field = 't'*256
@pytest.mark.django_db
def test_StrictGenericIPAddressField_create_via_queryset():
"""
ValidationError: Ensure this value has at most 255 characters (it has 256)
"""
assert GenericIPAddressFieldModel.objects.count() == 0
with pytest.raises(ValidationError):
GenericIPAddressFieldModel.objects.create(field='t'*256)
assert GenericIPAddressFieldModel.objects.count() == 0
@pytest.mark.django_db
def test_StrictGenericIPAddressField_update_via_queryset_invalid_then_get():
model = GenericIPAddressFieldModel.objects.create(field='127.0.0.1')
model.__class__.objects.filter(pk=model.pk).update(field='2.2.2.2.2.2.2.2')
with pytest.raises(ValidationError):
model.__class__.objects.get(pk=model.pk)
|
[
"fakeapp.models.GenericIPAddressFieldModel.objects.get",
"django.forms.models.modelform_factory",
"fakeapp.models.GenericIPAddressFieldModel",
"model_mommy.mommy.Mommy",
"pytest.raises",
"django.forms.models.model_to_dict",
"fakeapp.models.GenericIPAddressFieldModel.objects.create",
"fakeapp.models.GenericIPAddressFieldModel.objects.count"
] |
[((501, 529), 'fakeapp.models.GenericIPAddressFieldModel', 'GenericIPAddressFieldModel', ([], {}), '()\n', (527, 529), False, 'from fakeapp.models import GenericIPAddressFieldModel\n'), ((608, 653), 'fakeapp.models.GenericIPAddressFieldModel', 'GenericIPAddressFieldModel', ([], {'field': '"""127.0.0.1"""'}), "(field='127.0.0.1')\n", (634, 653), False, 'from fakeapp.models import GenericIPAddressFieldModel\n'), ((844, 883), 'model_mommy.mommy.Mommy', 'Mommy', ([], {'model': 'GenericIPAddressFieldModel'}), '(model=GenericIPAddressFieldModel)\n', (849, 883), False, 'from model_mommy.mommy import Mommy\n'), ((1070, 1115), 'fakeapp.models.GenericIPAddressFieldModel', 'GenericIPAddressFieldModel', ([], {'field': '"""127.0.0.1"""'}), "(field='127.0.0.1')\n", (1096, 1115), False, 'from fakeapp.models import GenericIPAddressFieldModel\n'), ((1133, 1202), 'django.forms.models.modelform_factory', 'modelform_factory', ([], {'model': 'GenericIPAddressFieldModel', 'fields': "['field']"}), "(model=GenericIPAddressFieldModel, fields=['field'])\n", (1150, 1202), False, 'from django.forms.models import model_to_dict, modelform_factory\n'), ((1463, 1508), 'fakeapp.models.GenericIPAddressFieldModel', 'GenericIPAddressFieldModel', ([], {'field': '"""127.0.0.1"""'}), "(field='127.0.0.1')\n", (1489, 1508), False, 'from fakeapp.models import GenericIPAddressFieldModel\n'), ((1526, 1595), 'django.forms.models.modelform_factory', 'modelform_factory', ([], {'model': 'GenericIPAddressFieldModel', 'fields': "['field']"}), "(model=GenericIPAddressFieldModel, fields=['field'])\n", (1543, 1595), False, 'from django.forms.models import model_to_dict, modelform_factory\n'), ((1878, 1947), 'django.forms.models.modelform_factory', 'modelform_factory', ([], {'model': 'GenericIPAddressFieldModel', 'fields': "['field']"}), "(model=GenericIPAddressFieldModel, fields=['field'])\n", (1895, 1947), False, 'from django.forms.models import model_to_dict, modelform_factory\n'), ((2208, 2277), 'django.forms.models.modelform_factory', 'modelform_factory', ([], {'model': 'GenericIPAddressFieldModel', 'fields': "['field']"}), "(model=GenericIPAddressFieldModel, fields=['field'])\n", (2225, 2277), False, 'from django.forms.models import model_to_dict, modelform_factory\n'), ((2569, 2614), 'fakeapp.models.GenericIPAddressFieldModel', 'GenericIPAddressFieldModel', ([], {'field': '"""127.0.0.1"""'}), "(field='127.0.0.1')\n", (2595, 2614), False, 'from fakeapp.models import GenericIPAddressFieldModel\n'), ((3493, 3531), 'fakeapp.models.GenericIPAddressFieldModel', 'GenericIPAddressFieldModel', ([], {'field': 'None'}), '(field=None)\n', (3519, 3531), False, 'from fakeapp.models import GenericIPAddressFieldModel\n'), ((3692, 3740), 'fakeapp.models.GenericIPAddressFieldModel', 'GenericIPAddressFieldModel', ([], {'field': '"""2001:0::0:01"""'}), "(field='2001:0::0:01')\n", (3718, 3740), False, 'from fakeapp.models import GenericIPAddressFieldModel\n'), ((4330, 4390), 'fakeapp.models.GenericIPAddressFieldModel.objects.create', 'GenericIPAddressFieldModel.objects.create', ([], {'field': '"""127.0.0.1"""'}), "(field='127.0.0.1')\n", (4371, 4390), False, 'from fakeapp.models import GenericIPAddressFieldModel\n'), ((678, 694), 'django.forms.models.model_to_dict', 'model_to_dict', (['x'], {}), '(x)\n', (691, 694), False, 'from django.forms.models import model_to_dict, modelform_factory\n'), ((2730, 2760), 'pytest.raises', 'pytest.raises', (['ValidationError'], {}), '(ValidationError)\n', (2743, 2760), False, 'import pytest\n'), ((3345, 3375), 'pytest.raises', 'pytest.raises', (['ValidationError'], {}), '(ValidationError)\n', (3358, 3375), False, 'import pytest\n'), ((3385, 3424), 'fakeapp.models.GenericIPAddressFieldModel', 'GenericIPAddressFieldModel', ([], {'field': 'notok'}), '(field=notok)\n', (3411, 3424), False, 'from fakeapp.models import GenericIPAddressFieldModel\n'), ((3750, 3780), 'pytest.raises', 'pytest.raises', (['ValidationError'], {}), '(ValidationError)\n', (3763, 3780), False, 'import pytest\n'), ((4003, 4045), 'fakeapp.models.GenericIPAddressFieldModel.objects.count', 'GenericIPAddressFieldModel.objects.count', ([], {}), '()\n', (4043, 4045), False, 'from fakeapp.models import GenericIPAddressFieldModel\n'), ((4060, 4090), 'pytest.raises', 'pytest.raises', (['ValidationError'], {}), '(ValidationError)\n', (4073, 4090), False, 'import pytest\n'), ((4100, 4158), 'fakeapp.models.GenericIPAddressFieldModel.objects.create', 'GenericIPAddressFieldModel.objects.create', ([], {'field': "('t' * 256)"}), "(field='t' * 256)\n", (4141, 4158), False, 'from fakeapp.models import GenericIPAddressFieldModel\n'), ((4168, 4210), 'fakeapp.models.GenericIPAddressFieldModel.objects.count', 'GenericIPAddressFieldModel.objects.count', ([], {}), '()\n', (4208, 4210), False, 'from fakeapp.models import GenericIPAddressFieldModel\n'), ((4480, 4510), 'pytest.raises', 'pytest.raises', (['ValidationError'], {}), '(ValidationError)\n', (4493, 4510), False, 'import pytest\n'), ((712, 759), 'fakeapp.models.GenericIPAddressFieldModel.objects.get', 'GenericIPAddressFieldModel.objects.get', ([], {'pk': 'x.pk'}), '(pk=x.pk)\n', (750, 759), False, 'from fakeapp.models import GenericIPAddressFieldModel\n'), ((3245, 3281), 'fakeapp.models.GenericIPAddressFieldModel', 'GenericIPAddressFieldModel', ([], {'field': 'ok'}), '(field=ok)\n', (3271, 3281), False, 'from fakeapp.models import GenericIPAddressFieldModel\n')]
|
from peewee import SqliteDatabase, Model
from peewee import IntegerField, CharField, PrimaryKeyField, TimestampField
from pathlib import Path
from configparser import ConfigParser
config = ConfigParser()
config.read("config.ini", encoding="utf-8")
db = SqliteDatabase(Path.cwd() / config.get('main', 'database_file'))
class BaseModel(Model):
class Meta:
database = db
class Topic(BaseModel):
id = PrimaryKeyField(null=False)
title = CharField()
link = CharField()
ext_id = IntegerField()
saved_on = TimestampField()
announced_on = TimestampField()
class Meta:
db_table = 'topics'
db.connect()
if not Topic.table_exists():
Topic.create_table()
|
[
"peewee.PrimaryKeyField",
"peewee.IntegerField",
"peewee.CharField",
"peewee.TimestampField",
"pathlib.Path.cwd",
"configparser.ConfigParser"
] |
[((195, 209), 'configparser.ConfigParser', 'ConfigParser', ([], {}), '()\n', (207, 209), False, 'from configparser import ConfigParser\n'), ((436, 463), 'peewee.PrimaryKeyField', 'PrimaryKeyField', ([], {'null': '(False)'}), '(null=False)\n', (451, 463), False, 'from peewee import IntegerField, CharField, PrimaryKeyField, TimestampField\n'), ((477, 488), 'peewee.CharField', 'CharField', ([], {}), '()\n', (486, 488), False, 'from peewee import IntegerField, CharField, PrimaryKeyField, TimestampField\n'), ((501, 512), 'peewee.CharField', 'CharField', ([], {}), '()\n', (510, 512), False, 'from peewee import IntegerField, CharField, PrimaryKeyField, TimestampField\n'), ((527, 541), 'peewee.IntegerField', 'IntegerField', ([], {}), '()\n', (539, 541), False, 'from peewee import IntegerField, CharField, PrimaryKeyField, TimestampField\n'), ((558, 574), 'peewee.TimestampField', 'TimestampField', ([], {}), '()\n', (572, 574), False, 'from peewee import IntegerField, CharField, PrimaryKeyField, TimestampField\n'), ((595, 611), 'peewee.TimestampField', 'TimestampField', ([], {}), '()\n', (609, 611), False, 'from peewee import IntegerField, CharField, PrimaryKeyField, TimestampField\n'), ((278, 288), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (286, 288), False, 'from pathlib import Path\n')]
|
from warnings import filterwarnings
PAD = "<PAD>"
UNK = "<UNK>"
MASK = "<MASK>"
BOS = "<BOS>"
EOS = "<EOS>"
def filter_warnings():
# "The dataloader does not have many workers which may be a bottleneck."
filterwarnings("ignore", category=UserWarning, module="pytorch_lightning.trainer.data_loading", lineno=102)
filterwarnings("ignore", category=UserWarning, module="pytorch_lightning.utilities.data", lineno=41)
# "Please also save or load the state of the optimizer when saving or loading the scheduler."
filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler", lineno=216) # save
filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler", lineno=234) # load
|
[
"warnings.filterwarnings"
] |
[((215, 327), 'warnings.filterwarnings', 'filterwarnings', (['"""ignore"""'], {'category': 'UserWarning', 'module': '"""pytorch_lightning.trainer.data_loading"""', 'lineno': '(102)'}), "('ignore', category=UserWarning, module=\n 'pytorch_lightning.trainer.data_loading', lineno=102)\n", (229, 327), False, 'from warnings import filterwarnings\n'), ((327, 432), 'warnings.filterwarnings', 'filterwarnings', (['"""ignore"""'], {'category': 'UserWarning', 'module': '"""pytorch_lightning.utilities.data"""', 'lineno': '(41)'}), "('ignore', category=UserWarning, module=\n 'pytorch_lightning.utilities.data', lineno=41)\n", (341, 432), False, 'from warnings import filterwarnings\n'), ((530, 628), 'warnings.filterwarnings', 'filterwarnings', (['"""ignore"""'], {'category': 'UserWarning', 'module': '"""torch.optim.lr_scheduler"""', 'lineno': '(216)'}), "('ignore', category=UserWarning, module=\n 'torch.optim.lr_scheduler', lineno=216)\n", (544, 628), False, 'from warnings import filterwarnings\n'), ((636, 734), 'warnings.filterwarnings', 'filterwarnings', (['"""ignore"""'], {'category': 'UserWarning', 'module': '"""torch.optim.lr_scheduler"""', 'lineno': '(234)'}), "('ignore', category=UserWarning, module=\n 'torch.optim.lr_scheduler', lineno=234)\n", (650, 734), False, 'from warnings import filterwarnings\n')]
|
import logging
import re
import yaml
LOG = logging.getLogger(__name__)
def error_override_vars(override_vars, spec_filepath):
"""
Warn user if any given variable name isn't found in the original spec file.
"""
if override_vars is None:
return
original_text = open(spec_filepath, "r").read()
for variable in override_vars.keys():
if variable not in original_text:
raise ValueError(
f"Command line override variable '{variable}' not found in spec file '{spec_filepath}'."
)
def replace_override_vars(full_text, env, override_vars):
"""
Given the full text of a yaml spec, return the full
text with user variable overrides in the 'env' block.
The env yaml block looks like:
env:
variables:
.......
.......
The regex will find and match to the above yaml block.
"""
updated_env = dict(env)
if override_vars is not None:
for key, val in env.items():
updated_env[key].update(override_vars)
updated_env = {"env": updated_env}
dump = yaml.dump(updated_env, default_flow_style=False, sort_keys=False)
updated_env_text = f"\n{dump}\n"
env_block_pattern = r"\nenv\s*:\s*(\n+( |\t)+.*)+\n*"
regex = re.compile(env_block_pattern)
updated_full_text = re.sub(regex, updated_env_text, full_text)
return updated_full_text
def dump_with_overrides(spec, override_vars):
dumped_text = spec.dump()
if override_vars is None:
return dumped_text
result = replace_override_vars(
full_text=dumped_text, env=spec.environment, override_vars=override_vars
)
return result
|
[
"yaml.dump",
"re.sub",
"logging.getLogger",
"re.compile"
] |
[((46, 73), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (63, 73), False, 'import logging\n'), ((1128, 1193), 'yaml.dump', 'yaml.dump', (['updated_env'], {'default_flow_style': '(False)', 'sort_keys': '(False)'}), '(updated_env, default_flow_style=False, sort_keys=False)\n', (1137, 1193), False, 'import yaml\n'), ((1302, 1331), 're.compile', 're.compile', (['env_block_pattern'], {}), '(env_block_pattern)\n', (1312, 1331), False, 'import re\n'), ((1356, 1398), 're.sub', 're.sub', (['regex', 'updated_env_text', 'full_text'], {}), '(regex, updated_env_text, full_text)\n', (1362, 1398), False, 'import re\n')]
|
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ResourceTypeMetadata(object):
"""
Describes resources of a given type within a package.
"""
def __init__(self, **kwargs):
"""
Initializes a new ResourceTypeMetadata object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param resource_type:
The value to assign to the resource_type property of this ResourceTypeMetadata.
:type resource_type: str
:param properties:
The value to assign to the properties property of this ResourceTypeMetadata.
:type properties: list[oci.oda.models.MetadataProperty]
"""
self.swagger_types = {
'resource_type': 'str',
'properties': 'list[MetadataProperty]'
}
self.attribute_map = {
'resource_type': 'resourceType',
'properties': 'properties'
}
self._resource_type = None
self._properties = None
@property
def resource_type(self):
"""
Gets the resource_type of this ResourceTypeMetadata.
The type of the resource described by this metadata object.
:return: The resource_type of this ResourceTypeMetadata.
:rtype: str
"""
return self._resource_type
@resource_type.setter
def resource_type(self, resource_type):
"""
Sets the resource_type of this ResourceTypeMetadata.
The type of the resource described by this metadata object.
:param resource_type: The resource_type of this ResourceTypeMetadata.
:type: str
"""
self._resource_type = resource_type
@property
def properties(self):
"""
Gets the properties of this ResourceTypeMetadata.
Any properties needed to describe the content and its usage for this resource type, and within the containing package.
:return: The properties of this ResourceTypeMetadata.
:rtype: list[oci.oda.models.MetadataProperty]
"""
return self._properties
@properties.setter
def properties(self, properties):
"""
Sets the properties of this ResourceTypeMetadata.
Any properties needed to describe the content and its usage for this resource type, and within the containing package.
:param properties: The properties of this ResourceTypeMetadata.
:type: list[oci.oda.models.MetadataProperty]
"""
self._properties = properties
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
|
[
"oci.util.formatted_flat_dict"
] |
[((3128, 3153), 'oci.util.formatted_flat_dict', 'formatted_flat_dict', (['self'], {}), '(self)\n', (3147, 3153), False, 'from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel\n')]
|
import pygame
import pyscroll
from project.entities.player import Player
from .constants import FONTS, SCREEN_SCALE
from .world import World
class Renderer:
def __init__(self, width: int, height: int):
self.screen: pygame.Surface = None
self.surface: pygame.Surface = None
self._set_screen(width, height)
self.map_layer = None
self.group = None
def load_world(self, world: World):
w, h = self.screen.get_size()
self.map_layer = pyscroll.BufferedRenderer(world.map_data,
(w / SCREEN_SCALE, h / SCREEN_SCALE),
clamp_camera=True)
self.group = pyscroll.PyscrollGroup(map_layer=self.map_layer, default_layer=4)
self.group.add(world.player)
self.group.add(world.lemons)
self.group.add(world.juice, layer=5)
def draw(self, player: Player):
# Prevents the camera from tracking the player when moving left
camera_pos = list(player.rect.center)
camera_pos[0] = max(camera_pos[0], player.max_x)
self.group.center(camera_pos)
self.group.draw(self.surface)
def draw_score(self, score):
text = f'Lemons: {score}'
font = pygame.font.Font(FONTS['monogram'], 16)
font_surface: pygame.Surface = font.render(text, False, pygame.Color('white'))
x = self.surface.get_size()[0] - font_surface.get_width()
self.surface.blit(font_surface, (x - 4, 4))
def resize(self, width, height):
self._set_screen(width, height)
self.map_layer.set_size((width / SCREEN_SCALE, height / SCREEN_SCALE))
def _draw_debug_info(self, player: Player, col_event):
# TODO: Move somewhere else?
text = repr(player).split('\n')
if col_event:
text.extend((
f'Collision: {col_event.collision}',
f'Position: {col_event.position} (offset: {col_event.offset})',
f'Surface: {col_event.surface}'
))
font = pygame.font.Font(FONTS['monogram'], 16)
height = 0
for line in text:
font_surface: pygame.Surface = font.render(line, False, pygame.Color('white'))
bg_surface: pygame.Surface = pygame.Surface(font_surface.get_size(), pygame.SRCALPHA, 32)
bg_surface.fill((51, 51, 51, 159))
bg_surface.blit(font_surface, (0, 0))
self.surface.blit(bg_surface, (0, height))
height += font_surface.get_height()
def update(self, player: Player, score: int, debug: bool, col_event):
self.draw(player)
self.draw_score(score)
if debug:
self._draw_debug_info(player, col_event)
# Resizes the surface and sets it as the new screen.
pygame.transform.scale(self.surface, self.screen.get_size(), self.screen)
pygame.display.flip() # Updates the display.
def _set_screen(self, width, height):
"""Simple wrapper to keep the screen resizeable."""
self.screen = pygame.display.set_mode((width, height), pygame.RESIZABLE)
self.surface = pygame.Surface((width / SCREEN_SCALE, height / SCREEN_SCALE)).convert()
|
[
"pygame.Surface",
"pyscroll.PyscrollGroup",
"pygame.display.set_mode",
"pygame.Color",
"pygame.display.flip",
"pyscroll.BufferedRenderer",
"pygame.font.Font"
] |
[((497, 599), 'pyscroll.BufferedRenderer', 'pyscroll.BufferedRenderer', (['world.map_data', '(w / SCREEN_SCALE, h / SCREEN_SCALE)'], {'clamp_camera': '(True)'}), '(world.map_data, (w / SCREEN_SCALE, h /\n SCREEN_SCALE), clamp_camera=True)\n', (522, 599), False, 'import pyscroll\n'), ((719, 784), 'pyscroll.PyscrollGroup', 'pyscroll.PyscrollGroup', ([], {'map_layer': 'self.map_layer', 'default_layer': '(4)'}), '(map_layer=self.map_layer, default_layer=4)\n', (741, 784), False, 'import pyscroll\n'), ((1276, 1315), 'pygame.font.Font', 'pygame.font.Font', (["FONTS['monogram']", '(16)'], {}), "(FONTS['monogram'], 16)\n", (1292, 1315), False, 'import pygame\n'), ((2075, 2114), 'pygame.font.Font', 'pygame.font.Font', (["FONTS['monogram']", '(16)'], {}), "(FONTS['monogram'], 16)\n", (2091, 2114), False, 'import pygame\n'), ((2908, 2929), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (2927, 2929), False, 'import pygame\n'), ((3079, 3137), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(width, height)', 'pygame.RESIZABLE'], {}), '((width, height), pygame.RESIZABLE)\n', (3102, 3137), False, 'import pygame\n'), ((1380, 1401), 'pygame.Color', 'pygame.Color', (['"""white"""'], {}), "('white')\n", (1392, 1401), False, 'import pygame\n'), ((2228, 2249), 'pygame.Color', 'pygame.Color', (['"""white"""'], {}), "('white')\n", (2240, 2249), False, 'import pygame\n'), ((3161, 3222), 'pygame.Surface', 'pygame.Surface', (['(width / SCREEN_SCALE, height / SCREEN_SCALE)'], {}), '((width / SCREEN_SCALE, height / SCREEN_SCALE))\n', (3175, 3222), False, 'import pygame\n')]
|
from Main.Environments.Connect4 import Constants, Utils
from Tests.Environments.Connect4 import testCasesRawEvaluate
from unittest import TestCase
import numpy as np
class TestCreateMirroredStateAndPolicy(TestCase):
def testMirrorState(self):
AMOUNT_OF_TESTS_PER_CASE = 10
for case in testCasesRawEvaluate.TEST_CASES:
board = np.array(case[0])
for p in [-1, 1]:
convState = Utils.state2ConvState(board, p)
convStates = [convState for i in range(AMOUNT_OF_TESTS_PER_CASE)]
randomPolices = [np.random.random(7) for i in range(AMOUNT_OF_TESTS_PER_CASE)]
mirrorStates, mirrorPolices = Utils.createMirroredStateAndPolicy(convStates, randomPolices)
reMirrorStates, reMirrorPolices = Utils.createMirroredStateAndPolicy(mirrorStates, mirrorPolices)
for i in range(len(randomPolices)):
assert np.array_equal(randomPolices[i], reMirrorPolices[i])
for m in reMirrorStates:
assert np.array_equal(convState, m)
|
[
"Main.Environments.Connect4.Utils.state2ConvState",
"Main.Environments.Connect4.Utils.createMirroredStateAndPolicy",
"numpy.random.random",
"numpy.array",
"numpy.array_equal"
] |
[((372, 389), 'numpy.array', 'np.array', (['case[0]'], {}), '(case[0])\n', (380, 389), True, 'import numpy as np\n'), ((450, 481), 'Main.Environments.Connect4.Utils.state2ConvState', 'Utils.state2ConvState', (['board', 'p'], {}), '(board, p)\n', (471, 481), False, 'from Main.Environments.Connect4 import Constants, Utils\n'), ((710, 771), 'Main.Environments.Connect4.Utils.createMirroredStateAndPolicy', 'Utils.createMirroredStateAndPolicy', (['convStates', 'randomPolices'], {}), '(convStates, randomPolices)\n', (744, 771), False, 'from Main.Environments.Connect4 import Constants, Utils\n'), ((823, 886), 'Main.Environments.Connect4.Utils.createMirroredStateAndPolicy', 'Utils.createMirroredStateAndPolicy', (['mirrorStates', 'mirrorPolices'], {}), '(mirrorStates, mirrorPolices)\n', (857, 886), False, 'from Main.Environments.Connect4 import Constants, Utils\n'), ((599, 618), 'numpy.random.random', 'np.random.random', (['(7)'], {}), '(7)\n', (615, 618), True, 'import numpy as np\n'), ((970, 1022), 'numpy.array_equal', 'np.array_equal', (['randomPolices[i]', 'reMirrorPolices[i]'], {}), '(randomPolices[i], reMirrorPolices[i])\n', (984, 1022), True, 'import numpy as np\n'), ((1095, 1123), 'numpy.array_equal', 'np.array_equal', (['convState', 'm'], {}), '(convState, m)\n', (1109, 1123), True, 'import numpy as np\n')]
|
from PyQt5.QtCore import QThread, pyqtSignal
import settings
import socket
import Hotkey_Press
class Sock_Conn(QThread):
closeDiag = pyqtSignal()
def __init__(self):
QThread.__init__(self)
def __del__(self):
self.wait()
def run(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((settings.ip, settings.port))
s.listen(5)
while settings.conn_stat:
c, addr = s.accept()
code_encoded = c.recv(1024)
code_decoded = code_encoded.decode('utf-8')
if (code_decoded == "Conn"):
settings.socket_flag = 1
self.closeDiag.emit()
else:
Hotkey_Press.Hotkey(code_decoded)
c.close()
s.close()
|
[
"PyQt5.QtCore.pyqtSignal",
"socket.socket",
"Hotkey_Press.Hotkey",
"PyQt5.QtCore.QThread.__init__"
] |
[((146, 158), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', ([], {}), '()\n', (156, 158), False, 'from PyQt5.QtCore import QThread, pyqtSignal\n'), ((195, 217), 'PyQt5.QtCore.QThread.__init__', 'QThread.__init__', (['self'], {}), '(self)\n', (211, 217), False, 'from PyQt5.QtCore import QThread, pyqtSignal\n'), ((300, 349), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (313, 349), False, 'import socket\n'), ((743, 776), 'Hotkey_Press.Hotkey', 'Hotkey_Press.Hotkey', (['code_decoded'], {}), '(code_decoded)\n', (762, 776), False, 'import Hotkey_Press\n')]
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
import sqlparse
from superset.sql_parse import ParsedQuery
def test_cte_with_comments_is_select():
"""
Some CTES with comments are not correctly identified as SELECTS.
"""
sql = ParsedQuery(
"""WITH blah AS
(SELECT * FROM core_dev.manager_team),
blah2 AS
(SELECT * FROM core_dev.manager_workspace)
SELECT * FROM blah
INNER JOIN blah2 ON blah2.team_id = blah.team_id"""
)
assert sql.is_select()
sql = ParsedQuery(
"""WITH blah AS
/*blahblahbalh*/
(SELECT * FROM core_dev.manager_team),
--blahblahbalh
blah2 AS
(SELECT * FROM core_dev.manager_workspace)
SELECT * FROM blah
INNER JOIN blah2 ON blah2.team_id = blah.team_id"""
)
assert sql.is_select()
def test_cte_is_select():
"""
Some CTEs are not correctly identified as SELECTS.
"""
# `AS(` gets parsed as a function
sql = ParsedQuery(
"""WITH foo AS(
SELECT
FLOOR(__time TO WEEK) AS "week",
name,
COUNT(DISTINCT user_id) AS "unique_users"
FROM "druid"."my_table"
GROUP BY 1,2
)
SELECT
f.week,
f.name,
f.unique_users
FROM foo f"""
)
assert sql.is_select()
def test_unknown_select():
"""
Test that `is_select` works when sqlparse fails to identify the type.
"""
sql = "WITH foo AS(SELECT 1) SELECT 1"
assert sqlparse.parse(sql)[0].get_type() == "UNKNOWN"
assert ParsedQuery(sql).is_select()
sql = "WITH foo AS(SELECT 1) INSERT INTO my_table (a) VALUES (1)"
assert sqlparse.parse(sql)[0].get_type() == "UNKNOWN"
assert not ParsedQuery(sql).is_select()
sql = "WITH foo AS(SELECT 1) DELETE FROM my_table"
assert sqlparse.parse(sql)[0].get_type() == "UNKNOWN"
assert not ParsedQuery(sql).is_select()
|
[
"superset.sql_parse.ParsedQuery",
"sqlparse.parse"
] |
[((1015, 1221), 'superset.sql_parse.ParsedQuery', 'ParsedQuery', (['"""WITH blah AS\n (SELECT * FROM core_dev.manager_team),\n\nblah2 AS\n (SELECT * FROM core_dev.manager_workspace)\n\nSELECT * FROM blah\nINNER JOIN blah2 ON blah2.team_id = blah.team_id"""'], {}), '(\n """WITH blah AS\n (SELECT * FROM core_dev.manager_team),\n\nblah2 AS\n (SELECT * FROM core_dev.manager_workspace)\n\nSELECT * FROM blah\nINNER JOIN blah2 ON blah2.team_id = blah.team_id"""\n )\n', (1026, 1221), False, 'from superset.sql_parse import ParsedQuery\n'), ((1264, 1502), 'superset.sql_parse.ParsedQuery', 'ParsedQuery', (['"""WITH blah AS\n/*blahblahbalh*/\n (SELECT * FROM core_dev.manager_team),\n--blahblahbalh\n\nblah2 AS\n (SELECT * FROM core_dev.manager_workspace)\n\nSELECT * FROM blah\nINNER JOIN blah2 ON blah2.team_id = blah.team_id"""'], {}), '(\n """WITH blah AS\n/*blahblahbalh*/\n (SELECT * FROM core_dev.manager_team),\n--blahblahbalh\n\nblah2 AS\n (SELECT * FROM core_dev.manager_workspace)\n\nSELECT * FROM blah\nINNER JOIN blah2 ON blah2.team_id = blah.team_id"""\n )\n', (1275, 1502), False, 'from superset.sql_parse import ParsedQuery\n'), ((1681, 1910), 'superset.sql_parse.ParsedQuery', 'ParsedQuery', (['"""WITH foo AS(\nSELECT\n FLOOR(__time TO WEEK) AS "week",\n name,\n COUNT(DISTINCT user_id) AS "unique_users"\nFROM "druid"."my_table"\nGROUP BY 1,2\n)\nSELECT\n f.week,\n f.name,\n f.unique_users\nFROM foo f"""'], {}), '(\n """WITH foo AS(\nSELECT\n FLOOR(__time TO WEEK) AS "week",\n name,\n COUNT(DISTINCT user_id) AS "unique_users"\nFROM "druid"."my_table"\nGROUP BY 1,2\n)\nSELECT\n f.week,\n f.name,\n f.unique_users\nFROM foo f"""\n )\n', (1692, 1910), False, 'from superset.sql_parse import ParsedQuery\n'), ((2173, 2189), 'superset.sql_parse.ParsedQuery', 'ParsedQuery', (['sql'], {}), '(sql)\n', (2184, 2189), False, 'from superset.sql_parse import ParsedQuery\n'), ((2346, 2362), 'superset.sql_parse.ParsedQuery', 'ParsedQuery', (['sql'], {}), '(sql)\n', (2357, 2362), False, 'from superset.sql_parse import ParsedQuery\n'), ((2504, 2520), 'superset.sql_parse.ParsedQuery', 'ParsedQuery', (['sql'], {}), '(sql)\n', (2515, 2520), False, 'from superset.sql_parse import ParsedQuery\n'), ((2115, 2134), 'sqlparse.parse', 'sqlparse.parse', (['sql'], {}), '(sql)\n', (2129, 2134), False, 'import sqlparse\n'), ((2284, 2303), 'sqlparse.parse', 'sqlparse.parse', (['sql'], {}), '(sql)\n', (2298, 2303), False, 'import sqlparse\n'), ((2442, 2461), 'sqlparse.parse', 'sqlparse.parse', (['sql'], {}), '(sql)\n', (2456, 2461), False, 'import sqlparse\n')]
|
"""updateByLine.py Begun Apr 10, 2014
This program is intended to be rather general.
The 'changein' file consists of a sequence of line pairs:
nn old old-text
nn new new-text
nn is the line number (starting at 1) in the input vcp file.
'old' and 'new' are fixed.
old-text should be identical to the text of line nn in input vcp file.
new-text is the replacement for line nn, written to the output vcp file.
'changein' file should be utf-8 encoded.
Nov 16, 2014 comment line
May 30, 2017. Allow for 'ins' (insert) and 'del' (delete) in addition to 'new'
1234 old xyz
1234 ins uvw
1234 old xyz
1234 del
NOTE: This introduces complications regarding line numbers.
The interpretation is that
(a) the line number (1234) represents the line number in the INPUT file
(b) For 'ins', the inserted line ('uvw') is inserted AFTER this line
(c) For 'del', the text part is ignored (should typically be blank,
and there should be a space character after 'del': '1234 del '
Nov 27, 2018. Changed print X to print(X), for python3 compatibility.
"""
#
from __future__ import print_function
import re,sys
import codecs
class Change(object):
def __init__(self,n,oldline,newline):
self.n = n
m = re.search(r'^([0-9]+) old (.*)$',oldline)
m1 = re.search(r'^([0-9]+) (new|ins|del) (.*)$',newline)
if (not m) or (not m1):
print('Change error(1) @ line %s:' % n)
out= 'oldline=%s' % oldline
print(out.encode('utf-8'))
out= 'newline=%s' % newline
print(out.encode('utf-8'))
exit(1)
self.chgcode = m1.group(2)
nold = m.group(1)
m = re.search(r'^([0-9]+) old (.*)$',oldline)
oldtext = m.group(2)
nnew = m1.group(1)
newtext = m1.group(3)
if nold != nnew:
print('Change error(2) @ line %s:' % n)
print('nold(%s) != nnew(%s)' % (nold,nnew))
out= 'oldline=%s' % oldline
print(out.encode('utf-8'))
out= 'newline=%s' % newline
print(out.encode('utf-8'))
exit(1)
if (not m) or (not m1):
print('Change error(2) @ line %s:' % n)
out= 'oldline=%s' % oldline
print(out.encode('utf-8'))
out= 'newline=%s' % newline
print(out.encode('utf-8'))
exit(1)
self.lnumstr = nold # same as nnew
self.oldtext = oldtext
self.newtext = newtext
def init_changein(changein ):
changes = [] # ret
f = codecs.open(changein,encoding='utf-8',mode='r')
n = 0
sep='XXXX'
for line in f:
line = line.rstrip('\r\n')
if line.startswith(';'): # skip comment line
continue
n = n + 1
if (n % 2) == 1:
oldline = line
else:
newline = line
chgrec = Change(n-1,oldline,newline)
changes.append(chgrec)
f.close()
if (n % 2) != 0:
print("ERROR init_changein: Expected EVEN number of lines in",changein)
exit(1)
return changes
def update(filein,changein,fileout):
# determine change structure from changein file
changes = init_changein(changein)
# initialize input records
with codecs.open(filein,encoding='utf-8',mode='r') as f:
# recs is a list of lines, to accomodate 'ins' and 'del'
recs = [[line.rstrip('\n\r')] for line in f]
print(len(recs),"lines read from",filein)
# process change records
# counter for each type ('new','ins','del') of change record
counter = {}
for change in changes:
lnum = int(change.lnumstr)
irec = lnum - 1 # since lnum assumed to start at 1
try:
oldrec = recs[irec]
except:
print("lnum error: ",change.lnumstr)
exit(1)
# oldrec is a list of lines, typically with just 1 line.
# We assume there is always at least 1 element in this tuple, and
# that it's text matches the 'oldtext' of the change
if len(oldrec)==0:
print("update ERROR #1. record has been deleted for linenum=",lnum)
exit(1)
oldtext = oldrec[0]
if oldtext != change.oldtext:
print("CHANGE ERROR #2: Old mismatch line %s of %s" %(change.n,changein))
print("Change record lnum =",lnum)
out = "Change old text:\n%s" % change.oldtext
print(out.encode('utf-8'))
out = "Change old input:\n%s" % oldtext
print(out.encode('utf-8'))
out = "line from %s:" % filein
print(out.encode('utf-8'))
exit(1)
code = change.chgcode
# update counter
if code not in counter:
counter[code] = 0
counter[code] = counter[code] + 1
if code == 'new':
# a simple change. Make this to the last in list of oldrecs
oldrec.pop() # remove last record
oldrec.append(change.newtext) # insert new text at end
recs[irec] = oldrec
elif code == 'ins':
# insert new text onto end of oldrec
oldrec.append(change.newtext)
recs[irec] = oldrec
elif code == 'del':
# remove text from end
oldrec.pop() # remove last record
recs[irec] = oldrec
# write all records to fileout
fout = codecs.open(fileout,'w','utf-8')
nout = 0
for rec in recs:
# rec is a list of strings, possibly empty
for text in rec:
fout.write("%s\n" % text)
nout = nout + 1
fout.close()
# write summary of changes performed
print(nout,"records written to",fileout)
print("%s change transactions from %s" % (len(changes),changein))
# summary of types of changes transacted
codes = counter.keys()
outarr = ["%s of type %s"%(counter[key],key) for key in codes]
out = ', '.join(outarr)
print(out)
if __name__=="__main__":
filein = sys.argv[1]
changein = sys.argv[2]
fileout = sys.argv[3]
update(filein,changein,fileout)
|
[
"re.search",
"codecs.open"
] |
[((2283, 2332), 'codecs.open', 'codecs.open', (['changein'], {'encoding': '"""utf-8"""', 'mode': '"""r"""'}), "(changein, encoding='utf-8', mode='r')\n", (2294, 2332), False, 'import codecs\n'), ((4660, 4694), 'codecs.open', 'codecs.open', (['fileout', '"""w"""', '"""utf-8"""'], {}), "(fileout, 'w', 'utf-8')\n", (4671, 4694), False, 'import codecs\n'), ((1225, 1266), 're.search', 're.search', (['"""^([0-9]+) old (.*)$"""', 'oldline'], {}), "('^([0-9]+) old (.*)$', oldline)\n", (1234, 1266), False, 'import re, sys\n'), ((1274, 1325), 're.search', 're.search', (['"""^([0-9]+) (new|ins|del) (.*)$"""', 'newline'], {}), "('^([0-9]+) (new|ins|del) (.*)$', newline)\n", (1283, 1325), False, 'import re, sys\n'), ((1583, 1624), 're.search', 're.search', (['"""^([0-9]+) old (.*)$"""', 'oldline'], {}), "('^([0-9]+) old (.*)$', oldline)\n", (1592, 1624), False, 'import re, sys\n'), ((2880, 2927), 'codecs.open', 'codecs.open', (['filein'], {'encoding': '"""utf-8"""', 'mode': '"""r"""'}), "(filein, encoding='utf-8', mode='r')\n", (2891, 2927), False, 'import codecs\n')]
|
'''
VizUtil.py
Utilities for displaying satellite images,
with (optional) bound-box annotations
'''
import numpy as np
from matplotlib import pylab
import os
import skimage.color
def imshow(Im, block=False, figID=1):
figH = pylab.figure(num=figID)
figH.clf()
pylab.imshow(Im)
pylab.draw()
pylab.show(block=block)
def showExamples(PMat, Nsubplots=9, block=False, figID=1, W=1, H=1):
nRow = int(np.floor(np.sqrt(Nsubplots)))
nCol = int(np.ceil(Nsubplots/ float(nRow)))
figH, axH = pylab.subplots(nRow, nCol, num=figID, figsize=(W*nCol, H*nRow))
Kplot = np.minimum(PMat.shape[0], Nsubplots)
for kk in range(Kplot):
pylab.subplot(nRow, nCol, kk+1)
if PMat[kk].ndim == 3:
pylab.imshow(PMat[kk], interpolation='nearest')
else:
pylab.imshow(PMat[kk], interpolation='nearest', cmap='gray')
pylab.axis('image')
pylab.xticks([])
pylab.yticks([])
# Disable visibility for unused subplots
for kk in range(Kplot, nRow*nCol):
pylab.subplot(nRow, nCol, kk+1)
pylab.axis('off')
pylab.draw()
pylab.show(block=block)
def save_fig_as_png(savepath, figID=1):
figH = pylab.figure(num=figID)
pylab.draw()
if not os.path.exists(savepath) and not savepath.count(os.path.sep):
savepath = os.path.join(DEFAULTSAVEPATH, savepath)
pylab.xticks([])
pylab.yticks([])
pylab.savefig(savepath, bbox_inches = 'tight', pad_inches = 0)
def makeImageWithBBoxAnnotations(Im, BBox, BBox2=None,
boxcolor=[0,1,0], # green
boxcolor2=[1,1,0], # yellow
**kwargs):
''' Create color image with bounding boxes highlighted in color
'''
if Im.ndim < 3:
AIm = skimage.color.gray2rgb(Im)
else:
AIm = Im.copy() # annotation shouldn't happen to original array
_add_bbox_to_im_inplace(AIm, BBox, boxcolor)
if BBox2 is not None:
_add_bbox_to_im_inplace(AIm, BBox2, boxcolor2)
return AIm
def _add_bbox_to_im_inplace(Im, BBox, boxcolor, doThickLines=1):
BBox = np.asarray(BBox, dtype=np.int32)
boxcolor = np.asarray(boxcolor, dtype=np.float64)
if boxcolor.max() > 1:
boxcolor = boxcolor / 255
for r in xrange(BBox.shape[0]):
Im[BBox[r,0]:BBox[r,1], BBox[r,2]] = boxcolor[np.newaxis,:]
Im[BBox[r,0]:BBox[r,1], BBox[r,3]-1] = boxcolor[np.newaxis,:]
Im[BBox[r,0], BBox[r,2]:BBox[r,3]] = boxcolor[np.newaxis,:]
Im[BBox[r,1]-1, BBox[r,2]:BBox[r,3]] = boxcolor[np.newaxis,:]
## Draw thick lines by repeating this cmd
## but slightly shifting BBox coords +1 or -1 pixel
if doThickLines:
for inc in [-1, +1]:
ABox = BBox + inc
np.maximum(ABox, 0, out=ABox)
np.minimum(ABox[:,1], Im.shape[0], out=ABox[:,1])
np.minimum(ABox[:,3], Im.shape[1], out=ABox[:,3])
_add_bbox_to_im_inplace(Im, ABox, boxcolor, doThickLines=0)
"""
def showMostConfidentFalseNegatives(Ytrue, Phat, Nsubplots=9):
if Phat.ndim > 1:
Phat = Phat[:,-1] # use final column, which is probability of 1
Yhat = np.asarray(Phat > 0.5, dtype=Ytrue.dtype)
falseNegIDs = np.flatnonzero( np.logical_and(Yhat == 0, Yhat != Ytrue))
print 'FALSE NEG: %d/%d' % (len(falseNegIDs), np.sum(Ytrue==1))
if len(falseNegIDs) == 0:
return None
# Sort false positives from smallest probability to largest
sortIDs = np.argsort( Phat[falseNegIDs] )
falseNegIDs = falseNegIDs[sortIDs[:Nsubplots]]
#print ' ', falseNegIDs, Phat[falseNegIDs]
PosIms, _ = loadTestImages(testGroupIDs, falseNegIDs, None)
return plotImages(PosIms, Nsubplots=Nsubplots)
def showMostConfidentFalsePositives(Ytrue, Phat, Nsubplots=9):
if Phat.ndim > 1:
Phat = Phat[:,-1] # use final column, which is probability of 1
Yhat = np.asarray(Phat > 0.5, dtype=Ytrue.dtype)
falsePosIDs = np.flatnonzero( np.logical_and(Yhat == 1, Yhat != Ytrue))
print 'FALSE POS: %d/%d' % (len(falsePosIDs), np.sum(Ytrue==0))
if len(falsePosIDs) == 0:
return None
# Sort false positives from largest probability to smallest
sortIDs = np.argsort( -1*Phat[falsePosIDs] )
falsePosIDs = falsePosIDs[sortIDs[:Nsubplots]]
#print ' ', falsePosIDs, Phat[falsePosIDs]
_, NegIms = loadTestImages(testGroupIDs, None, falsePosIDs)
return plotImages(NegIms, Nsubplots=Nsubplots)
"""
|
[
"matplotlib.pylab.savefig",
"matplotlib.pylab.xticks",
"numpy.minimum",
"numpy.maximum",
"matplotlib.pylab.subplot",
"matplotlib.pylab.imshow",
"numpy.asarray",
"matplotlib.pylab.figure",
"os.path.exists",
"matplotlib.pylab.axis",
"matplotlib.pylab.yticks",
"matplotlib.pylab.subplots",
"numpy.sqrt",
"os.path.join",
"matplotlib.pylab.draw",
"matplotlib.pylab.show"
] |
[((228, 251), 'matplotlib.pylab.figure', 'pylab.figure', ([], {'num': 'figID'}), '(num=figID)\n', (240, 251), False, 'from matplotlib import pylab\n'), ((267, 283), 'matplotlib.pylab.imshow', 'pylab.imshow', (['Im'], {}), '(Im)\n', (279, 283), False, 'from matplotlib import pylab\n'), ((286, 298), 'matplotlib.pylab.draw', 'pylab.draw', ([], {}), '()\n', (296, 298), False, 'from matplotlib import pylab\n'), ((301, 324), 'matplotlib.pylab.show', 'pylab.show', ([], {'block': 'block'}), '(block=block)\n', (311, 324), False, 'from matplotlib import pylab\n'), ((499, 566), 'matplotlib.pylab.subplots', 'pylab.subplots', (['nRow', 'nCol'], {'num': 'figID', 'figsize': '(W * nCol, H * nRow)'}), '(nRow, nCol, num=figID, figsize=(W * nCol, H * nRow))\n', (513, 566), False, 'from matplotlib import pylab\n'), ((575, 611), 'numpy.minimum', 'np.minimum', (['PMat.shape[0]', 'Nsubplots'], {}), '(PMat.shape[0], Nsubplots)\n', (585, 611), True, 'import numpy as np\n'), ((1039, 1051), 'matplotlib.pylab.draw', 'pylab.draw', ([], {}), '()\n', (1049, 1051), False, 'from matplotlib import pylab\n'), ((1054, 1077), 'matplotlib.pylab.show', 'pylab.show', ([], {'block': 'block'}), '(block=block)\n', (1064, 1077), False, 'from matplotlib import pylab\n'), ((1128, 1151), 'matplotlib.pylab.figure', 'pylab.figure', ([], {'num': 'figID'}), '(num=figID)\n', (1140, 1151), False, 'from matplotlib import pylab\n'), ((1154, 1166), 'matplotlib.pylab.draw', 'pylab.draw', ([], {}), '()\n', (1164, 1166), False, 'from matplotlib import pylab\n'), ((1295, 1311), 'matplotlib.pylab.xticks', 'pylab.xticks', (['[]'], {}), '([])\n', (1307, 1311), False, 'from matplotlib import pylab\n'), ((1314, 1330), 'matplotlib.pylab.yticks', 'pylab.yticks', (['[]'], {}), '([])\n', (1326, 1330), False, 'from matplotlib import pylab\n'), ((1333, 1391), 'matplotlib.pylab.savefig', 'pylab.savefig', (['savepath'], {'bbox_inches': '"""tight"""', 'pad_inches': '(0)'}), "(savepath, bbox_inches='tight', pad_inches=0)\n", (1346, 1391), False, 'from matplotlib import pylab\n'), ((2037, 2069), 'numpy.asarray', 'np.asarray', (['BBox'], {'dtype': 'np.int32'}), '(BBox, dtype=np.int32)\n', (2047, 2069), True, 'import numpy as np\n'), ((2083, 2121), 'numpy.asarray', 'np.asarray', (['boxcolor'], {'dtype': 'np.float64'}), '(boxcolor, dtype=np.float64)\n', (2093, 2121), True, 'import numpy as np\n'), ((642, 675), 'matplotlib.pylab.subplot', 'pylab.subplot', (['nRow', 'nCol', '(kk + 1)'], {}), '(nRow, nCol, kk + 1)\n', (655, 675), False, 'from matplotlib import pylab\n'), ((837, 856), 'matplotlib.pylab.axis', 'pylab.axis', (['"""image"""'], {}), "('image')\n", (847, 856), False, 'from matplotlib import pylab\n'), ((861, 877), 'matplotlib.pylab.xticks', 'pylab.xticks', (['[]'], {}), '([])\n', (873, 877), False, 'from matplotlib import pylab\n'), ((882, 898), 'matplotlib.pylab.yticks', 'pylab.yticks', (['[]'], {}), '([])\n', (894, 898), False, 'from matplotlib import pylab\n'), ((983, 1016), 'matplotlib.pylab.subplot', 'pylab.subplot', (['nRow', 'nCol', '(kk + 1)'], {}), '(nRow, nCol, kk + 1)\n', (996, 1016), False, 'from matplotlib import pylab\n'), ((1019, 1036), 'matplotlib.pylab.axis', 'pylab.axis', (['"""off"""'], {}), "('off')\n", (1029, 1036), False, 'from matplotlib import pylab\n'), ((1253, 1292), 'os.path.join', 'os.path.join', (['DEFAULTSAVEPATH', 'savepath'], {}), '(DEFAULTSAVEPATH, savepath)\n', (1265, 1292), False, 'import os\n'), ((417, 435), 'numpy.sqrt', 'np.sqrt', (['Nsubplots'], {}), '(Nsubplots)\n', (424, 435), True, 'import numpy as np\n'), ((707, 754), 'matplotlib.pylab.imshow', 'pylab.imshow', (['PMat[kk]'], {'interpolation': '"""nearest"""'}), "(PMat[kk], interpolation='nearest')\n", (719, 754), False, 'from matplotlib import pylab\n'), ((771, 831), 'matplotlib.pylab.imshow', 'pylab.imshow', (['PMat[kk]'], {'interpolation': '"""nearest"""', 'cmap': '"""gray"""'}), "(PMat[kk], interpolation='nearest', cmap='gray')\n", (783, 831), False, 'from matplotlib import pylab\n'), ((1176, 1200), 'os.path.exists', 'os.path.exists', (['savepath'], {}), '(savepath)\n', (1190, 1200), False, 'import os\n'), ((2645, 2674), 'numpy.maximum', 'np.maximum', (['ABox', '(0)'], {'out': 'ABox'}), '(ABox, 0, out=ABox)\n', (2655, 2674), True, 'import numpy as np\n'), ((2681, 2732), 'numpy.minimum', 'np.minimum', (['ABox[:, 1]', 'Im.shape[0]'], {'out': 'ABox[:, 1]'}), '(ABox[:, 1], Im.shape[0], out=ABox[:, 1])\n', (2691, 2732), True, 'import numpy as np\n'), ((2737, 2788), 'numpy.minimum', 'np.minimum', (['ABox[:, 3]', 'Im.shape[1]'], {'out': 'ABox[:, 3]'}), '(ABox[:, 3], Im.shape[1], out=ABox[:, 3])\n', (2747, 2788), True, 'import numpy as np\n')]
|
#
# Collective Knowledge (caffe CK front-end)
#
# See CK LICENSE.txt for licensing details
# See CK COPYRIGHT.txt for copyright details
#
# Developer: cTuning foundation, <EMAIL>, http://cTuning.org
#
cfg={} # Will be updated by CK (meta description of this module)
work={} # Will be updated by CK (temporal data)
ck=None # Will be updated by CK (initialized CK kernel)
# Local settings
##############################################################################
# Initialize module
def init(i):
"""
Input: {}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
return {'return':0}
##############################################################################
# crowd-benchmark caffe
def crowdbench(i):
"""
Input: {
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
i['action']='crowdsource'
i['module_uoa']=cfg['module_deps']['experiment.bench.caffe']
return ck.access(i)
##############################################################################
# TBD: classification demo using webcam + benchmarking/tuning via CK
def demo(i):
"""
Input: {
(camera_id) - camera ID
(delay) - delay
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
# Deps
import time
import cv2
import os
# Prepare tmp entry if doesn't exist
duoa=cfg['demo']['data_uoa']
image_name=cfg['demo']['image_name']
r=ck.access({'action':'load',
'module_uoa':cfg['module_deps']['tmp'],
'data_uoa':duoa})
if r['return']>0:
if r['return']!=16: return r
r=ck.access({'action':'add',
'module_uoa':cfg['module_deps']['tmp'],
'data_uoa':duoa})
if r['return']>0: return r
p=r['path']
pf=os.path.join(p, image_name)
# Initialize web cam
ci=int(i.get('camera_id',0))
dl=int(i.get('delay',1))
wcam = cv2.VideoCapture(ci)
# Permanent loop
while True:
ck.out('Obtaining picture from webcam ...')
s, img = wcam.read()
if s: # frame captured without any errors
# cv2.namedWindow("cam-test")
# cv2.imshow("cam-test",img)
# destroyWindow("cam-test")
cv2.imwrite(pf,img)
time.sleep(dl)
return {'return':0}
##############################################################################
# autotune Caffe workloads
def autotune(i):
"""
Input: {
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
i['module_uoa']=cfg['module_deps']['program']
i['data_uoa']='caffe'
i['explore']='yes'
i['extra_tags']='dnn'
i['skip_collaborative']='yes'
i['skip_pruning']='yes'
i['iterations']=-1
i['new']='yes'
i['cmd_keys']=['time_cpu','time_gpu']
return ck.access(i)
|
[
"cv2.VideoCapture",
"cv2.imwrite",
"os.path.join",
"time.sleep"
] |
[((2291, 2318), 'os.path.join', 'os.path.join', (['p', 'image_name'], {}), '(p, image_name)\n', (2303, 2318), False, 'import os\n'), ((2419, 2439), 'cv2.VideoCapture', 'cv2.VideoCapture', (['ci'], {}), '(ci)\n', (2435, 2439), False, 'import cv2\n'), ((2768, 2782), 'time.sleep', 'time.sleep', (['dl'], {}), '(dl)\n', (2778, 2782), False, 'import time\n'), ((2740, 2760), 'cv2.imwrite', 'cv2.imwrite', (['pf', 'img'], {}), '(pf, img)\n', (2751, 2760), False, 'import cv2\n')]
|
import re
from node import Node
last_result = {'line':'','level':0}
def convertStringListToNode(str_list, rex_list, current_level=0, node=Node()):
while len(str_list) > 0:
line = str_list[0]
line_level = getLineLevel(line, rex_list)
if (line_level > current_level):
childNode = Node()
node.addChild(childNode)
convertStringListToNode(str_list,rex_list,current_level+1,childNode)
if (line_level == current_level):
if line_level == len(rex_list):
str_list.remove(line)
if node.val == '':
node.val = line
else:
node.val = node.val + "\n" + line
elif line_level == 0:
str_list.remove(line)
if node.val == '':
node.val = line
else:
node.setNext(Node())
node = node.next
node.val = line
elif node.val != '':
return
elif node.val == '':
str_list.remove(line)
node.val = line
continue
if line_level < current_level:
return
def getLineLevel(str, rex_list):
if str == last_result['line']:
return last_result['level']
last_result['line'] = str
for x in rex_list:
if x.match(str):
last_result['level'] = rex_list.index(x)
return last_result['level']
last_result['level'] = len(rex_list)
return last_result['level']
|
[
"node.Node"
] |
[((145, 151), 'node.Node', 'Node', ([], {}), '()\n', (149, 151), False, 'from node import Node\n'), ((330, 336), 'node.Node', 'Node', ([], {}), '()\n', (334, 336), False, 'from node import Node\n'), ((939, 945), 'node.Node', 'Node', ([], {}), '()\n', (943, 945), False, 'from node import Node\n')]
|
# -*- coding: utf-8 -*-
# Copyright 2015-2016 Telefónica Investigación y Desarrollo, S.A.U
#
# This file is part of FIWARE project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with <EMAIL>
from mock import patch
from unittest import TestCase
from fiwareskuld.utils.rotated_files import rotate_files as rf
import os
class TestRotatedFiles(TestCase):
def test_rotated_files_complete_files(self):
# Given
name = 'kk'
max_level = 100
rename_to = 'foo'
# /usr/src/Python-1.5/Makefile
with patch('fiwareskuld.utils.rotated_files.glob') as mockglob:
mockglob.glob.return_value = ['kk', 'kk.001', 'kk.002', 'kk.003', 'kk.004', 'kk.005']
expected_value = ['kk.001', 'kk.002', 'kk.003', 'kk.004', 'kk.005', 'kk.006']
d = {k: v for k, v in zip(mockglob.glob.return_value, expected_value)}
with patch.object(os, 'rename') as mockrename:
mockrename.return_value = None
# When
rf(name=name, max_level=max_level, rename_to=rename_to)
# Then
# Check the number of calls to the os.rename method.
self.assertEquals(mockrename.call_count, len(mockglob.glob.return_value),
"The rename operator will not called for all the values in the directory")
# Check that we made all the os.rename calls with the proper name file.
for k, v in d.iteritems():
mockrename.assert_any_call(k, v)
def test_rotated_files_with_only_one_file_with_number(self):
# Given
name = 'fake'
max_level = 100
rename_to = 'foo'
# /usr/src/Python-1.5/Makefile
with patch('fiwareskuld.utils.rotated_files.glob') as mockglob:
mockglob.glob.return_value = ['fake.001']
expected_value = ['fake.002']
d = {k: v for k, v in zip(mockglob.glob.return_value, expected_value)}
with patch.object(os, 'rename') as mockrename:
mockrename.return_value = None
# When
rf(name=name, max_level=max_level, rename_to=rename_to)
# Then
self.assertEquals(mockrename.call_count, len(mockglob.glob.return_value),
"The rename operator will not called for all the values in the directory")
# Check that we made all the os.rename calls with the proper name file.
for k, v in d.iteritems():
mockrename.assert_any_call(k, v)
def test_rotated_files_with_only_one_file_without_number(self):
# Given
name = 'fake'
max_level = 100
rename_to = 'foo'
# /usr/src/Python-1.5/Makefile
with patch('fiwareskuld.utils.rotated_files.glob') as mockglob:
mockglob.glob.return_value = ['fake']
expected_value = ['fake.001']
d = {k: v for k, v in zip(mockglob.glob.return_value, expected_value)}
with patch.object(os, 'rename') as mockrename:
mockrename.return_value = None
# When
rf(name=name, max_level=max_level, rename_to=rename_to)
# Then
self.assertEquals(mockrename.call_count, len(mockglob.glob.return_value),
"The rename operator will not called for all the values in the directory")
# Check that we made all the os.rename calls with the proper name file.
for k, v in d.iteritems():
mockrename.assert_any_call(k, v)
def test_rotated_files_with_max_level(self):
# Given
name = 'kk'
max_level = 4
rename_to = 'foo'
# /usr/src/Python-1.5/Makefile
with patch('fiwareskuld.utils.rotated_files.glob') as mockglob:
mockglob.glob.return_value = ['kk', 'kk.001', 'kk.002', 'kk.003']
expected_value = ['kk.001', 'kk.002', 'kk.003', 'foo']
d = {k: v for k, v in zip(mockglob.glob.return_value, expected_value)}
with patch.object(os, 'rename') as mockrename:
mockrename.return_value = None
# When
rf(name=name, max_level=max_level, rename_to=rename_to)
# Then
self.assertEquals(mockrename.call_count, len(mockglob.glob.return_value),
"The rename operator will not called for all the values in the directory")
# Check that we made all the os.rename calls with the proper name file.
for k, v in d.iteritems():
mockrename.assert_any_call(k, v)
|
[
"mock.patch.object",
"fiwareskuld.utils.rotated_files.rotate_files",
"mock.patch"
] |
[((1128, 1173), 'mock.patch', 'patch', (['"""fiwareskuld.utils.rotated_files.glob"""'], {}), "('fiwareskuld.utils.rotated_files.glob')\n", (1133, 1173), False, 'from mock import patch\n'), ((2346, 2391), 'mock.patch', 'patch', (['"""fiwareskuld.utils.rotated_files.glob"""'], {}), "('fiwareskuld.utils.rotated_files.glob')\n", (2351, 2391), False, 'from mock import patch\n'), ((3406, 3451), 'mock.patch', 'patch', (['"""fiwareskuld.utils.rotated_files.glob"""'], {}), "('fiwareskuld.utils.rotated_files.glob')\n", (3411, 3451), False, 'from mock import patch\n'), ((4439, 4484), 'mock.patch', 'patch', (['"""fiwareskuld.utils.rotated_files.glob"""'], {}), "('fiwareskuld.utils.rotated_files.glob')\n", (4444, 4484), False, 'from mock import patch\n'), ((1477, 1503), 'mock.patch.object', 'patch.object', (['os', '"""rename"""'], {}), "(os, 'rename')\n", (1489, 1503), False, 'from mock import patch\n'), ((1606, 1661), 'fiwareskuld.utils.rotated_files.rotate_files', 'rf', ([], {'name': 'name', 'max_level': 'max_level', 'rename_to': 'rename_to'}), '(name=name, max_level=max_level, rename_to=rename_to)\n', (1608, 1661), True, 'from fiwareskuld.utils.rotated_files import rotate_files as rf\n'), ((2603, 2629), 'mock.patch.object', 'patch.object', (['os', '"""rename"""'], {}), "(os, 'rename')\n", (2615, 2629), False, 'from mock import patch\n'), ((2732, 2787), 'fiwareskuld.utils.rotated_files.rotate_files', 'rf', ([], {'name': 'name', 'max_level': 'max_level', 'rename_to': 'rename_to'}), '(name=name, max_level=max_level, rename_to=rename_to)\n', (2734, 2787), True, 'from fiwareskuld.utils.rotated_files import rotate_files as rf\n'), ((3659, 3685), 'mock.patch.object', 'patch.object', (['os', '"""rename"""'], {}), "(os, 'rename')\n", (3671, 3685), False, 'from mock import patch\n'), ((3788, 3843), 'fiwareskuld.utils.rotated_files.rotate_files', 'rf', ([], {'name': 'name', 'max_level': 'max_level', 'rename_to': 'rename_to'}), '(name=name, max_level=max_level, rename_to=rename_to)\n', (3790, 3843), True, 'from fiwareskuld.utils.rotated_files import rotate_files as rf\n'), ((4745, 4771), 'mock.patch.object', 'patch.object', (['os', '"""rename"""'], {}), "(os, 'rename')\n", (4757, 4771), False, 'from mock import patch\n'), ((4874, 4929), 'fiwareskuld.utils.rotated_files.rotate_files', 'rf', ([], {'name': 'name', 'max_level': 'max_level', 'rename_to': 'rename_to'}), '(name=name, max_level=max_level, rename_to=rename_to)\n', (4876, 4929), True, 'from fiwareskuld.utils.rotated_files import rotate_files as rf\n')]
|
from integration_test import __version__
import os
import subprocess
import urllib.request
import json
import time
from pathlib import Path
def test_basic():
tm = os.getenv('TENDERMINT')
tmhome = os.getenv('TMHOME')
tmkms = os.getenv('TMKMS')
kmsconfig = os.getenv('TMKMSCONFIG')
tmkms_proc = subprocess.Popen([tmkms, "start", "-c", kmsconfig], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
tm_proc = subprocess.Popen([tm, "node", "--home", tmhome], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
contents = None
start_time = time.perf_counter()
timeout = 30
rpc_base = "http://127.0.0.1:26657"
status_url = rpc_base + "/status"
block_url = rpc_base + "/block"
while True:
try:
contents = urllib.request.urlopen(status_url).read()
break
except Exception as e:
time.sleep(1)
if time.perf_counter() - start_time >= timeout:
print(e)
tm_output = tm_proc.stdout.readlines()
os.system("pkill -9 " + tmkms)
tmkms_output = tmkms_proc.stdout.readlines()
tmkms_err = tmkms_proc.stderr.readlines()
raise TimeoutError('Waited too long for the RPC port.\n tm: {}\ntmkms output:{}\ntmkms error: {}'.format(tm_output, tmkms_output, tmkms_err)) from e
time.sleep(5)
contents = urllib.request.urlopen(status_url).read()
status = json.loads(contents)
block_height = int(status["result"]["sync_info"]["latest_block_height"])
assert block_height >= 1
contents = urllib.request.urlopen(block_url).read()
block = json.loads(contents)
validator_address = block['result']['block']['last_commit']['signatures'][0]['validator_address']
genesis_path = tmhome + "/config/genesis.json"
genesis = json.loads(Path(genesis_path).read_text())
assert validator_address == genesis["validators"][0]["address"].upper()
|
[
"subprocess.Popen",
"json.loads",
"time.perf_counter",
"os.system",
"time.sleep",
"pathlib.Path",
"os.getenv"
] |
[((168, 191), 'os.getenv', 'os.getenv', (['"""TENDERMINT"""'], {}), "('TENDERMINT')\n", (177, 191), False, 'import os\n'), ((205, 224), 'os.getenv', 'os.getenv', (['"""TMHOME"""'], {}), "('TMHOME')\n", (214, 224), False, 'import os\n'), ((237, 255), 'os.getenv', 'os.getenv', (['"""TMKMS"""'], {}), "('TMKMS')\n", (246, 255), False, 'import os\n'), ((272, 296), 'os.getenv', 'os.getenv', (['"""TMKMSCONFIG"""'], {}), "('TMKMSCONFIG')\n", (281, 296), False, 'import os\n'), ((314, 417), 'subprocess.Popen', 'subprocess.Popen', (["[tmkms, 'start', '-c', kmsconfig]"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), "([tmkms, 'start', '-c', kmsconfig], stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n", (330, 417), False, 'import subprocess\n'), ((429, 529), 'subprocess.Popen', 'subprocess.Popen', (["[tm, 'node', '--home', tmhome]"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), "([tm, 'node', '--home', tmhome], stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n", (445, 529), False, 'import subprocess\n'), ((564, 583), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (581, 583), False, 'import time\n'), ((1359, 1372), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (1369, 1372), False, 'import time\n'), ((1443, 1463), 'json.loads', 'json.loads', (['contents'], {}), '(contents)\n', (1453, 1463), False, 'import json\n'), ((1638, 1658), 'json.loads', 'json.loads', (['contents'], {}), '(contents)\n', (1648, 1658), False, 'import json\n'), ((870, 883), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (880, 883), False, 'import time\n'), ((1837, 1855), 'pathlib.Path', 'Path', (['genesis_path'], {}), '(genesis_path)\n', (1841, 1855), False, 'from pathlib import Path\n'), ((1040, 1070), 'os.system', 'os.system', (["('pkill -9 ' + tmkms)"], {}), "('pkill -9 ' + tmkms)\n", (1049, 1070), False, 'import os\n'), ((899, 918), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (916, 918), False, 'import time\n')]
|
# pylint: disable=C0111
from setuptools import setup
with open("README.md", "r") as fh:
README = fh.read()
setup(
name='oneforge',
version='0.1.0',
description='1Forge REST API wrapper',
long_description=README,
long_description_content_type='text/markdown',
author='<NAME>',
author_email='<EMAIL>',
maintainer='<NAME>',
maintainer_email='<EMAIL>',
url='https://github.com/orgito/1forge-client',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development :: Libraries',
],
keywords='1forge forex',
packages=['oneforge'],
setup_requires=['setuptools>=38.6.0'],
install_requires=['requests'],
python_requires='>=3.6',
project_urls={
'Bug Reports': 'https://github.com/orgito/1forge-client/issues',
'Source': 'https://github.com/orgito/1forge-client',
},
)
|
[
"setuptools.setup"
] |
[((113, 999), 'setuptools.setup', 'setup', ([], {'name': '"""oneforge"""', 'version': '"""0.1.0"""', 'description': '"""1Forge REST API wrapper"""', 'long_description': 'README', 'long_description_content_type': '"""text/markdown"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'maintainer': '"""<NAME>"""', 'maintainer_email': '"""<EMAIL>"""', 'url': '"""https://github.com/orgito/1forge-client"""', 'license': '"""MIT"""', 'classifiers': "['Development Status :: 4 - Beta', 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Software Development :: Libraries']", 'keywords': '"""1forge forex"""', 'packages': "['oneforge']", 'setup_requires': "['setuptools>=38.6.0']", 'install_requires': "['requests']", 'python_requires': '""">=3.6"""', 'project_urls': "{'Bug Reports': 'https://github.com/orgito/1forge-client/issues', 'Source':\n 'https://github.com/orgito/1forge-client'}"}), "(name='oneforge', version='0.1.0', description=\n '1Forge REST API wrapper', long_description=README,\n long_description_content_type='text/markdown', author='<NAME>',\n author_email='<EMAIL>', maintainer='<NAME>', maintainer_email='<EMAIL>',\n url='https://github.com/orgito/1forge-client', license='MIT',\n classifiers=['Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Software Development :: Libraries'], keywords='1forge forex',\n packages=['oneforge'], setup_requires=['setuptools>=38.6.0'],\n install_requires=['requests'], python_requires='>=3.6', project_urls={\n 'Bug Reports': 'https://github.com/orgito/1forge-client/issues',\n 'Source': 'https://github.com/orgito/1forge-client'})\n", (118, 999), False, 'from setuptools import setup\n')]
|
import math
from datetime import datetime
from fortnite_api.enums import BrCosmeticType, BrCosmeticRarity
class NewBrCosmetics:
def __init__(self, data):
self.build = data.get('build')
self.previous_build = data.get('previousBuild')
self.hash = data.get('hash')
try:
self.date = datetime.strptime(data.get('date'), '%Y-%m-%dT%H:%M:%S%z')
except (ValueError, TypeError):
self.date = None
try:
self.last_addition = datetime.strptime(data.get('lastAddition'), '%Y-%m-%dT%H:%M:%S%z')
except (ValueError, TypeError):
self.last_addition = None
self.items = [BrCosmetic(i) for i in data.get('items')] if data.get('items') else None
self.raw_data = data
class BrCosmetic:
"""Represents a Battle Royale Cosmetic.
Attributes
-----------
id: :class:`str`
The id of the cosmetic.
type: :class:`BrCosmeticType`
The type of the cosmetic.
backend_type: :class:`str`
The internal type of the cosmetic.
rarity: :class:`BrCosmeticRarity`
The rarity of the cosmetic.
backend_rarity: :class:`str`
The internal rarity of the cosmetic.
name: :class:`str`
The name of the cosmetic in the chosen language.
description: :class:`str`
The description of the cosmetic in the chosen language.
set: Optional[:class:`str`]
The set of the cosmetic in the chosen language.
set_text: Optional[:class:`str`]
The text of the set of the cosmetic in the chosen language.
series: Optional[:class:`str`]
The series of the cosmetic in the chosen language.
backend_series: Optional[:class:`str`]
The internal series of the cosmetic.
small_icon: :class:`BrCosmeticImage`
The icon image in 128x128 resolution of the cosmetic.
icon: Optional[:class:`BrCosmeticImage`]
The icon image in 512x512 resolution of the cosmetic.
featured: Optional[:class:`BrCosmeticImage`]
The featured image in 1024x1024 resolution of the cosmetic.
background: Optional[:class:`BrCosmeticImage`]
The background image in 2048x1024 resolution of a loading screen.
cover_art: Optional[:class:`BrCosmeticImage`]
The cover art image in 512x512 resolution of a music pack.
decal: Optional[:class:`BrCosmeticImage`]
The decal in 512x512 resolution of a spray.
variants: Optional[List[:class:`BrCosmeticVariant`]]
A :class:`list` of :class:`BrCosmeticVariant` of the cosmetic.
gameplay_tags: Optional[List[:class:`str`]]
A :class:`list` of gameplay tags of the cosmetics.
display_asset_path: Optional[:class:`str`]
The path of the display asset.
path: :class:`str`
The path of the asset.
added: :class:`datetime.datetime`
The timestamp when the item was added to the Fortnite-API.com database.
raw_data: :class:`dict`
The raw data from request. Can be used for saving and recreating the class.
"""
def __init__(self, data):
self.id = data.get('id')
self.name = data.get('name')
self.description = data.get('description')
cosmetic_type = data.get('type', {}) if data.get('type') else {}
try:
self.type = BrCosmeticType(cosmetic_type.get('value'))
except ValueError:
self.type = BrCosmeticType.UNKNOWN
self.display_type = cosmetic_type.get('displayValue')
self.backend_type = cosmetic_type.get('backendValue')
rarity = data.get('rarity', {}) if data.get('rarity') else {}
try:
self.rarity = BrCosmeticRarity(rarity.get('value'))
except ValueError:
self.rarity = BrCosmeticRarity.UNKNOWN
self.rarity_text = rarity.get('displayValue')
self.backend_rarity = rarity.get('backendValue')
series = data.get('series', {}) if data.get('series') else {}
self.series = series.get('value')
self.series_image = series.get('image')
self.backend_series = series.get('backendValue')
cosmetic_set = data.get('set', {}) if data.get('set') else {}
self.set = cosmetic_set.get('value')
self.set_text = cosmetic_set.get('text')
self.backend_set = cosmetic_set.get('backendValue')
introduction = data.get('introduction', {}) if data.get('introduction') else {}
self.introduction_chapter = introduction.get('chapter')
self.introduction_season = introduction.get('season')
self.introduction_text = introduction.get('text')
self.backend_introduction = introduction.get('backendValue')
images = data.get('images', {}) if data.get('images') else {}
self.small_icon = BrCosmeticImage(images.get('smallIcon')) if images.get('smallIcon') else None
self.icon = BrCosmeticImage(images.get('icon')) if images.get('icon') else None
self.featured = BrCosmeticImage(images.get('featured')) if images.get('featured') else None
other_images = images.get('other', {}) if images.get('other') else {}
self.background = BrCosmeticImage(other_images.get('background')) if other_images.get('background') else None
self.cover_art = BrCosmeticImage(other_images.get('coverart')) if other_images.get('coverart') else None
self.decal = BrCosmeticImage(other_images.get('decal')) if other_images.get('decal') else None
self.variants = [BrCosmeticVariant(variant) for variant in data.get('variants')] \
if data.get('variants') is not None else None
self.gameplay_tags = [gameplay_tag for gameplay_tag in data.get('gameplayTags')] \
if data.get('gameplayTags') is not None else None
self.meta_tags = [meta_tag for meta_tag in data.get('metaTags')] \
if data.get('metaTags') is not None else None
self.showcase_video = 'https://youtube.com/watch?v=' + data.get('showcaseVideo') \
if data.get('showcaseVideo') else None
self.display_asset_path = data.get('displayAssetPath')
self.definition_path = data.get('definitionPath')
self.path = data.get('path')
try:
self.added = datetime.strptime(data.get('added'), '%Y-%m-%dT%H:%M:%S%z')
except (ValueError, TypeError):
self.added = None
self.shop_history = []
for date in data.get('shopHistory', []) if data.get('shopHistory') else []:
try:
self.shop_history.append(datetime.strptime(date, '%Y-%m-%dT%H:%M:%S%z').replace(tzinfo=None))
except (ValueError, TypeError):
pass
self.appearances = len(self.shop_history)
self.first_appearance = self.shop_history[0] if self.appearances > 0 else None
self.last_appearance = self.shop_history[self.appearances - 1] if self.appearances > 0 else None
self.unseen_for = (datetime.utcnow() - self.last_appearance).days if self.last_appearance else None
self.raw_data = data
class BrCosmeticImage:
"""Represents a Battle Royale cosmetic image.
Attributes
-----------
url: :class:`str`
        The URL of the image.
"""
def __init__(self, url):
self.url = url
def url_as(self, size):
if size < 0 or type(math.sqrt(size)) is float:
raise TypeError('Size must be a power of 2.')
url_without_type = self.url.replace('.png', '')
return url_without_type + '_' + size + '.png'
def __str__(self):
return self.url
class BrCosmeticVariant:
"""Represents a Battle Royale cosmetic image.
Attributes
-----------
channel: :class:`str`
The channel of the variant.
type: Optional[:class:`str`]
The type of the variant in the chosen language.
options: List[:class:`BrCosmeticVariantOption`]
A :class:`list` of :class:`BrCosmeticVariantOption` of the variant.
raw_data: :class:`dict`
The raw data from request. Can be used for saving and re-creating the class.
"""
def __init__(self, data):
self.channel = data.get('channel')
self.type = data.get('type')
self.options = [BrCosmeticVariantOption(option) for option in data.get('options')] \
if data.get('options') is not None else None
self.raw_data = data
class BrCosmeticVariantOption:
"""Represents a Battle Royale cosmetic image.
Attributes
-----------
tag: :class:`str`
The tag of the option.
name: :class:`str`
The name of the option in the chosen language.
image: :class:`BrCosmeticImage`
A image of the option.
raw_data: :class:`dict`
The raw data from request. Can be used for saving and re-creating the class.
"""
def __init__(self, data):
self.tag = data.get('tag')
self.name = data.get('name')
self.image = BrCosmeticImage(data.get('image'))
self.raw_data = data
|
[
"datetime.datetime.utcnow",
"datetime.datetime.strptime",
"math.sqrt"
] |
[((7046, 7063), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (7061, 7063), False, 'from datetime import datetime\n'), ((7434, 7449), 'math.sqrt', 'math.sqrt', (['size'], {}), '(size)\n', (7443, 7449), False, 'import math\n'), ((6643, 6689), 'datetime.datetime.strptime', 'datetime.strptime', (['date', '"""%Y-%m-%dT%H:%M:%S%z"""'], {}), "(date, '%Y-%m-%dT%H:%M:%S%z')\n", (6660, 6689), False, 'from datetime import datetime\n')]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
if __name__ == '__main__':
logging.basicConfig()
_log = logging.getLogger(__name__)
import sys
import pyxb
import unittest
class TestTrac0132 (unittest.TestCase):
message = 'bad character \u2620'
def testDecode (self):
e = pyxb.PyXBException(self.message)
if sys.version_info[:2] > (2, 4):
self.assertEqual(self.message, e.args[0])
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"logging.basicConfig",
"pyxb.PyXBException",
"logging.getLogger"
] |
[((139, 166), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (156, 166), False, 'import logging\n'), ((110, 131), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (129, 131), False, 'import logging\n'), ((485, 500), 'unittest.main', 'unittest.main', ([], {}), '()\n', (498, 500), False, 'import unittest\n'), ((324, 356), 'pyxb.PyXBException', 'pyxb.PyXBException', (['self.message'], {}), '(self.message)\n', (342, 356), False, 'import pyxb\n')]
|
import copy
def merge(target, *args):
"""Merges arbitrary data - copied from http://blog.impressiver.com/post/31434674390/deep-merge-multiple-python-dicts
:param target: the data structure to fill
:param args: a list of data structures to merge into target
:return: target, with all data in args merged into it
:rtype: whatever type was originally passed in
"""
if len(args) > 1:
for item in args:
merge(target, item)
return target
item = args[0]
if not isinstance(item, dict):
return item
for key, value in item.items():
if key in target and isinstance(target[key], dict):
merge(target[key], value)
else:
if not key in target:
target[key] = copy.deepcopy(value)
return target
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
[
"copy.deepcopy"
] |
[((779, 799), 'copy.deepcopy', 'copy.deepcopy', (['value'], {}), '(value)\n', (792, 799), False, 'import copy\n')]
|
'''
Created on Jan 26, 2017
@author: tommi
'''
from enum import Enum
from handeval import pcg_brand
class Action(Enum):
CHECKFOLD = 1
CHECKCALL = 2
BETRAISE = 3
class Street(Enum):
PREFLOP = 0
FLOP = 3
TURN = 4
RIVER = 5
SHOWDOWN = 6
class PlayerState:
STACK_SIZE = 10000
MONEY_BEHIND = 990000
def __init__(self):
self.stack = self.STACK_SIZE
self.moneyLeft = self.MONEY_BEHIND
self.reset()
def bet(self, amount):
diff = amount - self.betSize
if diff >= self.stack:
self.betSize += self.stack
self.stack = 0
self.isAllIn = True
else:
self.betSize += diff
self.stack -= diff
def add_money(self, amount):
if amount > 0:
self.stack += amount
def reset(self):
self.betSize = 0
self.isAllIn = False
self.hasActed = False
self.hasFolded = False
self.cards = []
self.boardCards = []
return self.reload_stack()
def reload_stack(self):
if self.stack == 0:
if self.moneyLeft <= 0:
return False
else:
self.stack += min(self.STACK_SIZE, self.moneyLeft)
self.moneyLeft -= self.stack
return True
return True
class Agent: # Base class for all AI and human players. Plays random moves. Agent never modifies PlayerStates.
def __init__(self):
self.state = PlayerState()
def set_enemy_state(self, state):
self.enemyState = state
def get_action(self): # AI implementation
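        # Baseline behaviour: a pseudo-random action (via pcg_brand) paired with a pseudo-random bet size.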
return Action(pcg_brand(3) + 1), pcg_brand(10000)
def update(self, street, pot):
pass
|
[
"handeval.pcg_brand"
] |
[((1827, 1843), 'handeval.pcg_brand', 'pcg_brand', (['(10000)'], {}), '(10000)\n', (1836, 1843), False, 'from handeval import pcg_brand\n'), ((1808, 1820), 'handeval.pcg_brand', 'pcg_brand', (['(3)'], {}), '(3)\n', (1817, 1820), False, 'from handeval import pcg_brand\n')]
|
from typing import Callable
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
from src.domain_logic.engagement_domain import EngagementDomain
from src.model import get_database_session
from src.model.engagement import Engagement
async def update_engagement(engagement_domain: EngagementDomain,
func: Callable[[], AsyncSession] = get_database_session) -> bool:
async with func() as session:
query = select(Engagement).filter(Engagement.blog_id ==
engagement_domain.blog_id,
Engagement.user_id == engagement_domain.user_id)
result = await session.execute(query)
# Crash In Case Of None
engagement: Engagement = result.scalar_one_or_none()
if not engagement:
return False
engagement.isLiked = engagement_domain.isLiked
await session.commit()
return True
|
[
"sqlalchemy.select"
] |
[((470, 488), 'sqlalchemy.select', 'select', (['Engagement'], {}), '(Engagement)\n', (476, 488), False, 'from sqlalchemy import select\n')]
|
from unittest import TestCase
from unittest.mock import MagicMock
from unittest.mock import patch
from takeoff import *
import os
class WebProjectGeneratorTest(TestCase):
def setUp(self):
os.system('rm -rf test_dist/blog')
self.g = WebProjectGenerator('blog', [])
self.real_system_call = self.g.system_call
self.g.system_call = MagicMock()
self.g.base_dist_folder = MagicMock(return_value='test_dist')
def setup_project(self):
self.g.system_call = self.real_system_call
self.g.create_structure_folders()
self.g.create_django_project()
self.g.prepare_settings()
def line_block(self, starting_line, finishing_line, lines):
block = []
started = False
for line in lines:
if line == starting_line:
started = True
if started:
block.append(line)
if line == finishing_line:
started = False
return block
def test_project_folder(self):
self.assertEqual(self.g.project_folder(), 'test_dist/blog/web/blog')
def test_create_structure_folders(self):
self.g.create_structure_folders()
self.g.system_call.assert_called_with('mkdir -p test_dist/blog/web/')
def test_migrate_call(self):
self.g.migrate()
self.g.system_call.assert_called_with('cd test_dist/blog/web/blog && python3 manage.py migrate')
def test_install_libraries_call(self):
self.g.install_required_libraries()
self.g.system_call.assert_called_with('pip3 install django-bootstrap4')
def test_start_django_project(self):
self.g.start_django_project()
self.g.system_call.assert_called_with(f"cd test_dist/blog/web && {self.g.django_admin} startproject blog")
def test_start_main_app(self):
self.g.start_main_app()
self.g.system_call.assert_called_with('cd test_dist/blog/web/blog && python3 manage.py startapp main')
def test_create_admin(self):
self.g.create_admin()
self.g.system_call.assert_called_with('cd test_dist/blog/web/blog && python3 manage.py createsuperuser')
def test_prepare_settings(self):
self.setup_project()
file = open('test_dist/blog/web/blog/blog/settings.py', 'r')
lines = list(file)
file.close()
self.assertIn(" 'main',\n", lines)
self.assertIn(" 'bootstrap4',\n", lines)
def test_generate_main_urls(self):
self.setup_project()
self.g.generate_main_urls()
file = open('test_dist/blog/web/blog/main/urls.py', 'r')
lines = list(file)
file.close()
expected_lines = [
'from django.urls import path\n',
'from . import views\n',
"app_name = 'main'\n",
'\n',
'urlpatterns = [\n', ']'
]
self.assertEqual(expected_lines, lines)
def test_prepare_urls(self):
self.setup_project()
self.g.prepare_urls()
file = open('test_dist/blog/web/blog/blog/urls.py', 'r')
lines = self.line_block(
'urlpatterns = [\n',
']\n',
list(file)
)
file.close()
expected_lines = [
'urlpatterns = [\n',
" path('', include('main.urls')),\n",
" path('admin/', admin.site.urls),\n",
']\n'
]
self.assertEqual(expected_lines, lines)
|
[
"unittest.mock.MagicMock",
"os.system"
] |
[((202, 236), 'os.system', 'os.system', (['"""rm -rf test_dist/blog"""'], {}), "('rm -rf test_dist/blog')\n", (211, 236), False, 'import os\n'), ((366, 377), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (375, 377), False, 'from unittest.mock import MagicMock\n'), ((412, 447), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': '"""test_dist"""'}), "(return_value='test_dist')\n", (421, 447), False, 'from unittest.mock import MagicMock\n')]
|
import time
from functools import wraps
from operators.operator import register_operators, delete_operators, operator_detail
from pipeline.pipeline import create_pipeline, delete_pipeline
from application.application import new_application, delete_application
def pre_operator(name="pytest_op_1", type="encoder",
addr="psoperator/vgg16-encoder:latest", author="phantoscope",
version="0.1", description="test operator"):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
register_operators(name=name, type=type, addr=addr, author=author,
version=version, description=description)
func(*args, **kwargs)
delete_operators(name=name)
return wrapper
return decorator
def pre_instance(operator_name="pytest_op_1", name="ins1"):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
operator = operator_detail(operator_name)
operator.new_instance(name)
func(*args, **kwargs)
operator.delete_instance(name)
return wrapper
return decorator
def pre_pipeline(name="pytest_pipe_1", processors="",
encoder={"name": "pytest_op_1", "instance": "ins1"},
description="test pipeline"):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
create_pipeline(name=name, processors=processors, encoder=encoder)
func(*args, **kwargs)
delete_pipeline(name)
return wrapper
return decorator
def pre_application(name="pytest_app_1",
fields={"full": {"type": "pipeline", "value": "pytest_pipe_1"}},
s3_buckets="test-bucket"):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
            time.sleep(5) # wait for operator instance start
new_application(app_name=name, fields=fields, s3_bucket=s3_buckets)
func(*args, **kwargs)
delete_application(name, True)
return wrapper
return decorator
def sleep_time(seconds):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
time.sleep(seconds)
func(*args, **kwargs)
return wrapper
return decorator
|
[
"application.application.new_application",
"pipeline.pipeline.delete_pipeline",
"operators.operator.delete_operators",
"time.sleep",
"pipeline.pipeline.create_pipeline",
"functools.wraps",
"operators.operator.register_operators",
"operators.operator.operator_detail",
"application.application.delete_application"
] |
[((490, 501), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (495, 501), False, 'from functools import wraps\n'), ((908, 919), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (913, 919), False, 'from functools import wraps\n'), ((1382, 1393), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (1387, 1393), False, 'from functools import wraps\n'), ((1834, 1845), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (1839, 1845), False, 'from functools import wraps\n'), ((2210, 2221), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (2215, 2221), False, 'from functools import wraps\n'), ((552, 665), 'operators.operator.register_operators', 'register_operators', ([], {'name': 'name', 'type': 'type', 'addr': 'addr', 'author': 'author', 'version': 'version', 'description': 'description'}), '(name=name, type=type, addr=addr, author=author, version=\n version, description=description)\n', (570, 665), False, 'from operators.operator import register_operators, delete_operators, operator_detail\n'), ((738, 765), 'operators.operator.delete_operators', 'delete_operators', ([], {'name': 'name'}), '(name=name)\n', (754, 765), False, 'from operators.operator import register_operators, delete_operators, operator_detail\n'), ((981, 1011), 'operators.operator.operator_detail', 'operator_detail', (['operator_name'], {}), '(operator_name)\n', (996, 1011), False, 'from operators.operator import register_operators, delete_operators, operator_detail\n'), ((1444, 1510), 'pipeline.pipeline.create_pipeline', 'create_pipeline', ([], {'name': 'name', 'processors': 'processors', 'encoder': 'encoder'}), '(name=name, processors=processors, encoder=encoder)\n', (1459, 1510), False, 'from pipeline.pipeline import create_pipeline, delete_pipeline\n'), ((1557, 1578), 'pipeline.pipeline.delete_pipeline', 'delete_pipeline', (['name'], {}), '(name)\n', (1572, 1578), False, 'from pipeline.pipeline import create_pipeline, delete_pipeline\n'), ((1896, 1909), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (1906, 1909), False, 'import time\n'), ((1958, 2025), 'application.application.new_application', 'new_application', ([], {'app_name': 'name', 'fields': 'fields', 's3_bucket': 's3_buckets'}), '(app_name=name, fields=fields, s3_bucket=s3_buckets)\n', (1973, 2025), False, 'from application.application import new_application, delete_application\n'), ((2072, 2102), 'application.application.delete_application', 'delete_application', (['name', '(True)'], {}), '(name, True)\n', (2090, 2102), False, 'from application.application import new_application, delete_application\n'), ((2272, 2291), 'time.sleep', 'time.sleep', (['seconds'], {}), '(seconds)\n', (2282, 2291), False, 'import time\n')]
|
#coding:utf-8
# return candidate position set of one pitch duration near center of the frame
# by differential change point and threshold from bottom line.
# return 0 if there is no.
#
# Returns the candidate index range [sp, ep] covering one pitch period near the centre.
# Returns zeros when there is no candidate.
#
# Candidates are selected from the change points of the derivative and a threshold near the bottom line.
import numpy as np
import matplotlib.pyplot as plt
# Check version
# Python 3.6.4, 64bit on Win32 (Windows 10)
# numpy (1.14.0)
def diff_ana(y, sr, show=False):
    # (1) select by the change of the slope
    f_prime=np.gradient(y) # numerical gradient (slope)
    indices_diff0 = np.where( np.diff(np.sign(f_prime)) > 0.0 )[0] # take the diff of the sign (-1,0,1) of the slope and detect points where it turns positive
    # (2) select values close to the bottom line
    thres0= (np.amax(y) - np.amin(y)) * 0.25 + np.amin(y) # use values from the minimum up to 25% of the amplitude range as candidates
indices_thres0 = np.where( y < thres0 )[0]
    # (3) take the intersection (logical AND) of the two conditions above
indices=np.sort(np.array(list( set(indices_diff0) & set(indices_thres0))))
infections = y[indices]
    if len(indices) >= 2:  # search only when there are two or more candidates
        index0= np.argmin(np.abs(indices - len(y)/2)) # find the candidate index closest to the centre
        if len(indices) == 2: # when there are only two candidates
sp= indices[0]
ep= indices[1]
        elif index0 < len(y)/2 and indices[-1] > len(y)/2 :  # if that index lies before the centre
sp= indices[index0]
ep= indices[index0+1]
else:
sp= indices[index0-1]
ep= indices[index0]
    else: # no candidate found
sp=0
ep=0
indices1=np.array([sp,ep])
infections1 = y[indices1]
#print ( indices, indices1)
#print ('select index, [Hz]', indices1, (sr / (indices1[1]-indices1[0])) )
if show:
fig = plt.figure()
ax1 = fig.add_subplot(311)
plt.title('diff: two red cirles shows selected portion')
plt.xlabel('mSec')
plt.ylabel('level')
ax1.plot(np.arange(len(y)) * 1000.0 / sr, y, 'bo-', ms=2)
ax1.plot(indices * 1000.0 / sr, infections, 'yo', ms=5)
ax1.plot(indices1 * 1000.0 / sr, infections1, 'ro', ms=5)
ax2 = fig.add_subplot(312)
ax2.plot(np.arange(len(f_prime)) * 1000.0 / sr, f_prime, 'ro', ms=5)
ax3 = fig.add_subplot(313)
f_prime2=np.gradient(f_prime)
indices2 = np.where(np.diff(np.sign(f_prime2)))[0]
infections2 = y[indices2]
ax3.plot(np.arange(len(y)) * 1000.0 / sr, y, 'bo-', ms=2)
ax3.plot(indices2 * 1000.0 / sr, infections2, 'ro', ms=5)
plt.show()
return int(sp), int(ep)
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"numpy.amin",
"numpy.amax",
"matplotlib.pyplot.figure",
"numpy.where",
"numpy.array",
"numpy.sign",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"numpy.gradient"
] |
[((477, 491), 'numpy.gradient', 'np.gradient', (['y'], {}), '(y)\n', (488, 491), True, 'import numpy as np\n'), ((1440, 1458), 'numpy.array', 'np.array', (['[sp, ep]'], {}), '([sp, ep])\n', (1448, 1458), True, 'import numpy as np\n'), ((679, 689), 'numpy.amin', 'np.amin', (['y'], {}), '(y)\n', (686, 689), True, 'import numpy as np\n'), ((741, 761), 'numpy.where', 'np.where', (['(y < thres0)'], {}), '(y < thres0)\n', (749, 761), True, 'import numpy as np\n'), ((1649, 1661), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1659, 1661), True, 'import matplotlib.pyplot as plt\n'), ((1707, 1763), 'matplotlib.pyplot.title', 'plt.title', (['"""diff: two red cirles shows selected portion"""'], {}), "('diff: two red cirles shows selected portion')\n", (1716, 1763), True, 'import matplotlib.pyplot as plt\n'), ((1773, 1791), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""mSec"""'], {}), "('mSec')\n", (1783, 1791), True, 'import matplotlib.pyplot as plt\n'), ((1801, 1820), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""level"""'], {}), "('level')\n", (1811, 1820), True, 'import matplotlib.pyplot as plt\n'), ((2208, 2228), 'numpy.gradient', 'np.gradient', (['f_prime'], {}), '(f_prime)\n', (2219, 2228), True, 'import numpy as np\n'), ((2478, 2488), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2486, 2488), True, 'import matplotlib.pyplot as plt\n'), ((645, 655), 'numpy.amax', 'np.amax', (['y'], {}), '(y)\n', (652, 655), True, 'import numpy as np\n'), ((658, 668), 'numpy.amin', 'np.amin', (['y'], {}), '(y)\n', (665, 668), True, 'import numpy as np\n'), ((542, 558), 'numpy.sign', 'np.sign', (['f_prime'], {}), '(f_prime)\n', (549, 558), True, 'import numpy as np\n'), ((2267, 2284), 'numpy.sign', 'np.sign', (['f_prime2'], {}), '(f_prime2)\n', (2274, 2284), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__project__ = 'leetcode'
__file__ = '__init__.py'
__author__ = 'king'
__time__ = '2019/11/18 16:54'
_ooOoo_
o8888888o
88" . "88
(| -_- |)
O\ = /O
____/`---'\____
.' \\| |// `.
/ \\||| : |||// \
/ _||||| -:- |||||- \
| | \\\ - /// | |
| \_| ''\---/'' | |
\ .-\__ `-` ___/-. /
___`. .' /--.--\ `. . __
."" '< `.___\_<|>_/___.' >'"".
| | : `- \`.;`\ _ /`;.`/ - ` : | |
\ \ `-. \_ __\ /__ _/ .-` / /
======`-.____`-.___\_____/___.-`____.-'======
`=---='
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
              Buddha bless, may there never be bugs
"""
from leetcode.lessons.linked_list import ListNode
from leetcode.utils.timeutils import time_interval
'''
Difficulty: Medium
Reverse a linked list from position m to n. Do it in a single pass.
Note:
1 ≤ m ≤ n ≤ length of the list.
Example:
Input: 1->2->3->4->5->NULL, m = 2, n = 4
Output: 1->4->3->2->5->NULL
'''
class Solution(object):
@time_interval
def reverseBetween(self, head, m, n):
"""
:type head: ListNode
:type m: int
:type n: int
:rtype: ListNode
"""
result = node = ListNode(None)
result.next = head
n -= m
while m > 1:
node = node.next
m -= 1
tail = None
reversed_head = None
next_reverse = node.next
while n >= 0:
tail = next_reverse.next
next_reverse.next = reversed_head
reversed_head = next_reverse
next_reverse = tail
n -= 1
node.next.next = tail
node.next = reversed_head
return result.next
@time_interval
def reverseBetween2(self, head, m, n):
"""
:type head: ListNode
:type m: int
:type n: int
:rtype: ListNode
"""
if not head:
return None
cur, pre = head, None
while m > 1:
pre = cur
cur = cur.next
m -= 1
n -= 1
tail, con = cur, pre
while n:
temp = cur.next
cur.next = pre
pre = cur
cur = temp
n -= 1
if con:
con.next = pre
else:
head = pre
tail.next = cur
return head
temp = ListNode(None)
def reverse_n(self, head, n):
if n == 1:
self.temp = head.next
return head
last = self.reverse_n(head.next, n - 1)
head.next.next = head
head.next = self.temp
return last
@time_interval
def reverseBetween3(self, head, m, n):
"""
:type head: ListNode
:type m: int
:type n: int
:rtype: ListNode
"""
if m == 1:
return self.reverse_n(head, n)
head.next = self.reverseBetween3(head.next, m - 1, n - 1)
return head
l1 = ListNode.generate([1, 2, 3, 4, 5])
print(Solution().reverseBetween(l1, 2, 4))
l2 = ListNode.generate([1, 2, 3, 4, 5])
print(Solution().reverseBetween2(l2, 2, 4))
l2 = ListNode.generate([1, 2, 3, 4, 5])
print(Solution().reverseBetween3(l2, 2, 4))
|
[
"leetcode.lessons.linked_list.ListNode.generate",
"leetcode.lessons.linked_list.ListNode"
] |
[((3306, 3340), 'leetcode.lessons.linked_list.ListNode.generate', 'ListNode.generate', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (3323, 3340), False, 'from leetcode.lessons.linked_list import ListNode\n'), ((3390, 3424), 'leetcode.lessons.linked_list.ListNode.generate', 'ListNode.generate', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (3407, 3424), False, 'from leetcode.lessons.linked_list import ListNode\n'), ((3475, 3509), 'leetcode.lessons.linked_list.ListNode.generate', 'ListNode.generate', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (3492, 3509), False, 'from leetcode.lessons.linked_list import ListNode\n'), ((2713, 2727), 'leetcode.lessons.linked_list.ListNode', 'ListNode', (['None'], {}), '(None)\n', (2721, 2727), False, 'from leetcode.lessons.linked_list import ListNode\n'), ((1544, 1558), 'leetcode.lessons.linked_list.ListNode', 'ListNode', (['None'], {}), '(None)\n', (1552, 1558), False, 'from leetcode.lessons.linked_list import ListNode\n')]
|
# coding: UTF-8
import unittest
from usig_normalizador_amba.Callejero import Callejero
from usig_normalizador_amba.Partido import Partido
from usig_normalizador_amba.Calle import Calle
from tests.test_commons import cargarCallejeroEstatico
class CallejeroTestCase(unittest.TestCase):
p = Partido('jose_c_paz', '<NAME>', 'Partido de <NAME>', 2430431)
c = Callejero(p)
cargarCallejeroEstatico(c)
p = Partido('general_san_martin', 'General San Martin', 'Partido de General San Martin', 1719022)
c_san_martin = Callejero(p)
cargarCallejeroEstatico(c_san_martin)
def _checkCalle(self, calle, codigo, nombre, codigo_partido, localidad):
self.assertTrue(isinstance(calle, Calle))
self.assertEqual(calle.codigo, codigo)
self.assertEqual(calle.nombre, nombre)
self.assertEqual(calle.partido.codigo, codigo_partido)
self.assertEqual(calle.localidad, localidad)
def testCallejero_callejero_inexistent(self):
p = Partido('jose_paz', '<NAME>', 'Partido de José C. Paz', 2430431)
self.assertRaises(ValueError, Callejero, p)
def testCallejero_buscarCalle_calle_inexistente(self):
res = self.c.buscarCalle('kokusai dori')
self.assertTrue(isinstance(res, list))
self.assertEqual(len(res), 0, 'No debería haber matching.')
def testCallejero_buscarCalle_unica_calle_existente(self):
res = self.c.buscarCalle('<NAME> Compostela')
self.assertTrue(isinstance(res, list))
self.assertEqual(len(res), 1, 'Debería haber 1 solo matching.')
self._checkCalle(res[0], 53658, 'S<NAME>', 'jose_c_paz', '<NAME>')
def testCallejero_buscarCalle_nombre_permutado(self):
res = self.c.buscarCalle('Compostela Santiago de')
self.assertTrue(isinstance(res, list))
self.assertEqual(len(res), 1, 'Debería haber 1 solo matching.')
self._checkCalle(res[0], 53658, 'Santiago de Compostela', 'jose_c_paz', '<NAME>')
def testCallejero_buscarCalle_nombre_incompleto(self):
res = self.c.buscarCalle('Compos Santi')
self.assertTrue(isinstance(res, list))
self.assertEqual(len(res), 1, 'Debería haber 1 solo matching.')
self._checkCalle(res[0], 53658, 'Santiago de Compostela', 'jose_c_paz', '<NAME>')
def testCallejero_buscarCalle_nombre_con_acento_y_case(self):
res = self.c.buscarCalle('PoToSÍ')
self.assertTrue(isinstance(res, list))
self.assertEqual(len(res), 1, 'Debería haber 1 solo matching.')
self._checkCalle(res[0], 341221, 'Potosí', 'jose_c_paz', '<NAME>')
def testCallejero_buscarCalle_nombre_con_enie(self):
res = self.c.buscarCalle('<NAME>')
self.assertTrue(isinstance(res, list))
self.assertEqual(len(res), 1, 'Debería haber 1 matching.')
self._checkCalle(res[0], 77440, '<NAME>', 'jose_c_paz', '<NAME>')
def testCallejero_buscarCalle_multiples_calles_existentes(self):
res = self.c.buscarCalle('San')
self.assertTrue(isinstance(res, list))
self.assertEqual(len(res), 16, 'Debería haber 16 matchings.')
resCalles = ['San Lorenzo', 'San Nicolás', 'San Blas', 'San Salvador', 'San Luis', 'San Marino', 'San Agustín',
'Santiago del Estero', 'Santiago de Compostela', 'Santiago L. Copello', 'Santa Marta', 'Santo Domingo',
'Santa Ana', 'Santiago de Liniers', 'Santa María', 'S<NAME>']
for calle in res:
self.assertTrue(isinstance(calle, Calle))
self.assertTrue(calle.nombre in resCalles)
def testCallejero_buscarCalle_calles_con_y_01(self):
res = self.c.buscarCalle('Gelly y Obes')
self.assertTrue(isinstance(res, list))
self.assertEqual(len(res), 1, 'Debería haber 1 matchings.')
self._checkCalle(res[0], 77481, 'Gelly y Obes', 'jose_c_paz', '<NAME>')
res = self.c.buscarCalle('g y o')
self.assertTrue(isinstance(res, list))
self.assertEqual(len(res), 1, 'Debería haber 1 matchings.')
self._checkCalle(res[0], 77481, 'Gelly y Obes', 'jose_c_paz', '<NAME>')
def testCallejero_buscarCalle_calles_con_y_02(self):
res = self.c.buscarCalle('<NAME>')
self.assertTrue(isinstance(res, list))
self.assertEqual(len(res), 1, 'Debería haber 1 matchings.')
self._checkCalle(res[0], 11702, '<NAME>', 'jose_c_paz', '<NAME>')
def testCallejero_buscarCalle_calles_con_e_01(self):
res = self.c.buscarCalle('<NAME>')
self.assertTrue(isinstance(res, list))
self.assertEqual(len(res), 1, 'Debería haber 1 matching.')
self._checkCalle(res[0], 78817, '<NAME>', 'jose_c_paz', '<NAME>')
def testCallejero_buscarCodigo_codigo_valido(self):
res = self.c.buscarCodigo(314724)
self.assertTrue(isinstance(res, list))
self.assertTrue(res[0][0] == 314724)
self.assertTrue(res[0][1] == '<NAME> (M) / <NAME> (JCP)')
def testCallejero_buscarCodigo_codigo_invalido(self):
res = self.c.buscarCodigo(666)
self.assertTrue(res == [])
def testCallejero_buscarCalle_sinonimos_01(self):
res1 = self.c.buscarCalle('11')
self.assertTrue(isinstance(res1, list))
self.assertEqual(len(res1), 1, 'Debería haber 1 matching.')
res2 = self.c.buscarCalle('once')
self.assertTrue(isinstance(res2, list))
self.assertEqual(len(res2), 1, 'Debería haber 1 matching.')
self.assertEqual(res1[0].codigo, res2[0].codigo)
def testCallejero_buscarCalle_sinonimos_02(self):
res1 = self.c.buscarCalle('3') # 3 de Febrero, Tres Sargentos y Las Tres Marías
self.assertTrue(isinstance(res1, list))
self.assertEqual(len(res1), 3, 'Debería haber 1 matching.')
self.assertTrue(res1[0].codigo in [78879, 53341, 237007])
self.assertTrue(res1[1].codigo in [78879, 53341, 237007])
self.assertTrue(res1[2].codigo in [78879, 53341, 237007])
def testCallejero_buscarCalle_muchos_espacios(self):
res = self.c.buscarCalle(' puerto principe ')
self.assertTrue(isinstance(res, list))
self.assertEqual(len(res), 1, 'Debería haber 1 matching.')
self._checkCalle(res[0], 183044, 'P<NAME>', 'jose_c_paz', '<NAME>')
def testCallejero_buscarCalle_calle_con_parentesis(self):
res = self.c.buscarCalle('Coliqueo (JCP)')
self.assertTrue(isinstance(res, list))
self.assertEqual(len(res), 1, 'Debería haber 1 matching.')
self._checkCalle(res[0], 186501, 'Intendente Arricau (SM) / Cacique Coliqueo (JCP)', 'jose_c_paz', '<NAME>')
def testCallejero_buscarCalle_caracteres_raros(self):
res = self.c.buscarCalle('puerto principe |°¬!#$%&/()=?\¿¡*¸+~{[^}]\'`-_.:,;<>·@')
self.assertTrue(isinstance(res, list))
self.assertEqual(len(res), 1, 'Debería haber 1 matching.')
self._checkCalle(res[0], 183044, '<NAME>', 'jose_c_paz', '<NAME>')
def testCallejero_buscarCalle_calle_con_acente_escrito_sin_acento(self):
res = self.c.buscarCalle('potosi')
self.assertTrue(isinstance(res, list))
self.assertEqual(len(res), 1, 'Debería haber 1 matching.')
self._checkCalle(res[0], 341221, 'Potosí', 'jose_c_paz', '<NAME>')
def testCallejero_buscarCalle_calle_con_numeros(self):
res = self.c_san_martin.buscarCalle('26 de Julio de 1890')
self.assertTrue(isinstance(res, list))
self.assertEqual(len(res), 1, 'Debería haber 1 matching.')
self._checkCalle(res[0], 70996, '103 - 26 de Julio de 1890', 'general_san_martin', 'General San Martín')
|
[
"usig_normalizador_amba.Partido.Partido",
"usig_normalizador_amba.Callejero.Callejero",
"tests.test_commons.cargarCallejeroEstatico"
] |
[((297, 358), 'usig_normalizador_amba.Partido.Partido', 'Partido', (['"""jose_c_paz"""', '"""<NAME>"""', '"""Partido de <NAME>"""', '(2430431)'], {}), "('jose_c_paz', '<NAME>', 'Partido de <NAME>', 2430431)\n", (304, 358), False, 'from usig_normalizador_amba.Partido import Partido\n'), ((367, 379), 'usig_normalizador_amba.Callejero.Callejero', 'Callejero', (['p'], {}), '(p)\n', (376, 379), False, 'from usig_normalizador_amba.Callejero import Callejero\n'), ((384, 410), 'tests.test_commons.cargarCallejeroEstatico', 'cargarCallejeroEstatico', (['c'], {}), '(c)\n', (407, 410), False, 'from tests.test_commons import cargarCallejeroEstatico\n'), ((420, 517), 'usig_normalizador_amba.Partido.Partido', 'Partido', (['"""general_san_martin"""', '"""General San Martin"""', '"""Partido de General San Martin"""', '(1719022)'], {}), "('general_san_martin', 'General San Martin',\n 'Partido de General San Martin', 1719022)\n", (427, 517), False, 'from usig_normalizador_amba.Partido import Partido\n'), ((533, 545), 'usig_normalizador_amba.Callejero.Callejero', 'Callejero', (['p'], {}), '(p)\n', (542, 545), False, 'from usig_normalizador_amba.Callejero import Callejero\n'), ((550, 587), 'tests.test_commons.cargarCallejeroEstatico', 'cargarCallejeroEstatico', (['c_san_martin'], {}), '(c_san_martin)\n', (573, 587), False, 'from tests.test_commons import cargarCallejeroEstatico\n'), ((989, 1053), 'usig_normalizador_amba.Partido.Partido', 'Partido', (['"""jose_paz"""', '"""<NAME>"""', '"""Partido de José C. Paz"""', '(2430431)'], {}), "('jose_paz', '<NAME>', 'Partido de José C. Paz', 2430431)\n", (996, 1053), False, 'from usig_normalizador_amba.Partido import Partido\n')]
|
try:
import pygame
except ImportError:
raise ImportError("\n<pygame> library is missing on your system."
"\nTry: \n C:\\pip install pygame on a window command prompt.")
from SoundServer import *
if __name__ == "__main__":
pygame.mixer.init()
sound1 = pygame.mixer.Sound('Alarm9.ogg')
SCREENRECT = pygame.Rect(0, 0, 800, 1024)
pygame.display.set_mode((SCREENRECT.w, SCREENRECT.h))
SND = SoundControl(SCREENRECT, 8)
# SND.play(sound1, -1, volume_=1.0, panning_=False)
SND.play(sound1, -1, volume_=1.0, panning_=True, x_=400, fade_in_ms=0, fade_out_ms=0)
# SND.play(sound1, -1, volume_=1.0, panning_=True, x_=100)
# SND.play(sound1, -1, volume_=1.0, panning_=True, x_=200)
# SND.play(sound1, -1, volume_=1.0, panning_=True, x_=400)
# SND.play(sound1, -1, volume_=1.0, panning_=True, x_=800)
# SND.play(sound1, -1, volume_=1.0, panning_=True, x_=100)
# SND.play(sound1, -1, volume_=1.0, panning_=True, x_=450)
x = 0
v = 1.0
FRAME = 0
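    # Demo loop: sweep the sound's stereo panning across the screen width while
    # exercising pause/unpause, volume updates and the channel inspection helpers.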
while 1:
# SND.show_sounds_playing()
pygame.event.pump()
pygame.display.flip()
# SND.update_sounds_panning(x, v)
SND.update_sound_panning(x, 1.0, name_="", id_=id(sound1))
if x < SCREENRECT.w:
SND.update_sound_panning(x, 0.2, None, id(sound1))
x += 0.1
else:
SND.update_volume(1)
SND.update()
if 4000 < FRAME < 9000:
SND.pause_sounds()
else:
SND.unpause_sound(id_=id(sound1))
SND.show_free_channels()
SND.show_sounds_playing()
print(SND.return_time_left(id(sound1)))
print(FRAME)
if FRAME == 1000:
SND.stop_all()
print(SND.get_identical_sounds(sound1))
print(SND.get_identical_id(id(sound1)))
x += 1
x %= SCREENRECT.w
FRAME += 1
|
[
"pygame.display.set_mode",
"pygame.event.pump",
"pygame.Rect",
"pygame.mixer.init",
"pygame.display.flip",
"pygame.mixer.Sound"
] |
[((263, 282), 'pygame.mixer.init', 'pygame.mixer.init', ([], {}), '()\n', (280, 282), False, 'import pygame\n'), ((297, 329), 'pygame.mixer.Sound', 'pygame.mixer.Sound', (['"""Alarm9.ogg"""'], {}), "('Alarm9.ogg')\n", (315, 329), False, 'import pygame\n'), ((348, 376), 'pygame.Rect', 'pygame.Rect', (['(0)', '(0)', '(800)', '(1024)'], {}), '(0, 0, 800, 1024)\n', (359, 376), False, 'import pygame\n'), ((382, 435), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(SCREENRECT.w, SCREENRECT.h)'], {}), '((SCREENRECT.w, SCREENRECT.h))\n', (405, 435), False, 'import pygame\n'), ((1106, 1125), 'pygame.event.pump', 'pygame.event.pump', ([], {}), '()\n', (1123, 1125), False, 'import pygame\n'), ((1135, 1156), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (1154, 1156), False, 'import pygame\n')]
|
import paho.mqtt.client as mqttw
class MqttClient :
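    # Thin wrapper around paho-mqtt: connects on construction, subscribes to the
    # given topics once connected, and forwards incoming messages to the callback.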
def __init__( self , address = "localhost", port = 1883 , id_ = "" , subscribe = "" , message = None ) :
self.address = address
self.port = port
self.subscribe = subscribe
self.message = message
self.client = mqttw.Client( client_id = id_ )
self.client.connect( self.address , self.port )
self.client.on_connect = self.on_connect
self.client.on_message = self.on_message
self.client.loop_start()
def on_connect( self , client , userdata , flags , rc ) :
for subscribe in self.subscribe :
self.client.subscribe( subscribe )
def on_message( self , client , userdata , msg ) :
if self.message is None :
return
self.message( client , userdata , msg )
def publish( self , topic, payload=None, qos=0, retain=False ) :
self.client.publish( topic , payload , qos , retain )
|
[
"paho.mqtt.client.Client"
] |
[((310, 337), 'paho.mqtt.client.Client', 'mqttw.Client', ([], {'client_id': 'id_'}), '(client_id=id_)\n', (322, 337), True, 'import paho.mqtt.client as mqttw\n')]
|
import airflow
from airflow import DAG
from airflow.operators.python_operator import PythonOperator
from airflow.contrib.hooks.aws_hook import AwsHook
from airflow.hooks.postgres_hook import PostgresHook
from airflow.operators.postgres_operator import PostgresOperator
from datetime import datetime, timedelta
import pandas as pd
import numpy as np
import boto3
from io import StringIO
def S3_toRedShift(*args, **kwargs):
aws_hook = AwsHook("aws_credentials")
credentials = aws_hook.get_credentials()
redshift_hook = PostgresHook("redshift")
day = {kwargs['ds']}.pop()
day = datetime.strptime(day, '%Y-%m-%d').date() - timedelta(days=2)
day = day.strftime("%m-%d-%Y")
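    # Redshift COPY loads the raw CSV directly from S3, authenticated with the
    # temporary credentials resolved from the Airflow "aws_credentials" connection.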
sql = """
COPY {}
FROM '{}'
ACCESS_KEY_ID '{}'
SECRET_ACCESS_KEY '{}'
IGNOREHEADER 1
DELIMITER ';'
TIMEFORMAT 'auto'
"""
redshift_hook.run(sql.format("log_review",
"s3://data-raw-bucket/log_reviews.csv/",
credentials.access_key,
credentials.secret_key))
return
def insertUSdata(*args, **kwargs):
# aws_hook = AwsHook("aws_credentials")
# credentials = aws_hook.get_credentials()
redshift_hook = PostgresHook("redshift")
day = {kwargs['ds']}.pop()
day = datetime.strptime(day, '%Y-%m-%d').date() - timedelta(days=2)
day = day.strftime("%Y-%m-%d")
sql = """
INSERT INTO device
SELECT
device
            FROM log_review """
redshift_hook.run(sql.format("device"))
return
default_args = {
'owner': 'ashwath',
'depends_on_past': False,
'start_date': datetime(2018, 11, 1),
'end_date': datetime(2018, 11, 30),
'email_on_failure': True,
'email_on_retry': False,
'retries': 0,
'retry_delay': timedelta(minutes=1),
'catchup': False
}
# hourly: cron is '0 * * * *': https://airflow.apache.org/docs/stable/scheduler.html
dag = DAG('log_reviews',
default_args=default_args,
description='Load and transform data in Redshift with Airflow',
max_active_runs=1,
# https://airflow.apache.org/docs/stable/scheduler.html
schedule_interval='0 0 * * *'
#schedule_interval=timedelta(days=1),
#schedule_interval='0 * * * *'
)
create_table_main = PostgresOperator(
task_id="create_log",
dag=dag,
postgres_conn_id="redshift",
sql=""" CREATE TABLE IF NOT EXISTS log_review (
id_log INTEGER,
device VARCHAR,
location VARCHAR,
os DATE,
ip VARCHAR,
phone_number VARCHAR,
browser VARCHAR);""")
create_table_device = PostgresOperator(
task_id="create_device",
dag=dag,
postgres_conn_id="redshift",
sql=""" CREATE TABLE IF NOT EXISTS device (
id_dim_devices INTEGER IDENTITY(1,1),
device VARCHAR,
);""")
MovetoRedShift = PythonOperator(
task_id="S3_toRedShift",
provide_context=True,
python_callable=S3_toRedShift,
dag=dag
)
insert_Devices = PythonOperator(
task_id="insert_Devices",
provide_context=True,
python_callable=insertUSdata,
dag=dag
)
create_table_main >> create_table_device >> MovetoRedShift >> insert_Devices
|
[
"airflow.operators.postgres_operator.PostgresOperator",
"airflow.DAG",
"airflow.contrib.hooks.aws_hook.AwsHook",
"airflow.operators.python_operator.PythonOperator",
"datetime.datetime",
"datetime.datetime.strptime",
"datetime.timedelta",
"airflow.hooks.postgres_hook.PostgresHook"
] |
[((1998, 2166), 'airflow.DAG', 'DAG', (['"""log_reviews"""'], {'default_args': 'default_args', 'description': '"""Load and transform data in Redshift with Airflow"""', 'max_active_runs': '(1)', 'schedule_interval': '"""0 0 * * *"""'}), "('log_reviews', default_args=default_args, description=\n 'Load and transform data in Redshift with Airflow', max_active_runs=1,\n schedule_interval='0 0 * * *')\n", (2001, 2166), False, 'from airflow import DAG\n'), ((2384, 2721), 'airflow.operators.postgres_operator.PostgresOperator', 'PostgresOperator', ([], {'task_id': '"""create_log"""', 'dag': 'dag', 'postgres_conn_id': '"""redshift"""', 'sql': '""" CREATE TABLE IF NOT EXISTS log_review (\n id_log INTEGER,\n device VARCHAR,\n location VARCHAR,\n os DATE,\n ip VARCHAR,\n phone_number VARCHAR,\n browser VARCHAR);"""'}), '(task_id=\'create_log\', dag=dag, postgres_conn_id=\'redshift\',\n sql=\n """ CREATE TABLE IF NOT EXISTS log_review (\n id_log INTEGER,\n device VARCHAR,\n location VARCHAR,\n os DATE,\n ip VARCHAR,\n phone_number VARCHAR,\n browser VARCHAR);"""\n )\n', (2400, 2721), False, 'from airflow.operators.postgres_operator import PostgresOperator\n'), ((2760, 2995), 'airflow.operators.postgres_operator.PostgresOperator', 'PostgresOperator', ([], {'task_id': '"""create_device"""', 'dag': 'dag', 'postgres_conn_id': '"""redshift"""', 'sql': '""" CREATE TABLE IF NOT EXISTS device (\n id_dim_devices INTEGER IDENTITY(1,1),\n device VARCHAR,\n );"""'}), '(task_id=\'create_device\', dag=dag, postgres_conn_id=\n \'redshift\', sql=\n """ CREATE TABLE IF NOT EXISTS device (\n id_dim_devices INTEGER IDENTITY(1,1),\n device VARCHAR,\n );"""\n )\n', (2776, 2995), False, 'from airflow.operators.postgres_operator import PostgresOperator\n'), ((3018, 3123), 'airflow.operators.python_operator.PythonOperator', 'PythonOperator', ([], {'task_id': '"""S3_toRedShift"""', 'provide_context': '(True)', 'python_callable': 'S3_toRedShift', 'dag': 'dag'}), "(task_id='S3_toRedShift', provide_context=True,\n python_callable=S3_toRedShift, dag=dag)\n", (3032, 3123), False, 'from airflow.operators.python_operator import PythonOperator\n'), ((3156, 3261), 'airflow.operators.python_operator.PythonOperator', 'PythonOperator', ([], {'task_id': '"""insert_Devices"""', 'provide_context': '(True)', 'python_callable': 'insertUSdata', 'dag': 'dag'}), "(task_id='insert_Devices', provide_context=True,\n python_callable=insertUSdata, dag=dag)\n", (3170, 3261), False, 'from airflow.operators.python_operator import PythonOperator\n'), ((440, 466), 'airflow.contrib.hooks.aws_hook.AwsHook', 'AwsHook', (['"""aws_credentials"""'], {}), "('aws_credentials')\n", (447, 466), False, 'from airflow.contrib.hooks.aws_hook import AwsHook\n'), ((532, 556), 'airflow.hooks.postgres_hook.PostgresHook', 'PostgresHook', (['"""redshift"""'], {}), "('redshift')\n", (544, 556), False, 'from airflow.hooks.postgres_hook import PostgresHook\n'), ((1280, 1304), 'airflow.hooks.postgres_hook.PostgresHook', 'PostgresHook', (['"""redshift"""'], {}), "('redshift')\n", (1292, 1304), False, 'from airflow.hooks.postgres_hook import PostgresHook\n'), ((1699, 1720), 'datetime.datetime', 'datetime', (['(2018)', '(11)', '(1)'], {}), '(2018, 11, 1)\n', (1707, 1720), False, 'from datetime import datetime, timedelta\n'), ((1738, 1760), 'datetime.datetime', 'datetime', (['(2018)', '(11)', '(30)'], {}), '(2018, 11, 30)\n', (1746, 1760), False, 'from datetime import datetime, timedelta\n'), ((1858, 1878), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(1)'}), '(minutes=1)\n', 
(1867, 1878), False, 'from datetime import datetime, timedelta\n'), ((643, 660), 'datetime.timedelta', 'timedelta', ([], {'days': '(2)'}), '(days=2)\n', (652, 660), False, 'from datetime import datetime, timedelta\n'), ((1391, 1408), 'datetime.timedelta', 'timedelta', ([], {'days': '(2)'}), '(days=2)\n', (1400, 1408), False, 'from datetime import datetime, timedelta\n'), ((599, 633), 'datetime.datetime.strptime', 'datetime.strptime', (['day', '"""%Y-%m-%d"""'], {}), "(day, '%Y-%m-%d')\n", (616, 633), False, 'from datetime import datetime, timedelta\n'), ((1347, 1381), 'datetime.datetime.strptime', 'datetime.strptime', (['day', '"""%Y-%m-%d"""'], {}), "(day, '%Y-%m-%d')\n", (1364, 1381), False, 'from datetime import datetime, timedelta\n')]
|
#!/usr/bin/env python
import optparse, sys, os, logging
from collections import defaultdict
optparser = optparse.OptionParser()
optparser.add_option("-d", "--datadir", dest="datadir", default="data", help="data directory (default=data)")
optparser.add_option("-p", "--prefix", dest="fileprefix", default="hansards", help="prefix of parallel data files (default=hansards)")
optparser.add_option("-e", "--english", dest="english", default="en", help="suffix of English (target language) filename (default=en)")
optparser.add_option("-f", "--french", dest="french", default="fr", help="suffix of French (source language) filename (default=fr)")
optparser.add_option("-l", "--logfile", dest="logfile", default=None, help="filename for logging output")
optparser.add_option("-t", "--threshold", dest="threshold", default=0.5, type="float", help="threshold for alignment (default=0.5)")
optparser.add_option("-n", "--num_sentences", dest="num_sents", default=sys.maxint, type="int", help="Number of sentences to use for training and alignment")
(opts, _) = optparser.parse_args()
f_data = "%s.%s" % (os.path.join(opts.datadir, opts.fileprefix), opts.french)
e_data = "%s.%s" % (os.path.join(opts.datadir, opts.fileprefix), opts.english)
if opts.logfile:
logging.basicConfig(filename=opts.logfile, filemode='w', level=logging.INFO)
sys.stderr.write("Training using EM algorithm...")
bitext = [[sentence.strip().split() for sentence in pair] for pair in zip(open(f_data), open(e_data))[:opts.num_sents]]
f_count = defaultdict(int)
e_count = defaultdict(int)
fe_count = defaultdict(int)
qa_count = defaultdict(int) #Counts for alignments q(j|i,l,m)
q_count = defaultdict(int) #Counts for alignments q(i,l,m)
#Where j is the alignment number of the english sentence,
#i is the alignment number of the french sentence
#l is the length of the english sentence
#m is the length of the french sentence
t_k = defaultdict(int)
q_k = defaultdict(int)
iterations = 10
k = 0
#Initialize
sys.stderr.write("\n")
sys.stderr.write("Initializing...")
for(a,(b,c)) in enumerate(bitext):
for (i,f_i) in enumerate(b):
for (j,e_j) in enumerate(c):
t_k[(f_i,e_j)] = 1.0
q_k[(j,i,len(c),len(b))] = 1.0
if a%1000 == 0: sys.stderr.write(".")
sys.stderr.write("\n")
sys.stderr.write("Done initializing\n")
sys.stderr.write("Training " + str(iterations) + " iterations.\n")
while(k < iterations):
k += 1
sys.stderr.write("Iteration " + str(k) + "...\n")
e_count = defaultdict(int)
fe_count = defaultdict(int)
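    # E-step: distribute each source word's count over target positions in
    # proportion to the current estimates t(f|e) * q(j|i,l,m).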
for (n,(f,e)) in enumerate(bitext):
for (i,f_i) in enumerate(f):
Z = 0
for (j,e_j) in enumerate(e):
Z += t_k[(f_i,e_j)]*q_k[(j,i,len(e),len(f))]
for (j,e_j) in enumerate(e):
c = (t_k[(f_i,e_j)]*q_k[(j,i,len(e),len(f))])/Z
fe_count[(f_i,e_j)] += c
e_count[e_j] += c
qa_count[(j,i,len(e),len(f))] += c
q_count[(i,len(e),len(f))] += c
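    # M-step: renormalise the accumulated counts into updated translation (t)
    # and alignment (q) probabilities.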
for (f,e) in fe_count.keys():
t_k[(f,e)] = fe_count[(f,e)]/e_count[e]
for (j,i,l,m) in qa_count.keys():
q_k[(j,i,l,m)] = qa_count[(j,i,l,m)]/q_count[(i,l,m)]
sys.stderr.write("Training Complete...\n")
sys.stderr.write("Aligning...\n")
for (k,(f,e)) in enumerate(bitext):
for (i,f_i) in enumerate(f):
#print("Number of french: " + str(i))
bestp = 0
bestj = 0
for (j,e_j) in enumerate(e):
#print(j)
if t_k[(f_i,e_j)]*q_k[(j,i,len(e),len(f))] > bestp:
bestp = t_k[(f_i,e_j)]*q_k[(j,i,len(e),len(f))]
bestj = j
#print("Chosen J: " + str(bestj))
sys.stdout.write("%i-%i " %(i,bestj))
sys.stdout.write("\n")
if False: """
for (n, (f, e)) in enumerate(bitext):
for f_i in set(f):
f_count[f_i] += 1
for e_j in set(e):
fe_count[(f_i,e_j)] += 1
for e_j in set(e):
e_count[e_j] += 1
if n % 500 == 0:
sys.stderr.write(".")
dice = defaultdict(int)
for (k, (f_i, e_j)) in enumerate(fe_count.keys()):
dice[(f_i,e_j)] = 2.0 * fe_count[(f_i, e_j)] / (f_count[f_i] + e_count[e_j])
if k % 5000 == 0:
sys.stderr.write(".")
sys.stderr.write("\n")
for (f, e) in bitext:
for (i, f_i) in enumerate(f):
for (j, e_j) in enumerate(e):
if dice[(f_i,e_j)] >= opts.threshold:
sys.stdout.write("%i-%i " % (i,j))
sys.stdout.write("\n")
"""
|
[
"sys.stdout.write",
"logging.basicConfig",
"optparse.OptionParser",
"collections.defaultdict",
"sys.stderr.write",
"os.path.join"
] |
[((106, 129), 'optparse.OptionParser', 'optparse.OptionParser', ([], {}), '()\n', (127, 129), False, 'import optparse, sys, os, logging\n'), ((1333, 1383), 'sys.stderr.write', 'sys.stderr.write', (['"""Training using EM algorithm..."""'], {}), "('Training using EM algorithm...')\n", (1349, 1383), False, 'import optparse, sys, os, logging\n'), ((1514, 1530), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (1525, 1530), False, 'from collections import defaultdict\n'), ((1541, 1557), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (1552, 1557), False, 'from collections import defaultdict\n'), ((1569, 1585), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (1580, 1585), False, 'from collections import defaultdict\n'), ((1598, 1614), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (1609, 1614), False, 'from collections import defaultdict\n'), ((1659, 1675), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (1670, 1675), False, 'from collections import defaultdict\n'), ((1907, 1923), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (1918, 1923), False, 'from collections import defaultdict\n'), ((1930, 1946), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (1941, 1946), False, 'from collections import defaultdict\n'), ((1984, 2006), 'sys.stderr.write', 'sys.stderr.write', (['"""\n"""'], {}), "('\\n')\n", (2000, 2006), False, 'import optparse, sys, os, logging\n'), ((2007, 2042), 'sys.stderr.write', 'sys.stderr.write', (['"""Initializing..."""'], {}), "('Initializing...')\n", (2023, 2042), False, 'import optparse, sys, os, logging\n'), ((2236, 2258), 'sys.stderr.write', 'sys.stderr.write', (['"""\n"""'], {}), "('\\n')\n", (2252, 2258), False, 'import optparse, sys, os, logging\n'), ((2260, 2299), 'sys.stderr.write', 'sys.stderr.write', (['"""Done initializing\n"""'], {}), "('Done initializing\\n')\n", (2276, 2299), False, 'import optparse, sys, os, logging\n'), ((3051, 3093), 'sys.stderr.write', 'sys.stderr.write', (['"""Training Complete...\n"""'], {}), "('Training Complete...\\n')\n", (3067, 3093), False, 'import optparse, sys, os, logging\n'), ((3094, 3127), 'sys.stderr.write', 'sys.stderr.write', (['"""Aligning...\n"""'], {}), "('Aligning...\\n')\n", (3110, 3127), False, 'import optparse, sys, os, logging\n'), ((1255, 1331), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': 'opts.logfile', 'filemode': '"""w"""', 'level': 'logging.INFO'}), "(filename=opts.logfile, filemode='w', level=logging.INFO)\n", (1274, 1331), False, 'import optparse, sys, os, logging\n'), ((2463, 2479), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (2474, 2479), False, 'from collections import defaultdict\n'), ((2492, 2508), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (2503, 2508), False, 'from collections import defaultdict\n'), ((3502, 3524), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (3518, 3524), False, 'import optparse, sys, os, logging\n'), ((1096, 1139), 'os.path.join', 'os.path.join', (['opts.datadir', 'opts.fileprefix'], {}), '(opts.datadir, opts.fileprefix)\n', (1108, 1139), False, 'import optparse, sys, os, logging\n'), ((1174, 1217), 'os.path.join', 'os.path.join', (['opts.datadir', 'opts.fileprefix'], {}), '(opts.datadir, opts.fileprefix)\n', (1186, 1217), False, 'import optparse, sys, os, logging\n'), ((2214, 2235), 'sys.stderr.write', 'sys.stderr.write', (['"""."""'], {}), "('.')\n", 
(2230, 2235), False, 'import optparse, sys, os, logging\n'), ((3463, 3502), 'sys.stdout.write', 'sys.stdout.write', (["('%i-%i ' % (i, bestj))"], {}), "('%i-%i ' % (i, bestj))\n", (3479, 3502), False, 'import optparse, sys, os, logging\n')]
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.utils.translation import ugettext_lazy as _
import re
__all__ = ['DriverFactory']
class DriverFactory(object):
@classmethod
def is_driver_available(cls, name):
try:
cls.get_driver_class(name)
return True
except NotImplementedError:
return False
@classmethod
def get_driver_class(cls, driver_name):
driver_name = driver_name.lower()
# TODO: import Engines dynamically
if re.match(r'^mongo.*', driver_name):
from .mongodb import MongoDB
return MongoDB
elif re.match(r'^mysql.*', driver_name):
from .mysqldb import MySQL
return MySQL
elif re.match(r'^redis.*', driver_name):
from .redis import Redis
return Redis
elif re.match(r'^fake.*', driver_name):
from .fake import FakeDriver
return FakeDriver
raise NotImplementedError()
@classmethod
def factory(cls, databaseinfra):
if not (databaseinfra and databaseinfra.engine and databaseinfra.engine.engine_type):
raise TypeError(_("DatabaseInfra is not defined"))
driver_name = databaseinfra.engine.engine_type.name
driver_class = cls.get_driver_class(driver_name)
return driver_class(databaseinfra=databaseinfra)
|
[
"django.utils.translation.ugettext_lazy",
"re.match"
] |
[((560, 593), 're.match', 're.match', (['"""^mongo.*"""', 'driver_name'], {}), "('^mongo.*', driver_name)\n", (568, 593), False, 'import re\n'), ((677, 710), 're.match', 're.match', (['"""^mysql.*"""', 'driver_name'], {}), "('^mysql.*', driver_name)\n", (685, 710), False, 'import re\n'), ((790, 823), 're.match', 're.match', (['"""^redis.*"""', 'driver_name'], {}), "('^redis.*', driver_name)\n", (798, 823), False, 'import re\n'), ((1222, 1255), 'django.utils.translation.ugettext_lazy', '_', (['"""DatabaseInfra is not defined"""'], {}), "('DatabaseInfra is not defined')\n", (1223, 1255), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((901, 933), 're.match', 're.match', (['"""^fake.*"""', 'driver_name'], {}), "('^fake.*', driver_name)\n", (909, 933), False, 'import re\n')]
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import pdb
from config import cfg
from misc.utils import *
if cfg.DATASET == 'SHHB':
from datasets.SHHB.setting import cfg_data
elif cfg.DATASET == 'SHHA':
from datasets.SHHA.setting import cfg_data
elif cfg.DATASET == 'UCSD':
from datasets.UCSD.setting import cfg_data
elif cfg.DATASET == 'Mall':
from datasets.Mall.setting import cfg_data
elif cfg.DATASET == 'FDST':
from datasets.FDST.setting import cfg_data
class CrowdCounter(nn.Module):
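    # Wrapper around the counting backbone (AMRNet): forward() predicts a patch-level count
    # map and build_loss() compares it with the ground-truth density summed over patches.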
def __init__(self, gpus, model_name, pretrained=True):
super(CrowdCounter, self).__init__()
self.model_name = model_name
net = None
if model_name == 'AMRNet':
from .SCC_Model.AMRNet import AMRNet as net
self.CCN = net(pretrained)
if len(gpus) > 1: # for multi gpu
self.CCN = torch.nn.DataParallel(self.CCN, device_ids=gpus).cuda(gpus[0])
else: # for one gpu
self.CCN = self.CCN.cuda()
self.loss_sum_fn = nn.L1Loss().cuda()
self.SumLoss = True
@property
def loss(self):
return self.loss_total
def loss_sum(self):
return self.loss_sum
def forward(self, img, gt_map):
count_map = self.CCN(img)
gt_map = torch.unsqueeze(gt_map, 1)
self.loss_total, self.loss_sum = self.build_loss(count_map, gt_map)
return count_map
def build_loss(self, count_map, gt_map):
loss_total, loss_sum_all = 0., 0.
if self.SumLoss:
gt_map_ = gt_map / cfg_data.LOG_PARA
kernel3, kernel4, kernel5 = 2, 4, 8
# filter3 = torch.ones(1, 1, kernel3, kernel3, requires_grad=False).cuda()
# filter4 = torch.ones(1, 1, kernel4, kernel4, requires_grad=False).cuda()
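            # An 8x8 all-ones kernel with stride 8 sums the GT density over non-overlapping
            # 8x8 patches, giving the local count map that the L1 loss compares against.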
filter5 = torch.ones(1, 1, kernel5, kernel5, requires_grad=False).cuda()
# gt_lcm_3 = F.conv2d(gt_map_, filter3, stride=kernel3)
# gt_lcm_4 = F.conv2d(gt_map_, filter4, stride=kernel4)
gt_lcm_5 = F.conv2d(gt_map_, filter5, stride=kernel5)
loss_sum_all = self.loss_sum_fn(count_map, gt_lcm_5)
loss_total += loss_sum_all
return loss_total, loss_sum_all
def test_forward(self, img):
count_map = self.CCN(img)
return count_map
|
[
"torch.ones",
"torch.nn.L1Loss",
"torch.nn.functional.conv2d",
"torch.unsqueeze",
"torch.nn.DataParallel"
] |
[((1303, 1329), 'torch.unsqueeze', 'torch.unsqueeze', (['gt_map', '(1)'], {}), '(gt_map, 1)\n', (1318, 1329), False, 'import torch\n'), ((2063, 2105), 'torch.nn.functional.conv2d', 'F.conv2d', (['gt_map_', 'filter5'], {'stride': 'kernel5'}), '(gt_map_, filter5, stride=kernel5)\n', (2071, 2105), True, 'import torch.nn.functional as F\n'), ((1047, 1058), 'torch.nn.L1Loss', 'nn.L1Loss', ([], {}), '()\n', (1056, 1058), True, 'import torch.nn as nn\n'), ((888, 936), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['self.CCN'], {'device_ids': 'gpus'}), '(self.CCN, device_ids=gpus)\n', (909, 936), False, 'import torch\n'), ((1840, 1895), 'torch.ones', 'torch.ones', (['(1)', '(1)', 'kernel5', 'kernel5'], {'requires_grad': '(False)'}), '(1, 1, kernel5, kernel5, requires_grad=False)\n', (1850, 1895), False, 'import torch\n')]
|
import numpy as np
from sklearn.ensemble import ExtraTreesRegressor, RandomForestRegressor
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn import preprocessing, svm
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
import statsmodels.api as sm
import matplotlib.dates as mdates
import warnings
import itertools
import dateutil
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from sklearn.svm import SVR
from sklearn.model_selection import GridSearchCV as gsc
from sklearn.linear_model import Ridge,Lasso
from sklearn.ensemble import RandomForestRegressor
from sklearn.neural_network import MLPRegressor
def main ():
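    # Pipeline: train and evaluate SVM, Linear Regression, Ridge, Lasso, Random Forest and an
    # MLP on the AQI data (all features, importance-ranked features, domain-knowledge features,
    # next-day targets and per-season splits), then plot RMSE comparisons.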
# Using svm
data=pd.read_csv('Original_with_dummies.csv')
y = data.AQI
data = data.drop('AQI', axis=1)
normalize(data)
data['AQI'] = y
S1,S2=AQI_SVM(data)
S3,S4=AQI_Feature_importance_SVM(data)
S5,S6=AQI_Domain_Knowledge_SVM(data)
S7,S8=AQI_without_Domain_Knowledge_SVM(data)
##Linear Regression
data=pd.read_csv('Original_with_dummies.csv')
y = data.AQI
data = data.drop('AQI', axis=1)
normalize(data)
data['AQI'] = y
LR1,LR2=AQI(data)
LR3,LR4=AQI_Feature_importance(data)
    LR5,LR6=AQI_Domain_Knowledge(data)
LR7,LR8=AQI_without_Domain_Knowledge(data)
    ## Predicting for next day
data=pd.read_csv('Original_with_dummies.csv')
y = data.AQI
data = data.drop('AQI', axis=1)
normalize(data)
data['AQI'] = y
normalize(data)
y=pd.read_csv('AQI_prediction_add.csv')
LR_F1,LR_F2=AQI_Future(data,y.AQI_predicted)
LR_F3,LR_F4=AQI_Feature_importance_Future(data,y.AQI_predicted)
LR_F5,LR_F6=AQI_Domain_Knowledge_Future(data,y.AQI_predicted)
LR_F7,LR_F8=AQI_without_Domain_Knowledge_Future(data,y.AQI_predicted)
##Predicting for Autumn Season
data=pd.read_csv('autumn_data.csv')
y = data.AQI
data = data.drop('AQI', axis=1)
normalize(data)
data['AQI'] = y
data=pd.get_dummies(data, columns=[' _conds'], prefix = [' _conds'])
data=pd.get_dummies(data, columns=[' _wdire'], prefix = [' _wdire'])
data=pd.get_dummies(data, columns=['Type'], prefix = ['Type'])
LR_A1,LR_A2=AQI(data)
LR_A3,LR_A4=AQI_Feature_importance(data)
LR_A5,LR_A6=AQI_Domain_Knowledge(data)
LR_A7,LR_A8=AQI_without_Domain_Knowledge(data)
##Predicting for Summer Season
data=pd.read_csv('summer_data.csv')
y = data.AQI
data = data.drop('AQI', axis=1)
normalize(data)
data['AQI'] = y
data=pd.get_dummies(data, columns=[' _conds'], prefix = [' _conds'])
data=pd.get_dummies(data, columns=[' _wdire'], prefix = [' _wdire'])
data=pd.get_dummies(data, columns=['Type'], prefix = ['Type'])
LR_S1,LR_S2=AQI(data)
LR_S3,LR_S4=AQI_Feature_importance(data)
LR_S5,LR_S6=AQI_Domain_Knowledge(data)
LR_S7,LR_S8=AQI_without_Domain_Knowledge(data)
##Predicting for Winter Season
data=pd.read_csv('winter_data.csv')
y = data.AQI
data = data.drop('AQI', axis=1)
normalize(data)
data['AQI'] = y
data=pd.get_dummies(data, columns=[' _conds'], prefix = [' _conds'])
data=pd.get_dummies(data, columns=[' _wdire'], prefix = [' _wdire'])
data=pd.get_dummies(data, columns=['Type'], prefix = ['Type'])
LR_W1,LR_W2=AQI(data)
LR_W3,LR_W4=AQI_Feature_importance(data)
LR_W5,LR_W6=AQI_Domain_Knowledge(data)
LR_W7,LR_W8=AQI_without_Domain_Knowledge(data)
##Using Ridge
data = pd.read_csv('Original_with_dummies.csv')
y = data.AQI
data = data.drop('AQI', axis=1)
normalize(data)
data['AQI'] = y
h = BestParams(data)
## Using all features
R1,R2=AQI_Ridge(data,h)
R3,R4=AQI_Feature_importance_Ridge(data,h)
R5,R6=AQI_Domain_Knowledge_Ridge(data,h)
R7,R8=AQI_without_Domain_Knowledge_Ridge(data,h)
##Future
data = pd.read_csv('Original_with_dummies.csv')
y = data.AQI
data = data.drop('AQI', axis=1)
normalize(data)
data['AQI'] = y
h = BestParams(data)
y = pd.read_csv('AQI_prediction_add.csv')
R_F1,R_F2=AQI_Future_Ridge(data, y.AQI_predicted,h)
R_F3,R_F4=AQI_Feature_importance_Future_Ridge(data, y.AQI_predicted,h)
R_F5,R_F6=AQI_Domain_Knowledge_Future_Ridge(data, y.AQI_predicted,h)
R_F7,R_F8=AQI_without_Domain_Knowledge_Future_Ridge(data, y.AQI_predicted,h)
##using Lasso
data=pd.read_csv('Original_with_dummies.csv')
y=data.AQI
data = data.drop('AQI', axis=1)
normalize(data)
data['AQI']=y
h=BestParams(data)
L1,L2=AQI_Lasso(data,h)
L3,L4=AQI_Feature_importance_Lasso(data,h)
L5,L6=AQI_Domain_Knowledge_Lasso(data,h)
L7,L8=AQI_without_Domain_Knowledge_Lasso(data,h)
    ## Predicting for next day
data=pd.read_csv('Original_with_dummies.csv')
normalize(data)
h=BestParams(data)
y=pd.read_csv('AQI_prediction_add.csv')
L_F1,L_F2=AQI_Future_Lasso(data,y.AQI_predicted,h)
L_F3,L_F4=AQI_Feature_importance_Future_Lasso(data,y.AQI_predicted,h)
L_F5,L_F6=AQI_Domain_Knowledge_Future_Lasso(data,y.AQI_predicted,h)
L_F7,L_F8=AQI_without_Domain_Knowledge_Future_Lasso(data,y.AQI_predicted,h)
##Random forest
    #All features
data = pd.read_csv('Original_with_dummies.csv')
y = data.AQI
data = data.drop('AQI', axis=1)
normalize(data)
data['AQI'] = y
F1,F2=AQI_RF(data)
F3,F4=AQI_Feature_importance_RF(data)
F5,F6=AQI_Domain_Knowledge_RF(data)
F7,F8=AQI_without_Domain_Knowledge_RF(data)
    ## Predicting for next day
data = pd.read_csv('Original_with_dummies.csv')
normalize(data)
y = pd.read_csv('AQI_prediction_add.csv')
F_F1,F_F2=AQI_Future_RF(data, y.AQI_predicted)
F_F3,F_F4=AQI_Feature_importance_Future_RF(data, y.AQI_predicted)
F_F5,F_F6=AQI_Domain_Knowledge_Future_RF(data, y.AQI_predicted)
F_F7,F_F8=AQI_without_Domain_Knowledge_Future_RF(data, y.AQI_predicted)
##NN
data=pd.read_csv('Original_with_dummies.csv')
y = data.AQI
data = data.drop('AQI', axis=1)
normalize(data)
data['AQI'] = y
layer = [4,4,4]
NN1,NN2=AQI_NN(data, layer)
NN3,NN4=AQI_Feature_importance_NN(data, layer)
NN5,NN6=AQI_Domain_Knowledge_NN(data, layer)
NN7,NN8=AQI_without_Domain_Knowledge_NN(data, layer)
    ## Predicting for next day
data=pd.read_csv('Original_with_dummies.csv')
y=pd.read_csv('AQI_prediction_add.csv')
normalize(data)
NN_F1,NN_F2=AQI_Future_NN(data,y.AQI_predicted, layer)
NN_F3,NN_F4=AQI_Feature_importance_Future_NN(data,y.AQI_predicted,layer)
NN_F5,NN_F6=AQI_Domain_Knowledge_Future_NN(data,y.AQI_predicted,layer)
NN_F7,NN_F8=AQI_without_Domain_Knowledge_Future_NN(data,y.AQI_predicted, layer)
##All features v/s all models
Bar_graph (LR1,LR2,L1,L2,R1,R2,S1,S2,F1,F2,NN1,NN2)
    ##Important features v/s all models
Bar_graph (LR3,LR4,L3,L4,R3,R4,S3,S4,F3,F4,NN3,NN4)
##Future with important features V/S ALL MODELS except svm
Bar_graph_without_svm (LR_F3,LR_F4,L_F3,L_F4,R_F3,R_F4,F_F3,F_F4,NN_F3,NN_F4)
##Autumn winter and summer
Bar_graph_season (LR_A3,LR_A4,LR_S3,LR_S4,LR_W3,LR_W4)
##Best Model Analysis using Data
data = pd.read_csv('Original_with_dummies.csv')
data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
train=90
test=18
tips=[]
LABELS=[]
d=[0,1,2,3,4,5,6,7,8,9]
for i in range (10):
train=train+30
test=test+6
LABELS.append(train)
tips.append(train_test_data_prepare(data, train, test, 15))
plt.plot(tips)
plt.xticks(d, LABELS)
plt.xlabel("No of Days")
plt.ylabel("RMSE")
plt.title("Models")
plt.legend()
plt.show()
#Predicting AQI using all features
def AQI(data):
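    # Baseline: plain LinearRegression on all features (month one-hot encoded);
    # returns (train RMSE, test RMSE).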
y=data.AQI
data=data.drop('AQI',axis=1)
data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = LinearRegression()
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using features from features importance graph
def AQI_Feature_importance(data):
tree_clf = ExtraTreesRegressor()
y=data['AQI']
data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
data=data.drop('AQI',axis=1)
tree_clf.fit(data, y)
importances = tree_clf.feature_importances_
feature_names = data.columns
imp_features=dict(zip(feature_names,importances))
features_up = sorted(imp_features.items(), key=lambda x: x[1], reverse=True)
features_down = sorted(imp_features.items(), key=lambda x: x[1], reverse=False)
#best features
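    # Note: the full ranked list is kept (features are only reordered by importance);
    # no top-k subset is taken here.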
x = [i[0] for i in features_up]
print(x)
x=data[x]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = LinearRegression()
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using domain-knowledge features
def AQI_Domain_Knowledge(data):
y=data.AQI
data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
x=data[[' _tempm',' _wdird',' _wspdm','year','Type_Industrial Area',]]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = LinearRegression()
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
def AQI_without_Domain_Knowledge(data):
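    # Complement experiment: drop the pollutant indices and the domain-knowledge columns
    # and train on whatever remains.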
y=data.AQI
data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
data=data.drop('AQI',axis=1)
data=data.drop('NO2',axis=1)
data=data.drop('SO2',axis=1)
data=data.drop('SPM',axis=1)
data=data.drop('RSPM',axis=1)
data=data.drop('ni',axis=1)
data=data.drop('si',axis=1)
data=data.drop('rpi',axis=1)
data=data.drop('spi',axis=1)
data=data.drop(' _tempm',axis=1)
data=data.drop(' _wdird',axis=1)
data=data.drop(' _wspdm',axis=1)
data=data.drop('year',axis=1)
data=data.drop('Type_Industrial Area',axis=1)
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = LinearRegression()
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
def AQI_Future(data,y):
data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = LinearRegression()
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using features from features importance graph
def AQI_Feature_importance_Future(data,y):
tree_clf = ExtraTreesRegressor()
data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
tree_clf.fit(data, y)
importances = tree_clf.feature_importances_
feature_names = data.columns
imp_features=dict(zip(feature_names,importances))
features_up = sorted(imp_features.items(), key=lambda x: x[1], reverse=True)
features_down = sorted(imp_features.items(), key=lambda x: x[1], reverse=False)
#best features
x = [i[0] for i in features_up]
print(x)
x=data[x]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = LinearRegression()
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using domain-knowledge features
def AQI_Domain_Knowledge_Future(data,y):
data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
x=data[[' _tempm',' _wdird',' _wspdm','year','Type_Industrial Area','month_10','month_11',]]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = LinearRegression()
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
def AQI_without_Domain_Knowledge_Future(data,y):
data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
data=data.drop('AQI',axis=1)
data=data.drop('NO2',axis=1)
data=data.drop('SO2',axis=1)
data=data.drop('SPM',axis=1)
data=data.drop('RSPM',axis=1)
data=data.drop('ni',axis=1)
data=data.drop('si',axis=1)
data=data.drop('rpi',axis=1)
data=data.drop('spi',axis=1)
data=data.drop(' _tempm',axis=1)
data=data.drop(' _wdird',axis=1)
data=data.drop(' _wspdm',axis=1)
data=data.drop('year',axis=1)
data=data.drop('Type_Industrial Area',axis=1)
data=data.drop('month_10',axis=1)
data=data.drop('month_11',axis=1)
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = LinearRegression()
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
def graph_training(y_pred,y_train):
all_samples = [i for i in range(0, 250)]
y_pred=y_pred[0:250]
y_train=y_train[0:250]
plt.plot(all_samples, y_pred,label='Predicted')
plt.plot(all_samples , y_train,label='Expected')
plt.xlabel("No of Samples")
plt.ylabel("AQI")
plt.title("Training")
plt.legend()
plt.show()
def graph_testing(y_pred,y_val):
all_samples = [i for i in range(0, 250)]
y_pred=y_pred[0:250]
y_val=y_val[0:250]
plt.plot(all_samples, y_pred,label='Predicted')
plt.plot(all_samples , y_val,label='Expected')
plt.xlabel("No of Samples")
plt.ylabel("AQI")
plt.title("Validation")
plt.legend()
plt.show()
## svm
def AQI_SVM(data):
y=data.AQI
data=data.drop('AQI',axis=1)
data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = SVR(gamma='scale')
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using features from features importance graph
def AQI_Feature_importance_SVM(data):
tree_clf = ExtraTreesRegressor()
y=data['AQI']
data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
data=data.drop('AQI',axis=1)
tree_clf.fit(data, y)
importances = tree_clf.feature_importances_
feature_names = data.columns
imp_features=dict(zip(feature_names,importances))
features_up = sorted(imp_features.items(), key=lambda x: x[1], reverse=True)
features_down = sorted(imp_features.items(), key=lambda x: x[1], reverse=False)
#best features
x = [i[0] for i in features_up]
print(x)
x=data[x]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = SVR(gamma='scale')
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using domain-knowledge features
def AQI_Domain_Knowledge_SVM(data):
y=data.AQI
data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
# df[['Name', 'Qualification']]
x=data[[' _tempm',' _wdird',' _wspdm','year','Type_Industrial Area',]]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = SVR(gamma='scale')
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
def AQI_without_Domain_Knowledge_SVM(data):
y=data.AQI
data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
data=data.drop('AQI',axis=1)
data=data.drop('NO2',axis=1)
data=data.drop('SO2',axis=1)
data=data.drop('SPM',axis=1)
data=data.drop('RSPM',axis=1)
data=data.drop('ni',axis=1)
data=data.drop('si',axis=1)
data=data.drop('rpi',axis=1)
data=data.drop('spi',axis=1)
data=data.drop(' _tempm',axis=1)
data=data.drop(' _wdird',axis=1)
data=data.drop(' _wspdm',axis=1)
data=data.drop('year',axis=1)
data=data.drop('Type_Industrial Area',axis=1)
# data=data.drop('month_10',axis=1)
# data=data.drop('month_11',axis=1)
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = SVR(gamma='scale')
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
def BestParams(data):
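    # Grid-search the regularisation strength alpha over a fixed candidate list (Ridge is used
    # as the estimator here; main() reuses the returned alpha for Lasso as well).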
y = data.AQI
data = data.drop('AQI', axis=1)
Hyper_params = np.array(
[ 0.011, 0.1, 0.001, 0.01,.3, .2, 0.6, .8, 0.001, 0.0001, 3, 4,1,2.4])
Reg_model = Ridge()
GSCgrid = gsc(estimator=Reg_model, param_grid=dict(alpha=Hyper_params))
GSCgrid.fit(data, y)
# print('Hyper Parameter for Ridge:', GSCgrid.best_estimator_.alpha)
return GSCgrid.best_estimator_.alpha
def normalize(data):
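    # Min-max scale every column to [0, 1]; the column mean is computed but not used.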
for c in data.columns:
mean = data[c].mean()
max = data[c].max()
min = data[c].min()
data[c] = (data[c] - min) / (max - min)
return data
def AQI_Ridge(data,h):
y=data.AQI
data=data.drop('AQI',axis=1)
data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = Ridge(alpha=h)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using features from features importance graph
def AQI_Feature_importance_Ridge(data,h):
tree_clf = ExtraTreesRegressor()
y=data['AQI']
data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
data=data.drop('AQI',axis=1)
tree_clf.fit(data, y)
importances = tree_clf.feature_importances_
feature_names = data.columns
imp_features=dict(zip(feature_names,importances))
features_up = sorted(imp_features.items(), key=lambda x: x[1], reverse=True)
features_down = sorted(imp_features.items(), key=lambda x: x[1], reverse=False)
#best features
x = [i[0] for i in features_up]
print(x)
x=data[x]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = Ridge(alpha=h)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using domain-knowledge features
def AQI_Domain_Knowledge_Ridge(data,h):
y=data.AQI
# df[['Name', 'Qualification']]
x=data[[' _tempm',' _wdird',' _wspdm','year','Type_Industrial Area']]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = Ridge(alpha=h)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
def AQI_without_Domain_Knowledge_Ridge(data,h):
y=data.AQI
data=data.drop('AQI',axis=1)
data=data.drop('NO2',axis=1)
data=data.drop('SO2',axis=1)
data=data.drop('SPM',axis=1)
data=data.drop('RSPM',axis=1)
data=data.drop('ni',axis=1)
data=data.drop('si',axis=1)
data=data.drop('rpi',axis=1)
data=data.drop('spi',axis=1)
data=data.drop(' _tempm',axis=1)
data=data.drop(' _wdird',axis=1)
data=data.drop(' _wspdm',axis=1)
data=data.drop('year',axis=1)
data=data.drop('Type_Industrial Area',axis=1)
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = Ridge(alpha=h)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
def AQI_Future_Ridge(data,y,h):
data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = Ridge(alpha=h)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using features from features importance graph
def AQI_Feature_importance_Future_Ridge(data,y,h):
tree_clf = ExtraTreesRegressor()
data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
tree_clf.fit(data, y)
importances = tree_clf.feature_importances_
feature_names = data.columns
imp_features=dict(zip(feature_names,importances))
features_up = sorted(imp_features.items(), key=lambda x: x[1], reverse=True)
features_down = sorted(imp_features.items(), key=lambda x: x[1], reverse=False)
#best features
x = [i[0] for i in features_up]
print(x)
x=data[x]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = Ridge(alpha=h)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using domain-knowledge features
def AQI_Domain_Knowledge_Future_Ridge(data,y,h):
# data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
x=data[[' _tempm',' _wdird',' _wspdm','year','Type_Industrial Area']]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = Ridge(alpha=h)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
def AQI_without_Domain_Knowledge_Future_Ridge(data,y,h):
# data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
data=data.drop('AQI',axis=1)
data=data.drop('NO2',axis=1)
data=data.drop('SO2',axis=1)
data=data.drop('SPM',axis=1)
data=data.drop('RSPM',axis=1)
data=data.drop('ni',axis=1)
data=data.drop('si',axis=1)
data=data.drop('rpi',axis=1)
data=data.drop('spi',axis=1)
data=data.drop(' _tempm',axis=1)
data=data.drop(' _wdird',axis=1)
data=data.drop(' _wspdm',axis=1)
data=data.drop('year',axis=1)
data=data.drop('Type_Industrial Area',axis=1)
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = Ridge(alpha=h)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using all features
def AQI_Lasso(data,h):
y=data.AQI
data=data.drop('AQI',axis=1)
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = Lasso(alpha=h)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using features from features importance graph
def AQI_Feature_importance_Lasso(data,h):
tree_clf = ExtraTreesRegressor()
y=data['AQI']
# data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
data=data.drop('AQI',axis=1)
tree_clf.fit(data, y)
importances = tree_clf.feature_importances_
feature_names = data.columns
imp_features=dict(zip(feature_names,importances))
features_up = sorted(imp_features.items(), key=lambda x: x[1], reverse=True)
features_down = sorted(imp_features.items(), key=lambda x: x[1], reverse=False)
#best features
x = [i[0] for i in features_up]
print(x)
x=data[x]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = Lasso(alpha=h)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using domain-knowledge features
def AQI_Domain_Knowledge_Lasso(data,h):
y=data.AQI
x=data[[' _tempm',' _wdird',' _wspdm','year','Type_Industrial Area']]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = Lasso(alpha=h)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
def AQI_without_Domain_Knowledge_Lasso(data,h):
y=data.AQI
# data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
data=data.drop('AQI',axis=1)
data=data.drop('NO2',axis=1)
data=data.drop('SO2',axis=1)
data=data.drop('SPM',axis=1)
data=data.drop('RSPM',axis=1)
data=data.drop('ni',axis=1)
data=data.drop('si',axis=1)
data=data.drop('rpi',axis=1)
data=data.drop('spi',axis=1)
data=data.drop(' _tempm',axis=1)
data=data.drop(' _wdird',axis=1)
data=data.drop(' _wspdm',axis=1)
data=data.drop('year',axis=1)
data=data.drop('Type_Industrial Area',axis=1)
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr =Lasso(alpha=h)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
def AQI_Future_Lasso(data,y,h):
# data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr =Lasso(alpha=h)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using features from features importance graph
def AQI_Feature_importance_Future_Lasso(data,y,h):
tree_clf = ExtraTreesRegressor()
# data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
tree_clf.fit(data, y)
importances = tree_clf.feature_importances_
feature_names = data.columns
imp_features=dict(zip(feature_names,importances))
features_up = sorted(imp_features.items(), key=lambda x: x[1], reverse=True)
features_down = sorted(imp_features.items(), key=lambda x: x[1], reverse=False)
#best features
x = [i[0] for i in features_up]
print(x)
x=data[x]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = Lasso(alpha=h)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using domain-knowledge features
def AQI_Domain_Knowledge_Future_Lasso(data,y,h):
x=data[[' _tempm',' _wdird',' _wspdm','year','Type_Industrial Area']]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = Lasso(alpha=h)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
def AQI_without_Domain_Knowledge_Future_Lasso(data,y,h):
data=data.drop('AQI',axis=1)
data=data.drop('NO2',axis=1)
data=data.drop('SO2',axis=1)
data=data.drop('SPM',axis=1)
data=data.drop('RSPM',axis=1)
data=data.drop('ni',axis=1)
data=data.drop('si',axis=1)
data=data.drop('rpi',axis=1)
data=data.drop('spi',axis=1)
data=data.drop(' _tempm',axis=1)
data=data.drop(' _wdird',axis=1)
data=data.drop(' _wspdm',axis=1)
data=data.drop('year',axis=1)
data=data.drop('Type_Industrial Area',axis=1)
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = Lasso(alpha=h)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
def AQI_RF(data):
y=data.AQI
data=data.drop('AQI',axis=1)
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = RandomForestRegressor()
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using features from features importance graph
def AQI_Feature_importance_RF(data):
tree_clf = ExtraTreesRegressor()
y=data['AQI']
# data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
data=data.drop('AQI',axis=1)
tree_clf.fit(data, y)
importances = tree_clf.feature_importances_
feature_names = data.columns
imp_features=dict(zip(feature_names,importances))
features_up = sorted(imp_features.items(), key=lambda x: x[1], reverse=True)
features_down = sorted(imp_features.items(), key=lambda x: x[1], reverse=False)
#best features
x = [i[0] for i in features_up]
print(x)
x=data[x]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = RandomForestRegressor()
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using domain-knowledge features
def AQI_Domain_Knowledge_RF(data):
y=data.AQI
x=data[[' _tempm',' _wdird',' _wspdm','year','Type_Industrial Area']]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = RandomForestRegressor()
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
def AQI_without_Domain_Knowledge_RF(data):
y=data.AQI
data=data.drop('AQI',axis=1)
data=data.drop('NO2',axis=1)
data=data.drop('SO2',axis=1)
data=data.drop('SPM',axis=1)
data=data.drop('RSPM',axis=1)
data=data.drop('ni',axis=1)
data=data.drop('si',axis=1)
data=data.drop('rpi',axis=1)
data=data.drop('spi',axis=1)
data=data.drop(' _tempm',axis=1)
data=data.drop(' _wdird',axis=1)
data=data.drop(' _wspdm',axis=1)
data=data.drop('year',axis=1)
data=data.drop('Type_Industrial Area',axis=1)
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr =RandomForestRegressor()
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
def AQI_Future_RF(data,y):
# data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = RandomForestRegressor()
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using features from features importance graph
def AQI_Feature_importance_Future_RF(data,y):
tree_clf = ExtraTreesRegressor()
# data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
tree_clf.fit(data, y)
importances = tree_clf.feature_importances_
feature_names = data.columns
imp_features=dict(zip(feature_names,importances))
features_up = sorted(imp_features.items(), key=lambda x: x[1], reverse=True)
features_down = sorted(imp_features.items(), key=lambda x: x[1], reverse=False)
#best features
x = [i[0] for i in features_up]
print(x)
x=data[x]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = RandomForestRegressor()
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using domain-knowledge features
def AQI_Domain_Knowledge_Future_RF(data,y):
x=data[[' _tempm',' _wdird',' _wspdm','year','Type_Industrial Area']]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = RandomForestRegressor()
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
def AQI_without_Domain_Knowledge_Future_RF(data,y):
data=data.drop('AQI',axis=1)
data=data.drop('NO2',axis=1)
data=data.drop('SO2',axis=1)
data=data.drop('SPM',axis=1)
data=data.drop('RSPM',axis=1)
data=data.drop('ni',axis=1)
data=data.drop('si',axis=1)
data=data.drop('rpi',axis=1)
data=data.drop('spi',axis=1)
data=data.drop(' _tempm',axis=1)
data=data.drop(' _wdird',axis=1)
data=data.drop(' _wspdm',axis=1)
data=data.drop('year',axis=1)
data=data.drop('Type_Industrial Area',axis=1)
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = RandomForestRegressor()
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using all features
def AQI_NN(data,layer):
y=data.AQI
data=data.drop('AQI',axis=1)
# data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = MLPRegressor(hidden_layer_sizes=(layer),
activation='relu',
solver='adam',
learning_rate='adaptive',
max_iter=1000,
learning_rate_init=0.01,
alpha=0.01,
batch_size=500,
# early_stopping=True,
random_state=1)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using features from features importance graph
def AQI_Feature_importance_NN(data, layer):
tree_clf = ExtraTreesRegressor()
y=data['AQI']
data=data.drop('AQI',axis=1)
tree_clf.fit(data, y)
importances = tree_clf.feature_importances_
feature_names = data.columns
imp_features=dict(zip(feature_names,importances))
features_up = sorted(imp_features.items(), key=lambda x: x[1], reverse=True)
features_down = sorted(imp_features.items(), key=lambda x: x[1], reverse=False)
#best features
x = [i[0] for i in features_up]
print(x)
x=data[x]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = MLPRegressor(hidden_layer_sizes=(layer),
activation='relu',
solver='adam',
learning_rate='adaptive',
max_iter=1000,
learning_rate_init=0.01,
alpha=0.01,
batch_size=500,
# early_stopping=True,
random_state=1)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using domain-knowledge features
def AQI_Domain_Knowledge_NN(data, layer):
y=data.AQI
x=data[[' _tempm',' _wdird',' _wspdm','year','Type_Industrial Area']]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = MLPRegressor(hidden_layer_sizes=(layer),
activation='relu',
solver='adam',
learning_rate='adaptive',
max_iter=1000,
learning_rate_init=0.01,
alpha=0.01,
batch_size=500,
# early_stopping=True,
random_state=1)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
def AQI_without_Domain_Knowledge_NN(data,layer):
y=data.AQI
# data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
data=data.drop('AQI',axis=1)
data=data.drop('NO2',axis=1)
data=data.drop('SO2',axis=1)
data=data.drop('SPM',axis=1)
data=data.drop('RSPM',axis=1)
data=data.drop('ni',axis=1)
data=data.drop('si',axis=1)
data=data.drop('rpi',axis=1)
data=data.drop('spi',axis=1)
data=data.drop(' _tempm',axis=1)
data=data.drop(' _wdird',axis=1)
data=data.drop(' _wspdm',axis=1)
data=data.drop('year',axis=1)
data=data.drop('Type_Industrial Area',axis=1)
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = MLPRegressor(hidden_layer_sizes=(layer),
activation='relu',
solver='adam',
learning_rate='adaptive',
max_iter=1000,
learning_rate_init=0.01,
alpha=0.01,
batch_size=500,
# early_stopping=True,
random_state=1)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
def AQI_Future_NN(data,y, layer):
# data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = MLPRegressor(hidden_layer_sizes=(layer),
activation='relu',
solver='adam',
learning_rate='adaptive',
max_iter=1000,
learning_rate_init=0.01,
alpha=0.01,
batch_size=500,
# early_stopping=True,
random_state=1)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using features from features importance graph
def AQI_Feature_importance_Future_NN(data,y, layer):
tree_clf = ExtraTreesRegressor()
# data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
tree_clf.fit(data, y)
importances = tree_clf.feature_importances_
feature_names = data.columns
imp_features=dict(zip(feature_names,importances))
features_up = sorted(imp_features.items(), key=lambda x: x[1], reverse=True)
features_down = sorted(imp_features.items(), key=lambda x: x[1], reverse=False)
#best features
x = [i[0] for i in features_up]
print(x)
x=data[x]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = MLPRegressor(hidden_layer_sizes=(layer),
activation='relu',
solver='adam',
learning_rate='adaptive',
max_iter=1000,
learning_rate_init=0.01,
alpha=0.01,
batch_size=500,
# early_stopping=True,
random_state=1)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using domain-knowledge features
def AQI_Domain_Knowledge_Future_NN(data,y,layer):
# data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
x=data[[' _tempm',' _wdird',' _wspdm','year','Type_Industrial Area']]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = MLPRegressor(hidden_layer_sizes=(layer),
activation='relu',
solver='adam',
learning_rate='adaptive',
max_iter=1000,
learning_rate_init=0.01,
alpha=0.01,
batch_size=500,
# early_stopping=True,
random_state=1)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
def AQI_without_Domain_Knowledge_Future_NN(data,y, layer):
# data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
data=data.drop('AQI',axis=1)
data=data.drop('NO2',axis=1)
data=data.drop('SO2',axis=1)
data=data.drop('SPM',axis=1)
data=data.drop('RSPM',axis=1)
data=data.drop('ni',axis=1)
data=data.drop('si',axis=1)
data=data.drop('rpi',axis=1)
data=data.drop('spi',axis=1)
data=data.drop(' _tempm',axis=1)
data=data.drop(' _wdird',axis=1)
data=data.drop(' _wspdm',axis=1)
data=data.drop('year',axis=1)
data=data.drop('Type_Industrial Area',axis=1)
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = MLPRegressor(hidden_layer_sizes=(layer),
activation='relu',
solver='adam',
learning_rate='adaptive',
max_iter=1000,
learning_rate_init=0.01,
alpha=0.01,
batch_size=500,
# early_stopping=True,
random_state=1)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
def Bar_graph (a1,a2,b1,b2,c1,c2,d1,d2,e1,e2,f1,f2):
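    # Grouped bar chart of train vs. test RMSE for the six models.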
barWidth = 0.2
bars2 = [a2,b2,c2,d2,e2,f2]
bars1 = [a1,b1,c1,d1,e1,f1]
r1 = np.arange(len(bars1))
r2 = [x + barWidth for x in r1]
plt.bar(r1, bars1, width = barWidth, color = 'blue', edgecolor = 'black', capsize=7, label='Train')
plt.bar(r2, bars2, width = barWidth, color = 'cyan', edgecolor = 'black', capsize=7, label='Test')
plt.xticks([r + barWidth for r in range(len(bars1))], ['LinearRegression', 'LR with Lasso','LR with Ridge','SVM','random forest', 'Neural Network'])
plt.ylabel('RMSE')
plt.xlabel('Models')
plt.legend()
plt.show()
def Bar_graph_without_svm(a1,a2,b1,b2,c1,c2,d1,d2,e1,e2):
barWidth = 0.2
bars2 = [a2,b2,c2,d2,e2]
bars1 = [a1,b1,c1,d1,e1]
r1 = np.arange(len(bars1))
r2 = [x + barWidth for x in r1]
plt.bar(r1, bars1, width = barWidth, color = 'blue', edgecolor = 'black', capsize=7, label='Train')
plt.bar(r2, bars2, width = barWidth, color = 'cyan', edgecolor = 'black',capsize=7, label='Test')
plt.xticks([r + barWidth for r in range(len(bars1))], ['LinearRegression', 'LR with Lasso','LR with Ridge','random forest', 'Neural Network'])
plt.ylabel('RMSE')
plt.xlabel('Models')
plt.legend()
plt.show()
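#Train/test RMSE comparison across the autumn, summer and winter subsets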
def Bar_graph_season(a1,a2,b1,b2,c1,c2):
barWidth = 0.2
bars2 = [a2,b2,c2]
bars1 = [a1,b1,c1]
r1 = np.arange(len(bars1))
r2 = [x + barWidth for x in r1]
plt.bar(r1, bars1, width = barWidth, color = 'blue', edgecolor = 'black', capsize=7, label='Train')
plt.bar(r2, bars2, width = barWidth, color = 'cyan', edgecolor = 'black',capsize=7, label='Test')
plt.xticks([r + barWidth for r in range(len(bars1))], ['Autumn', 'Summer','Winter'])
plt.ylabel('RMSE')
plt.xlabel('Seasons')
plt.legend()
plt.show()
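#Block-wise cross-validation: each fold fits an MLP on the next 'train' consecutive rows, evaluates on the following 'test' rows, and the mean test RMSE is returned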
def train_test_data_prepare(data, train, test, folds):
d_y = pd.read_csv('AQI_prediction_add.csv')
y = d_y.AQI_predicted
x_data = []
y_data = []
errors = []
for i in range(folds):
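        # rows i*(train+test) .. i*(train+test)+train-1 form the training block; the next 'test' rows form the evaluation block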
x_train = data.loc[i*(train+test):(i*(train+test)+train - 1), :]
x_test = data.loc[(i*(train+test)+train):(i+1)*(train+test)-1, :]
y_train = y.loc[i * (train + test):(i * (train + test) + train - 1)]
y_test = y.loc[(i * (train + test) + train):(i + 1) * (train + test) - 1]
regr = MLPRegressor(hidden_layer_sizes=(4, 4),
activation='relu',
solver='adam',
learning_rate='adaptive',
max_iter=1000,
learning_rate_init=0.01,
alpha=0.01,
# batch_size=500,
# early_stopping=True,
random_state=1)
regr.fit(x_train, y_train)
print("xxxx")
y_pred = regr.predict(x_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
y_pred = regr.predict(x_test)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
errors.append(np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print("Cross validation test error = ", sum(errors)/len(errors))
return sum(errors)/len(errors)
main()
|
[
"matplotlib.pyplot.title",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.xticks",
"sklearn.metrics.mean_squared_error",
"sklearn.linear_model.Lasso",
"sklearn.linear_model.Ridge",
"matplotlib.pyplot.show",
"pandas.get_dummies",
"matplotlib.pyplot.legend",
"sklearn.ensemble.RandomForestRegressor",
"sklearn.linear_model.LinearRegression",
"matplotlib.pyplot.ylabel",
"sklearn.svm.SVR",
"matplotlib.pyplot.plot",
"sklearn.neural_network.MLPRegressor",
"numpy.array",
"sklearn.ensemble.ExtraTreesRegressor",
"matplotlib.pyplot.xlabel"
] |
[((836, 876), 'pandas.read_csv', 'pd.read_csv', (['"""Original_with_dummies.csv"""'], {}), "('Original_with_dummies.csv')\n", (847, 876), True, 'import pandas as pd\n'), ((1157, 1197), 'pandas.read_csv', 'pd.read_csv', (['"""Original_with_dummies.csv"""'], {}), "('Original_with_dummies.csv')\n", (1168, 1197), True, 'import pandas as pd\n'), ((1480, 1520), 'pandas.read_csv', 'pd.read_csv', (['"""Original_with_dummies.csv"""'], {}), "('Original_with_dummies.csv')\n", (1491, 1520), True, 'import pandas as pd\n'), ((1640, 1677), 'pandas.read_csv', 'pd.read_csv', (['"""AQI_prediction_add.csv"""'], {}), "('AQI_prediction_add.csv')\n", (1651, 1677), True, 'import pandas as pd\n'), ((1976, 2006), 'pandas.read_csv', 'pd.read_csv', (['"""autumn_data.csv"""'], {}), "('autumn_data.csv')\n", (1987, 2006), True, 'import pandas as pd\n'), ((2109, 2170), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "[' _conds']", 'prefix': "[' _conds']"}), "(data, columns=[' _conds'], prefix=[' _conds'])\n", (2123, 2170), True, 'import pandas as pd\n'), ((2182, 2243), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "[' _wdire']", 'prefix': "[' _wdire']"}), "(data, columns=[' _wdire'], prefix=[' _wdire'])\n", (2196, 2243), True, 'import pandas as pd\n'), ((2255, 2310), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "['Type']", 'prefix': "['Type']"}), "(data, columns=['Type'], prefix=['Type'])\n", (2269, 2310), True, 'import pandas as pd\n'), ((2519, 2549), 'pandas.read_csv', 'pd.read_csv', (['"""summer_data.csv"""'], {}), "('summer_data.csv')\n", (2530, 2549), True, 'import pandas as pd\n'), ((2652, 2713), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "[' _conds']", 'prefix': "[' _conds']"}), "(data, columns=[' _conds'], prefix=[' _conds'])\n", (2666, 2713), True, 'import pandas as pd\n'), ((2725, 2786), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "[' _wdire']", 'prefix': "[' _wdire']"}), "(data, columns=[' _wdire'], prefix=[' _wdire'])\n", (2739, 2786), True, 'import pandas as pd\n'), ((2798, 2853), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "['Type']", 'prefix': "['Type']"}), "(data, columns=['Type'], prefix=['Type'])\n", (2812, 2853), True, 'import pandas as pd\n'), ((3062, 3092), 'pandas.read_csv', 'pd.read_csv', (['"""winter_data.csv"""'], {}), "('winter_data.csv')\n", (3073, 3092), True, 'import pandas as pd\n'), ((3195, 3256), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "[' _conds']", 'prefix': "[' _conds']"}), "(data, columns=[' _conds'], prefix=[' _conds'])\n", (3209, 3256), True, 'import pandas as pd\n'), ((3268, 3329), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "[' _wdire']", 'prefix': "[' _wdire']"}), "(data, columns=[' _wdire'], prefix=[' _wdire'])\n", (3282, 3329), True, 'import pandas as pd\n'), ((3341, 3396), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "['Type']", 'prefix': "['Type']"}), "(data, columns=['Type'], prefix=['Type'])\n", (3355, 3396), True, 'import pandas as pd\n'), ((3591, 3631), 'pandas.read_csv', 'pd.read_csv', (['"""Original_with_dummies.csv"""'], {}), "('Original_with_dummies.csv')\n", (3602, 3631), True, 'import pandas as pd\n'), ((3974, 4014), 'pandas.read_csv', 'pd.read_csv', (['"""Original_with_dummies.csv"""'], {}), "('Original_with_dummies.csv')\n", (3985, 4014), True, 'import pandas as pd\n'), ((4141, 4178), 'pandas.read_csv', 'pd.read_csv', (['"""AQI_prediction_add.csv"""'], {}), "('AQI_prediction_add.csv')\n", (4152, 
4178), True, 'import pandas as pd\n'), ((4488, 4528), 'pandas.read_csv', 'pd.read_csv', (['"""Original_with_dummies.csv"""'], {}), "('Original_with_dummies.csv')\n", (4499, 4528), True, 'import pandas as pd\n'), ((4851, 4891), 'pandas.read_csv', 'pd.read_csv', (['"""Original_with_dummies.csv"""'], {}), "('Original_with_dummies.csv')\n", (4862, 4891), True, 'import pandas as pd\n'), ((4941, 4978), 'pandas.read_csv', 'pd.read_csv', (['"""AQI_prediction_add.csv"""'], {}), "('AQI_prediction_add.csv')\n", (4952, 4978), True, 'import pandas as pd\n'), ((5310, 5350), 'pandas.read_csv', 'pd.read_csv', (['"""Original_with_dummies.csv"""'], {}), "('Original_with_dummies.csv')\n", (5321, 5350), True, 'import pandas as pd\n'), ((5640, 5680), 'pandas.read_csv', 'pd.read_csv', (['"""Original_with_dummies.csv"""'], {}), "('Original_with_dummies.csv')\n", (5651, 5680), True, 'import pandas as pd\n'), ((5709, 5746), 'pandas.read_csv', 'pd.read_csv', (['"""AQI_prediction_add.csv"""'], {}), "('AQI_prediction_add.csv')\n", (5720, 5746), True, 'import pandas as pd\n'), ((6027, 6067), 'pandas.read_csv', 'pd.read_csv', (['"""Original_with_dummies.csv"""'], {}), "('Original_with_dummies.csv')\n", (6038, 6067), True, 'import pandas as pd\n'), ((6411, 6451), 'pandas.read_csv', 'pd.read_csv', (['"""Original_with_dummies.csv"""'], {}), "('Original_with_dummies.csv')\n", (6422, 6451), True, 'import pandas as pd\n'), ((6458, 6495), 'pandas.read_csv', 'pd.read_csv', (['"""AQI_prediction_add.csv"""'], {}), "('AQI_prediction_add.csv')\n", (6469, 6495), True, 'import pandas as pd\n'), ((7262, 7302), 'pandas.read_csv', 'pd.read_csv', (['"""Original_with_dummies.csv"""'], {}), "('Original_with_dummies.csv')\n", (7273, 7302), True, 'import pandas as pd\n'), ((7312, 7369), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "['month']", 'prefix': "['month']"}), "(data, columns=['month'], prefix=['month'])\n", (7326, 7369), True, 'import pandas as pd\n'), ((7620, 7634), 'matplotlib.pyplot.plot', 'plt.plot', (['tips'], {}), '(tips)\n', (7628, 7634), True, 'import matplotlib.pyplot as plt\n'), ((7639, 7660), 'matplotlib.pyplot.xticks', 'plt.xticks', (['d', 'LABELS'], {}), '(d, LABELS)\n', (7649, 7660), True, 'import matplotlib.pyplot as plt\n'), ((7665, 7689), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""No of Days"""'], {}), "('No of Days')\n", (7675, 7689), True, 'import matplotlib.pyplot as plt\n'), ((7694, 7712), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""RMSE"""'], {}), "('RMSE')\n", (7704, 7712), True, 'import matplotlib.pyplot as plt\n'), ((7717, 7736), 'matplotlib.pyplot.title', 'plt.title', (['"""Models"""'], {}), "('Models')\n", (7726, 7736), True, 'import matplotlib.pyplot as plt\n'), ((7741, 7753), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7751, 7753), True, 'import matplotlib.pyplot as plt\n'), ((7758, 7768), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7766, 7768), True, 'import matplotlib.pyplot as plt\n'), ((7878, 7935), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "['month']", 'prefix': "['month']"}), "(data, columns=['month'], prefix=['month'])\n", (7892, 7935), True, 'import pandas as pd\n'), ((7975, 8031), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(data, y, test_size=0.2, random_state=1)\n', (7991, 8031), False, 'from sklearn.model_selection import train_test_split\n'), ((8043, 8061), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), 
'()\n', (8059, 8061), False, 'from sklearn.linear_model import LinearRegression\n'), ((8705, 8726), 'sklearn.ensemble.ExtraTreesRegressor', 'ExtraTreesRegressor', ([], {}), '()\n', (8724, 8726), False, 'from sklearn.ensemble import ExtraTreesRegressor, RandomForestRegressor\n'), ((8754, 8811), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "['month']", 'prefix': "['month']"}), "(data, columns=['month'], prefix=['month'])\n", (8768, 8811), True, 'import pandas as pd\n'), ((9293, 9346), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(x, y, test_size=0.2, random_state=1)\n', (9309, 9346), False, 'from sklearn.model_selection import train_test_split\n'), ((9358, 9376), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (9374, 9376), False, 'from sklearn.linear_model import LinearRegression\n'), ((9998, 10055), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "['month']", 'prefix': "['month']"}), "(data, columns=['month'], prefix=['month'])\n", (10012, 10055), True, 'import pandas as pd\n'), ((10170, 10223), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(x, y, test_size=0.2, random_state=1)\n', (10186, 10223), False, 'from sklearn.model_selection import train_test_split\n'), ((10235, 10253), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (10251, 10253), False, 'from sklearn.linear_model import LinearRegression\n'), ((10848, 10905), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "['month']", 'prefix': "['month']"}), "(data, columns=['month'], prefix=['month'])\n", (10862, 10905), True, 'import pandas as pd\n'), ((11437, 11493), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(data, y, test_size=0.2, random_state=1)\n', (11453, 11493), False, 'from sklearn.model_selection import train_test_split\n'), ((11505, 11523), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (11521, 11523), False, 'from sklearn.linear_model import LinearRegression\n'), ((12089, 12146), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "['month']", 'prefix': "['month']"}), "(data, columns=['month'], prefix=['month'])\n", (12103, 12146), True, 'import pandas as pd\n'), ((12186, 12242), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(data, y, test_size=0.2, random_state=1)\n', (12202, 12242), False, 'from sklearn.model_selection import train_test_split\n'), ((12254, 12272), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (12270, 12272), False, 'from sklearn.linear_model import LinearRegression\n'), ((12924, 12945), 'sklearn.ensemble.ExtraTreesRegressor', 'ExtraTreesRegressor', ([], {}), '()\n', (12943, 12945), False, 'from sklearn.ensemble import ExtraTreesRegressor, RandomForestRegressor\n'), ((12955, 13012), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "['month']", 'prefix': "['month']"}), "(data, columns=['month'], prefix=['month'])\n", (12969, 13012), True, 'import pandas as pd\n'), ((13461, 13514), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(x, y, test_size=0.2, random_state=1)\n', (13477, 13514), False, 'from 
sklearn.model_selection import train_test_split\n'), ((13526, 13544), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (13542, 13544), False, 'from sklearn.linear_model import LinearRegression\n'), ((14160, 14217), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "['month']", 'prefix': "['month']"}), "(data, columns=['month'], prefix=['month'])\n", (14174, 14217), True, 'import pandas as pd\n'), ((14354, 14407), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(x, y, test_size=0.2, random_state=1)\n', (14370, 14407), False, 'from sklearn.model_selection import train_test_split\n'), ((14419, 14437), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (14435, 14437), False, 'from sklearn.linear_model import LinearRegression\n'), ((15026, 15083), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "['month']", 'prefix': "['month']"}), "(data, columns=['month'], prefix=['month'])\n", (15040, 15083), True, 'import pandas as pd\n'), ((15692, 15748), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(data, y, test_size=0.2, random_state=1)\n', (15708, 15748), False, 'from sklearn.model_selection import train_test_split\n'), ((15760, 15778), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (15776, 15778), False, 'from sklearn.linear_model import LinearRegression\n'), ((16449, 16497), 'matplotlib.pyplot.plot', 'plt.plot', (['all_samples', 'y_pred'], {'label': '"""Predicted"""'}), "(all_samples, y_pred, label='Predicted')\n", (16457, 16497), True, 'import matplotlib.pyplot as plt\n'), ((16501, 16549), 'matplotlib.pyplot.plot', 'plt.plot', (['all_samples', 'y_train'], {'label': '"""Expected"""'}), "(all_samples, y_train, label='Expected')\n", (16509, 16549), True, 'import matplotlib.pyplot as plt\n'), ((16554, 16581), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""No of Samples"""'], {}), "('No of Samples')\n", (16564, 16581), True, 'import matplotlib.pyplot as plt\n'), ((16586, 16603), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""AQI"""'], {}), "('AQI')\n", (16596, 16603), True, 'import matplotlib.pyplot as plt\n'), ((16608, 16629), 'matplotlib.pyplot.title', 'plt.title', (['"""Training"""'], {}), "('Training')\n", (16617, 16629), True, 'import matplotlib.pyplot as plt\n'), ((16634, 16646), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (16644, 16646), True, 'import matplotlib.pyplot as plt\n'), ((16651, 16661), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16659, 16661), True, 'import matplotlib.pyplot as plt\n'), ((16794, 16842), 'matplotlib.pyplot.plot', 'plt.plot', (['all_samples', 'y_pred'], {'label': '"""Predicted"""'}), "(all_samples, y_pred, label='Predicted')\n", (16802, 16842), True, 'import matplotlib.pyplot as plt\n'), ((16846, 16892), 'matplotlib.pyplot.plot', 'plt.plot', (['all_samples', 'y_val'], {'label': '"""Expected"""'}), "(all_samples, y_val, label='Expected')\n", (16854, 16892), True, 'import matplotlib.pyplot as plt\n'), ((16897, 16924), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""No of Samples"""'], {}), "('No of Samples')\n", (16907, 16924), True, 'import matplotlib.pyplot as plt\n'), ((16929, 16946), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""AQI"""'], {}), "('AQI')\n", (16939, 16946), True, 'import matplotlib.pyplot as plt\n'), ((16951, 16974), 'matplotlib.pyplot.title', 
'plt.title', (['"""Validation"""'], {}), "('Validation')\n", (16960, 16974), True, 'import matplotlib.pyplot as plt\n'), ((16979, 16991), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (16989, 16991), True, 'import matplotlib.pyplot as plt\n'), ((16996, 17006), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (17004, 17006), True, 'import matplotlib.pyplot as plt\n'), ((17095, 17152), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "['month']", 'prefix': "['month']"}), "(data, columns=['month'], prefix=['month'])\n", (17109, 17152), True, 'import pandas as pd\n'), ((17192, 17248), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(data, y, test_size=0.2, random_state=1)\n', (17208, 17248), False, 'from sklearn.model_selection import train_test_split\n'), ((17260, 17278), 'sklearn.svm.SVR', 'SVR', ([], {'gamma': '"""scale"""'}), "(gamma='scale')\n", (17263, 17278), False, 'from sklearn.svm import SVR\n'), ((17925, 17946), 'sklearn.ensemble.ExtraTreesRegressor', 'ExtraTreesRegressor', ([], {}), '()\n', (17944, 17946), False, 'from sklearn.ensemble import ExtraTreesRegressor, RandomForestRegressor\n'), ((17974, 18031), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "['month']", 'prefix': "['month']"}), "(data, columns=['month'], prefix=['month'])\n", (17988, 18031), True, 'import pandas as pd\n'), ((18513, 18566), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(x, y, test_size=0.2, random_state=1)\n', (18529, 18566), False, 'from sklearn.model_selection import train_test_split\n'), ((18578, 18596), 'sklearn.svm.SVR', 'SVR', ([], {'gamma': '"""scale"""'}), "(gamma='scale')\n", (18581, 18596), False, 'from sklearn.svm import SVR\n'), ((19222, 19279), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "['month']", 'prefix': "['month']"}), "(data, columns=['month'], prefix=['month'])\n", (19236, 19279), True, 'import pandas as pd\n'), ((19430, 19483), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(x, y, test_size=0.2, random_state=1)\n', (19446, 19483), False, 'from sklearn.model_selection import train_test_split\n'), ((19495, 19513), 'sklearn.svm.SVR', 'SVR', ([], {'gamma': '"""scale"""'}), "(gamma='scale')\n", (19498, 19513), False, 'from sklearn.svm import SVR\n'), ((20112, 20169), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "['month']", 'prefix': "['month']"}), "(data, columns=['month'], prefix=['month'])\n", (20126, 20169), True, 'import pandas as pd\n'), ((20782, 20838), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(data, y, test_size=0.2, random_state=1)\n', (20798, 20838), False, 'from sklearn.model_selection import train_test_split\n'), ((20850, 20868), 'sklearn.svm.SVR', 'SVR', ([], {'gamma': '"""scale"""'}), "(gamma='scale')\n", (20853, 20868), False, 'from sklearn.svm import SVR\n'), ((21494, 21582), 'numpy.array', 'np.array', (['[0.011, 0.1, 0.001, 0.01, 0.3, 0.2, 0.6, 0.8, 0.001, 0.0001, 3, 4, 1, 2.4]'], {}), '([0.011, 0.1, 0.001, 0.01, 0.3, 0.2, 0.6, 0.8, 0.001, 0.0001, 3, 4,\n 1, 2.4])\n', (21502, 21582), True, 'import numpy as np\n'), ((21602, 21609), 'sklearn.linear_model.Ridge', 'Ridge', ([], {}), '()\n', (21607, 21609), False, 'from sklearn.linear_model import Ridge, Lasso\n'), 
((22107, 22164), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "['month']", 'prefix': "['month']"}), "(data, columns=['month'], prefix=['month'])\n", (22121, 22164), True, 'import pandas as pd\n'), ((22204, 22260), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(data, y, test_size=0.2, random_state=1)\n', (22220, 22260), False, 'from sklearn.model_selection import train_test_split\n'), ((22272, 22286), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'alpha': 'h'}), '(alpha=h)\n', (22277, 22286), False, 'from sklearn.linear_model import Ridge, Lasso\n'), ((22937, 22958), 'sklearn.ensemble.ExtraTreesRegressor', 'ExtraTreesRegressor', ([], {}), '()\n', (22956, 22958), False, 'from sklearn.ensemble import ExtraTreesRegressor, RandomForestRegressor\n'), ((22986, 23043), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "['month']", 'prefix': "['month']"}), "(data, columns=['month'], prefix=['month'])\n", (23000, 23043), True, 'import pandas as pd\n'), ((23525, 23578), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(x, y, test_size=0.2, random_state=1)\n', (23541, 23578), False, 'from sklearn.model_selection import train_test_split\n'), ((23590, 23604), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'alpha': 'h'}), '(alpha=h)\n', (23595, 23604), False, 'from sklearn.linear_model import Ridge, Lasso\n'), ((24372, 24425), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(x, y, test_size=0.2, random_state=1)\n', (24388, 24425), False, 'from sklearn.model_selection import train_test_split\n'), ((24437, 24451), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'alpha': 'h'}), '(alpha=h)\n', (24442, 24451), False, 'from sklearn.linear_model import Ridge, Lasso\n'), ((25576, 25632), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(data, y, test_size=0.2, random_state=1)\n', (25592, 25632), False, 'from sklearn.model_selection import train_test_split\n'), ((25644, 25658), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'alpha': 'h'}), '(alpha=h)\n', (25649, 25658), False, 'from sklearn.linear_model import Ridge, Lasso\n'), ((26230, 26287), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "['month']", 'prefix': "['month']"}), "(data, columns=['month'], prefix=['month'])\n", (26244, 26287), True, 'import pandas as pd\n'), ((26327, 26383), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(data, y, test_size=0.2, random_state=1)\n', (26343, 26383), False, 'from sklearn.model_selection import train_test_split\n'), ((26395, 26409), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'alpha': 'h'}), '(alpha=h)\n', (26400, 26409), False, 'from sklearn.linear_model import Ridge, Lasso\n'), ((27069, 27090), 'sklearn.ensemble.ExtraTreesRegressor', 'ExtraTreesRegressor', ([], {}), '()\n', (27088, 27090), False, 'from sklearn.ensemble import ExtraTreesRegressor, RandomForestRegressor\n'), ((27100, 27157), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "['month']", 'prefix': "['month']"}), "(data, columns=['month'], prefix=['month'])\n", (27114, 27157), True, 'import pandas as pd\n'), ((27606, 27659), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], 
{'test_size': '(0.2)', 'random_state': '(1)'}), '(x, y, test_size=0.2, random_state=1)\n', (27622, 27659), False, 'from sklearn.model_selection import train_test_split\n'), ((27671, 27685), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'alpha': 'h'}), '(alpha=h)\n', (27676, 27685), False, 'from sklearn.linear_model import Ridge, Lasso\n'), ((28482, 28535), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(x, y, test_size=0.2, random_state=1)\n', (28498, 28535), False, 'from sklearn.model_selection import train_test_split\n'), ((28547, 28561), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'alpha': 'h'}), '(alpha=h)\n', (28552, 28561), False, 'from sklearn.linear_model import Ridge, Lasso\n'), ((29751, 29807), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(data, y, test_size=0.2, random_state=1)\n', (29767, 29807), False, 'from sklearn.model_selection import train_test_split\n'), ((29819, 29833), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'alpha': 'h'}), '(alpha=h)\n', (29824, 29833), False, 'from sklearn.linear_model import Ridge, Lasso\n'), ((30508, 30564), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(data, y, test_size=0.2, random_state=1)\n', (30524, 30564), False, 'from sklearn.model_selection import train_test_split\n'), ((30576, 30590), 'sklearn.linear_model.Lasso', 'Lasso', ([], {'alpha': 'h'}), '(alpha=h)\n', (30581, 30590), False, 'from sklearn.linear_model import Ridge, Lasso\n'), ((31241, 31262), 'sklearn.ensemble.ExtraTreesRegressor', 'ExtraTreesRegressor', ([], {}), '()\n', (31260, 31262), False, 'from sklearn.ensemble import ExtraTreesRegressor, RandomForestRegressor\n'), ((31831, 31884), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(x, y, test_size=0.2, random_state=1)\n', (31847, 31884), False, 'from sklearn.model_selection import train_test_split\n'), ((31896, 31910), 'sklearn.linear_model.Lasso', 'Lasso', ([], {'alpha': 'h'}), '(alpha=h)\n', (31901, 31910), False, 'from sklearn.linear_model import Ridge, Lasso\n'), ((32642, 32695), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(x, y, test_size=0.2, random_state=1)\n', (32658, 32695), False, 'from sklearn.model_selection import train_test_split\n'), ((32707, 32721), 'sklearn.linear_model.Lasso', 'Lasso', ([], {'alpha': 'h'}), '(alpha=h)\n', (32712, 32721), False, 'from sklearn.linear_model import Ridge, Lasso\n'), ((33916, 33972), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(data, y, test_size=0.2, random_state=1)\n', (33932, 33972), False, 'from sklearn.model_selection import train_test_split\n'), ((33983, 33997), 'sklearn.linear_model.Lasso', 'Lasso', ([], {'alpha': 'h'}), '(alpha=h)\n', (33988, 33997), False, 'from sklearn.linear_model import Ridge, Lasso\n'), ((34670, 34726), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(data, y, test_size=0.2, random_state=1)\n', (34686, 34726), False, 'from sklearn.model_selection import train_test_split\n'), ((34737, 34751), 'sklearn.linear_model.Lasso', 'Lasso', ([], {'alpha': 'h'}), '(alpha=h)\n', (34742, 34751), 
False, 'from sklearn.linear_model import Ridge, Lasso\n'), ((35411, 35432), 'sklearn.ensemble.ExtraTreesRegressor', 'ExtraTreesRegressor', ([], {}), '()\n', (35430, 35432), False, 'from sklearn.ensemble import ExtraTreesRegressor, RandomForestRegressor\n'), ((35950, 36003), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(x, y, test_size=0.2, random_state=1)\n', (35966, 36003), False, 'from sklearn.model_selection import train_test_split\n'), ((36015, 36029), 'sklearn.linear_model.Lasso', 'Lasso', ([], {'alpha': 'h'}), '(alpha=h)\n', (36020, 36029), False, 'from sklearn.linear_model import Ridge, Lasso\n'), ((36755, 36808), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(x, y, test_size=0.2, random_state=1)\n', (36771, 36808), False, 'from sklearn.model_selection import train_test_split\n'), ((36820, 36834), 'sklearn.linear_model.Lasso', 'Lasso', ([], {'alpha': 'h'}), '(alpha=h)\n', (36825, 36834), False, 'from sklearn.linear_model import Ridge, Lasso\n'), ((37952, 38008), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(data, y, test_size=0.2, random_state=1)\n', (37968, 38008), False, 'from sklearn.model_selection import train_test_split\n'), ((38020, 38034), 'sklearn.linear_model.Lasso', 'Lasso', ([], {'alpha': 'h'}), '(alpha=h)\n', (38025, 38034), False, 'from sklearn.linear_model import Ridge, Lasso\n'), ((38669, 38725), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(data, y, test_size=0.2, random_state=1)\n', (38685, 38725), False, 'from sklearn.model_selection import train_test_split\n'), ((38737, 38760), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {}), '()\n', (38758, 38760), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((39406, 39427), 'sklearn.ensemble.ExtraTreesRegressor', 'ExtraTreesRegressor', ([], {}), '()\n', (39425, 39427), False, 'from sklearn.ensemble import ExtraTreesRegressor, RandomForestRegressor\n'), ((39996, 40049), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(x, y, test_size=0.2, random_state=1)\n', (40012, 40049), False, 'from sklearn.model_selection import train_test_split\n'), ((40061, 40084), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {}), '()\n', (40082, 40084), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((40811, 40864), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(x, y, test_size=0.2, random_state=1)\n', (40827, 40864), False, 'from sklearn.model_selection import train_test_split\n'), ((40876, 40899), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {}), '()\n', (40897, 40899), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((42018, 42074), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(data, y, test_size=0.2, random_state=1)\n', (42034, 42074), False, 'from sklearn.model_selection import train_test_split\n'), ((42085, 42108), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {}), '()\n', (42106, 42108), False, 'from sklearn.ensemble import 
RandomForestRegressor\n'), ((42776, 42832), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(data, y, test_size=0.2, random_state=1)\n', (42792, 42832), False, 'from sklearn.model_selection import train_test_split\n'), ((42844, 42867), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {}), '()\n', (42865, 42867), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((43522, 43543), 'sklearn.ensemble.ExtraTreesRegressor', 'ExtraTreesRegressor', ([], {}), '()\n', (43541, 43543), False, 'from sklearn.ensemble import ExtraTreesRegressor, RandomForestRegressor\n'), ((44061, 44114), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(x, y, test_size=0.2, random_state=1)\n', (44077, 44114), False, 'from sklearn.model_selection import train_test_split\n'), ((44126, 44149), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {}), '()\n', (44147, 44149), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((44870, 44923), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(x, y, test_size=0.2, random_state=1)\n', (44886, 44923), False, 'from sklearn.model_selection import train_test_split\n'), ((44935, 44958), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {}), '()\n', (44956, 44958), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((46072, 46128), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(data, y, test_size=0.2, random_state=1)\n', (46088, 46128), False, 'from sklearn.model_selection import train_test_split\n'), ((46140, 46163), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {}), '()\n', (46161, 46163), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((46909, 46965), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(data, y, test_size=0.2, random_state=1)\n', (46925, 46965), False, 'from sklearn.model_selection import train_test_split\n'), ((46977, 47168), 'sklearn.neural_network.MLPRegressor', 'MLPRegressor', ([], {'hidden_layer_sizes': 'layer', 'activation': '"""relu"""', 'solver': '"""adam"""', 'learning_rate': '"""adaptive"""', 'max_iter': '(1000)', 'learning_rate_init': '(0.01)', 'alpha': '(0.01)', 'batch_size': '(500)', 'random_state': '(1)'}), "(hidden_layer_sizes=layer, activation='relu', solver='adam',\n learning_rate='adaptive', max_iter=1000, learning_rate_init=0.01, alpha\n =0.01, batch_size=500, random_state=1)\n", (46989, 47168), False, 'from sklearn.neural_network import MLPRegressor\n'), ((48189, 48210), 'sklearn.ensemble.ExtraTreesRegressor', 'ExtraTreesRegressor', ([], {}), '()\n', (48208, 48210), False, 'from sklearn.ensemble import ExtraTreesRegressor, RandomForestRegressor\n'), ((48708, 48761), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(x, y, test_size=0.2, random_state=1)\n', (48724, 48761), False, 'from sklearn.model_selection import train_test_split\n'), ((48774, 48965), 'sklearn.neural_network.MLPRegressor', 'MLPRegressor', ([], {'hidden_layer_sizes': 'layer', 'activation': '"""relu"""', 'solver': '"""adam"""', 'learning_rate': '"""adaptive"""', 
'max_iter': '(1000)', 'learning_rate_init': '(0.01)', 'alpha': '(0.01)', 'batch_size': '(500)', 'random_state': '(1)'}), "(hidden_layer_sizes=layer, activation='relu', solver='adam',\n learning_rate='adaptive', max_iter=1000, learning_rate_init=0.01, alpha\n =0.01, batch_size=500, random_state=1)\n", (48786, 48965), False, 'from sklearn.neural_network import MLPRegressor\n'), ((50067, 50120), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(x, y, test_size=0.2, random_state=1)\n', (50083, 50120), False, 'from sklearn.model_selection import train_test_split\n'), ((50133, 50324), 'sklearn.neural_network.MLPRegressor', 'MLPRegressor', ([], {'hidden_layer_sizes': 'layer', 'activation': '"""relu"""', 'solver': '"""adam"""', 'learning_rate': '"""adaptive"""', 'max_iter': '(1000)', 'learning_rate_init': '(0.01)', 'alpha': '(0.01)', 'batch_size': '(500)', 'random_state': '(1)'}), "(hidden_layer_sizes=layer, activation='relu', solver='adam',\n learning_rate='adaptive', max_iter=1000, learning_rate_init=0.01, alpha\n =0.01, batch_size=500, random_state=1)\n", (50145, 50324), False, 'from sklearn.neural_network import MLPRegressor\n'), ((51889, 51945), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(data, y, test_size=0.2, random_state=1)\n', (51905, 51945), False, 'from sklearn.model_selection import train_test_split\n'), ((51957, 52148), 'sklearn.neural_network.MLPRegressor', 'MLPRegressor', ([], {'hidden_layer_sizes': 'layer', 'activation': '"""relu"""', 'solver': '"""adam"""', 'learning_rate': '"""adaptive"""', 'max_iter': '(1000)', 'learning_rate_init': '(0.01)', 'alpha': '(0.01)', 'batch_size': '(500)', 'random_state': '(1)'}), "(hidden_layer_sizes=layer, activation='relu', solver='adam',\n learning_rate='adaptive', max_iter=1000, learning_rate_init=0.01, alpha\n =0.01, batch_size=500, random_state=1)\n", (51969, 52148), False, 'from sklearn.neural_network import MLPRegressor\n'), ((53191, 53247), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(data, y, test_size=0.2, random_state=1)\n', (53207, 53247), False, 'from sklearn.model_selection import train_test_split\n'), ((53259, 53450), 'sklearn.neural_network.MLPRegressor', 'MLPRegressor', ([], {'hidden_layer_sizes': 'layer', 'activation': '"""relu"""', 'solver': '"""adam"""', 'learning_rate': '"""adaptive"""', 'max_iter': '(1000)', 'learning_rate_init': '(0.01)', 'alpha': '(0.01)', 'batch_size': '(500)', 'random_state': '(1)'}), "(hidden_layer_sizes=layer, activation='relu', solver='adam',\n learning_rate='adaptive', max_iter=1000, learning_rate_init=0.01, alpha\n =0.01, batch_size=500, random_state=1)\n", (53271, 53450), False, 'from sklearn.neural_network import MLPRegressor\n'), ((54480, 54501), 'sklearn.ensemble.ExtraTreesRegressor', 'ExtraTreesRegressor', ([], {}), '()\n', (54499, 54501), False, 'from sklearn.ensemble import ExtraTreesRegressor, RandomForestRegressor\n'), ((55019, 55072), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(x, y, test_size=0.2, random_state=1)\n', (55035, 55072), False, 'from sklearn.model_selection import train_test_split\n'), ((55085, 55276), 'sklearn.neural_network.MLPRegressor', 'MLPRegressor', ([], {'hidden_layer_sizes': 'layer', 'activation': '"""relu"""', 'solver': '"""adam"""', 
'learning_rate': '"""adaptive"""', 'max_iter': '(1000)', 'learning_rate_init': '(0.01)', 'alpha': '(0.01)', 'batch_size': '(500)', 'random_state': '(1)'}), "(hidden_layer_sizes=layer, activation='relu', solver='adam',\n learning_rate='adaptive', max_iter=1000, learning_rate_init=0.01, alpha\n =0.01, batch_size=500, random_state=1)\n", (55097, 55276), False, 'from sklearn.neural_network import MLPRegressor\n'), ((56443, 56496), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(x, y, test_size=0.2, random_state=1)\n', (56459, 56496), False, 'from sklearn.model_selection import train_test_split\n'), ((56508, 56699), 'sklearn.neural_network.MLPRegressor', 'MLPRegressor', ([], {'hidden_layer_sizes': 'layer', 'activation': '"""relu"""', 'solver': '"""adam"""', 'learning_rate': '"""adaptive"""', 'max_iter': '(1000)', 'learning_rate_init': '(0.01)', 'alpha': '(0.01)', 'batch_size': '(500)', 'random_state': '(1)'}), "(hidden_layer_sizes=layer, activation='relu', solver='adam',\n learning_rate='adaptive', max_iter=1000, learning_rate_init=0.01, alpha\n =0.01, batch_size=500, random_state=1)\n", (56520, 56699), False, 'from sklearn.neural_network import MLPRegressor\n'), ((58260, 58316), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(data, y, test_size=0.2, random_state=1)\n', (58276, 58316), False, 'from sklearn.model_selection import train_test_split\n'), ((58329, 58520), 'sklearn.neural_network.MLPRegressor', 'MLPRegressor', ([], {'hidden_layer_sizes': 'layer', 'activation': '"""relu"""', 'solver': '"""adam"""', 'learning_rate': '"""adaptive"""', 'max_iter': '(1000)', 'learning_rate_init': '(0.01)', 'alpha': '(0.01)', 'batch_size': '(500)', 'random_state': '(1)'}), "(hidden_layer_sizes=layer, activation='relu', solver='adam',\n learning_rate='adaptive', max_iter=1000, learning_rate_init=0.01, alpha\n =0.01, batch_size=500, random_state=1)\n", (58341, 58520), False, 'from sklearn.neural_network import MLPRegressor\n'), ((59626, 59724), 'matplotlib.pyplot.bar', 'plt.bar', (['r1', 'bars1'], {'width': 'barWidth', 'color': '"""blue"""', 'edgecolor': '"""black"""', 'capsize': '(7)', 'label': '"""Train"""'}), "(r1, bars1, width=barWidth, color='blue', edgecolor='black', capsize\n =7, label='Train')\n", (59633, 59724), True, 'import matplotlib.pyplot as plt\n'), ((59731, 59828), 'matplotlib.pyplot.bar', 'plt.bar', (['r2', 'bars2'], {'width': 'barWidth', 'color': '"""cyan"""', 'edgecolor': '"""black"""', 'capsize': '(7)', 'label': '"""Test"""'}), "(r2, bars2, width=barWidth, color='cyan', edgecolor='black', capsize\n =7, label='Test')\n", (59738, 59828), True, 'import matplotlib.pyplot as plt\n'), ((59988, 60006), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""RMSE"""'], {}), "('RMSE')\n", (59998, 60006), True, 'import matplotlib.pyplot as plt\n'), ((60011, 60031), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Models"""'], {}), "('Models')\n", (60021, 60031), True, 'import matplotlib.pyplot as plt\n'), ((60036, 60048), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (60046, 60048), True, 'import matplotlib.pyplot as plt\n'), ((60053, 60063), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (60061, 60063), True, 'import matplotlib.pyplot as plt\n'), ((60271, 60369), 'matplotlib.pyplot.bar', 'plt.bar', (['r1', 'bars1'], {'width': 'barWidth', 'color': '"""blue"""', 'edgecolor': '"""black"""', 'capsize': '(7)', 'label': 
'"""Train"""'}), "(r1, bars1, width=barWidth, color='blue', edgecolor='black', capsize\n =7, label='Train')\n", (60278, 60369), True, 'import matplotlib.pyplot as plt\n'), ((60376, 60473), 'matplotlib.pyplot.bar', 'plt.bar', (['r2', 'bars2'], {'width': 'barWidth', 'color': '"""cyan"""', 'edgecolor': '"""black"""', 'capsize': '(7)', 'label': '"""Test"""'}), "(r2, bars2, width=barWidth, color='cyan', edgecolor='black', capsize\n =7, label='Test')\n", (60383, 60473), True, 'import matplotlib.pyplot as plt\n'), ((60625, 60643), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""RMSE"""'], {}), "('RMSE')\n", (60635, 60643), True, 'import matplotlib.pyplot as plt\n'), ((60648, 60668), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Models"""'], {}), "('Models')\n", (60658, 60668), True, 'import matplotlib.pyplot as plt\n'), ((60673, 60685), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (60683, 60685), True, 'import matplotlib.pyplot as plt\n'), ((60690, 60700), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (60698, 60700), True, 'import matplotlib.pyplot as plt\n'), ((60879, 60977), 'matplotlib.pyplot.bar', 'plt.bar', (['r1', 'bars1'], {'width': 'barWidth', 'color': '"""blue"""', 'edgecolor': '"""black"""', 'capsize': '(7)', 'label': '"""Train"""'}), "(r1, bars1, width=barWidth, color='blue', edgecolor='black', capsize\n =7, label='Train')\n", (60886, 60977), True, 'import matplotlib.pyplot as plt\n'), ((60984, 61081), 'matplotlib.pyplot.bar', 'plt.bar', (['r2', 'bars2'], {'width': 'barWidth', 'color': '"""cyan"""', 'edgecolor': '"""black"""', 'capsize': '(7)', 'label': '"""Test"""'}), "(r2, bars2, width=barWidth, color='cyan', edgecolor='black', capsize\n =7, label='Test')\n", (60991, 61081), True, 'import matplotlib.pyplot as plt\n'), ((61175, 61193), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""RMSE"""'], {}), "('RMSE')\n", (61185, 61193), True, 'import matplotlib.pyplot as plt\n'), ((61198, 61219), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Seasons"""'], {}), "('Seasons')\n", (61208, 61219), True, 'import matplotlib.pyplot as plt\n'), ((61224, 61236), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (61234, 61236), True, 'import matplotlib.pyplot as plt\n'), ((61241, 61251), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (61249, 61251), True, 'import matplotlib.pyplot as plt\n'), ((61319, 61356), 'pandas.read_csv', 'pd.read_csv', (['"""AQI_prediction_add.csv"""'], {}), "('AQI_prediction_add.csv')\n", (61330, 61356), True, 'import pandas as pd\n'), ((8266, 8309), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (8292, 8309), False, 'from sklearn import metrics\n'), ((8494, 8535), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (8520, 8535), False, 'from sklearn import metrics\n'), ((9581, 9624), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (9607, 9624), False, 'from sklearn import metrics\n'), ((9809, 9850), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (9835, 9850), False, 'from sklearn import metrics\n'), ((10458, 10501), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (10484, 10501), False, 'from sklearn import metrics\n'), ((10686, 10727), 'sklearn.metrics.mean_squared_error', 
'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (10712, 10727), False, 'from sklearn import metrics\n'), ((11728, 11771), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (11754, 11771), False, 'from sklearn import metrics\n'), ((11956, 11997), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (11982, 11997), False, 'from sklearn import metrics\n'), ((12477, 12520), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (12503, 12520), False, 'from sklearn import metrics\n'), ((12705, 12746), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (12731, 12746), False, 'from sklearn import metrics\n'), ((13749, 13792), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (13775, 13792), False, 'from sklearn import metrics\n'), ((13977, 14018), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (14003, 14018), False, 'from sklearn import metrics\n'), ((14642, 14685), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (14668, 14685), False, 'from sklearn import metrics\n'), ((14870, 14911), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (14896, 14911), False, 'from sklearn import metrics\n'), ((15983, 16026), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (16009, 16026), False, 'from sklearn import metrics\n'), ((16211, 16252), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (16237, 16252), False, 'from sklearn import metrics\n'), ((17483, 17526), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (17509, 17526), False, 'from sklearn import metrics\n'), ((17711, 17752), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (17737, 17752), False, 'from sklearn import metrics\n'), ((18801, 18844), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (18827, 18844), False, 'from sklearn import metrics\n'), ((19029, 19070), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (19055, 19070), False, 'from sklearn import metrics\n'), ((19718, 19761), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (19744, 19761), False, 'from sklearn import metrics\n'), ((19946, 19987), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (19972, 19987), False, 'from sklearn import metrics\n'), ((21073, 21116), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (21099, 21116), False, 'from sklearn import metrics\n'), ((21301, 21342), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (21327, 21342), 
False, 'from sklearn import metrics\n'), ((22491, 22534), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (22517, 22534), False, 'from sklearn import metrics\n'), ((22719, 22760), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (22745, 22760), False, 'from sklearn import metrics\n'), ((23809, 23852), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (23835, 23852), False, 'from sklearn import metrics\n'), ((24037, 24078), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (24063, 24078), False, 'from sklearn import metrics\n'), ((24656, 24699), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (24682, 24699), False, 'from sklearn import metrics\n'), ((24884, 24925), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (24910, 24925), False, 'from sklearn import metrics\n'), ((25863, 25906), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (25889, 25906), False, 'from sklearn import metrics\n'), ((26091, 26132), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (26117, 26132), False, 'from sklearn import metrics\n'), ((26614, 26657), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (26640, 26657), False, 'from sklearn import metrics\n'), ((26842, 26883), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (26868, 26883), False, 'from sklearn import metrics\n'), ((27890, 27933), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (27916, 27933), False, 'from sklearn import metrics\n'), ((28118, 28159), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (28144, 28159), False, 'from sklearn import metrics\n'), ((28766, 28809), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (28792, 28809), False, 'from sklearn import metrics\n'), ((28994, 29035), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (29020, 29035), False, 'from sklearn import metrics\n'), ((30038, 30081), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (30064, 30081), False, 'from sklearn import metrics\n'), ((30266, 30307), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (30292, 30307), False, 'from sklearn import metrics\n'), ((30795, 30838), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (30821, 30838), False, 'from sklearn import metrics\n'), ((31023, 31064), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (31049, 31064), False, 'from sklearn import metrics\n'), ((32115, 32158), 'sklearn.metrics.mean_squared_error', 
'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (32141, 32158), False, 'from sklearn import metrics\n'), ((32343, 32384), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (32369, 32384), False, 'from sklearn import metrics\n'), ((32926, 32969), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (32952, 32969), False, 'from sklearn import metrics\n'), ((33154, 33195), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (33180, 33195), False, 'from sklearn import metrics\n'), ((34202, 34245), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (34228, 34245), False, 'from sklearn import metrics\n'), ((34430, 34471), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (34456, 34471), False, 'from sklearn import metrics\n'), ((34956, 34999), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (34982, 34999), False, 'from sklearn import metrics\n'), ((35184, 35225), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (35210, 35225), False, 'from sklearn import metrics\n'), ((36234, 36277), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (36260, 36277), False, 'from sklearn import metrics\n'), ((36462, 36503), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (36488, 36503), False, 'from sklearn import metrics\n'), ((37039, 37082), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (37065, 37082), False, 'from sklearn import metrics\n'), ((37267, 37308), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (37293, 37308), False, 'from sklearn import metrics\n'), ((38239, 38282), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (38265, 38282), False, 'from sklearn import metrics\n'), ((38467, 38508), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (38493, 38508), False, 'from sklearn import metrics\n'), ((38965, 39008), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (38991, 39008), False, 'from sklearn import metrics\n'), ((39193, 39234), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (39219, 39234), False, 'from sklearn import metrics\n'), ((40289, 40332), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (40315, 40332), False, 'from sklearn import metrics\n'), ((40517, 40558), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (40543, 40558), False, 'from sklearn import metrics\n'), ((41104, 41147), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (41130, 41147), 
False, 'from sklearn import metrics\n'), ((41332, 41373), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (41358, 41373), False, 'from sklearn import metrics\n'), ((42313, 42356), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (42339, 42356), False, 'from sklearn import metrics\n'), ((42541, 42582), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (42567, 42582), False, 'from sklearn import metrics\n'), ((43072, 43115), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (43098, 43115), False, 'from sklearn import metrics\n'), ((43300, 43341), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (43326, 43341), False, 'from sklearn import metrics\n'), ((44354, 44397), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (44380, 44397), False, 'from sklearn import metrics\n'), ((44582, 44623), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (44608, 44623), False, 'from sklearn import metrics\n'), ((45163, 45206), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (45189, 45206), False, 'from sklearn import metrics\n'), ((45391, 45432), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (45417, 45432), False, 'from sklearn import metrics\n'), ((46368, 46411), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (46394, 46411), False, 'from sklearn import metrics\n'), ((46596, 46637), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (46622, 46637), False, 'from sklearn import metrics\n'), ((47741, 47784), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (47767, 47784), False, 'from sklearn import metrics\n'), ((47969, 48010), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (47995, 48010), False, 'from sklearn import metrics\n'), ((49538, 49581), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (49564, 49581), False, 'from sklearn import metrics\n'), ((49766, 49807), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (49792, 49807), False, 'from sklearn import metrics\n'), ((50897, 50940), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (50923, 50940), False, 'from sklearn import metrics\n'), ((51125, 51166), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (51151, 51166), False, 'from sklearn import metrics\n'), ((52721, 52764), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (52747, 52764), False, 'from sklearn import metrics\n'), ((52949, 52990), 'sklearn.metrics.mean_squared_error', 
'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (52975, 52990), False, 'from sklearn import metrics\n'), ((54023, 54066), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (54049, 54066), False, 'from sklearn import metrics\n'), ((54251, 54292), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (54277, 54292), False, 'from sklearn import metrics\n'), ((55849, 55892), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (55875, 55892), False, 'from sklearn import metrics\n'), ((56077, 56118), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (56103, 56118), False, 'from sklearn import metrics\n'), ((57272, 57315), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (57298, 57315), False, 'from sklearn import metrics\n'), ((57500, 57541), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (57526, 57541), False, 'from sklearn import metrics\n'), ((59093, 59136), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (59119, 59136), False, 'from sklearn import metrics\n'), ((59321, 59362), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (59347, 59362), False, 'from sklearn import metrics\n'), ((61780, 61956), 'sklearn.neural_network.MLPRegressor', 'MLPRegressor', ([], {'hidden_layer_sizes': '(4, 4)', 'activation': '"""relu"""', 'solver': '"""adam"""', 'learning_rate': '"""adaptive"""', 'max_iter': '(1000)', 'learning_rate_init': '(0.01)', 'alpha': '(0.01)', 'random_state': '(1)'}), "(hidden_layer_sizes=(4, 4), activation='relu', solver='adam',\n learning_rate='adaptive', max_iter=1000, learning_rate_init=0.01, alpha\n =0.01, random_state=1)\n", (61792, 61956), False, 'from sklearn.neural_network import MLPRegressor\n'), ((8201, 8244), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (8227, 8244), False, 'from sklearn import metrics\n'), ((8433, 8474), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (8459, 8474), False, 'from sklearn import metrics\n'), ((9516, 9559), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (9542, 9559), False, 'from sklearn import metrics\n'), ((9748, 9789), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (9774, 9789), False, 'from sklearn import metrics\n'), ((10393, 10436), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (10419, 10436), False, 'from sklearn import metrics\n'), ((10625, 10666), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (10651, 10666), False, 'from sklearn import metrics\n'), ((11663, 11706), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (11689, 11706), False, 'from sklearn import metrics\n'), ((11895, 
11936), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (11921, 11936), False, 'from sklearn import metrics\n'), ((12412, 12455), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (12438, 12455), False, 'from sklearn import metrics\n'), ((12644, 12685), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (12670, 12685), False, 'from sklearn import metrics\n'), ((13684, 13727), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (13710, 13727), False, 'from sklearn import metrics\n'), ((13916, 13957), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (13942, 13957), False, 'from sklearn import metrics\n'), ((14577, 14620), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (14603, 14620), False, 'from sklearn import metrics\n'), ((14809, 14850), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (14835, 14850), False, 'from sklearn import metrics\n'), ((15918, 15961), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (15944, 15961), False, 'from sklearn import metrics\n'), ((16150, 16191), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (16176, 16191), False, 'from sklearn import metrics\n'), ((17418, 17461), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (17444, 17461), False, 'from sklearn import metrics\n'), ((17650, 17691), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (17676, 17691), False, 'from sklearn import metrics\n'), ((18736, 18779), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (18762, 18779), False, 'from sklearn import metrics\n'), ((18968, 19009), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (18994, 19009), False, 'from sklearn import metrics\n'), ((19653, 19696), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (19679, 19696), False, 'from sklearn import metrics\n'), ((19885, 19926), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (19911, 19926), False, 'from sklearn import metrics\n'), ((21008, 21051), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (21034, 21051), False, 'from sklearn import metrics\n'), ((21240, 21281), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (21266, 21281), False, 'from sklearn import metrics\n'), ((22426, 22469), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (22452, 22469), False, 'from sklearn import metrics\n'), ((22658, 22699), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], 
{}), '(y_val, y_pred)\n', (22684, 22699), False, 'from sklearn import metrics\n'), ((23744, 23787), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (23770, 23787), False, 'from sklearn import metrics\n'), ((23976, 24017), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (24002, 24017), False, 'from sklearn import metrics\n'), ((24591, 24634), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (24617, 24634), False, 'from sklearn import metrics\n'), ((24823, 24864), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (24849, 24864), False, 'from sklearn import metrics\n'), ((25798, 25841), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (25824, 25841), False, 'from sklearn import metrics\n'), ((26030, 26071), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (26056, 26071), False, 'from sklearn import metrics\n'), ((26549, 26592), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (26575, 26592), False, 'from sklearn import metrics\n'), ((26781, 26822), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (26807, 26822), False, 'from sklearn import metrics\n'), ((27825, 27868), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (27851, 27868), False, 'from sklearn import metrics\n'), ((28057, 28098), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (28083, 28098), False, 'from sklearn import metrics\n'), ((28701, 28744), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (28727, 28744), False, 'from sklearn import metrics\n'), ((28933, 28974), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (28959, 28974), False, 'from sklearn import metrics\n'), ((29973, 30016), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (29999, 30016), False, 'from sklearn import metrics\n'), ((30205, 30246), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (30231, 30246), False, 'from sklearn import metrics\n'), ((30730, 30773), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (30756, 30773), False, 'from sklearn import metrics\n'), ((30962, 31003), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (30988, 31003), False, 'from sklearn import metrics\n'), ((32050, 32093), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (32076, 32093), False, 'from sklearn import metrics\n'), ((32282, 32323), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (32308, 32323), False, 'from sklearn import metrics\n'), ((32861, 
32904), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (32887, 32904), False, 'from sklearn import metrics\n'), ((33093, 33134), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (33119, 33134), False, 'from sklearn import metrics\n'), ((34137, 34180), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (34163, 34180), False, 'from sklearn import metrics\n'), ((34369, 34410), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (34395, 34410), False, 'from sklearn import metrics\n'), ((34891, 34934), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (34917, 34934), False, 'from sklearn import metrics\n'), ((35123, 35164), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (35149, 35164), False, 'from sklearn import metrics\n'), ((36169, 36212), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (36195, 36212), False, 'from sklearn import metrics\n'), ((36401, 36442), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (36427, 36442), False, 'from sklearn import metrics\n'), ((36974, 37017), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (37000, 37017), False, 'from sklearn import metrics\n'), ((37206, 37247), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (37232, 37247), False, 'from sklearn import metrics\n'), ((38174, 38217), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (38200, 38217), False, 'from sklearn import metrics\n'), ((38406, 38447), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (38432, 38447), False, 'from sklearn import metrics\n'), ((38900, 38943), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (38926, 38943), False, 'from sklearn import metrics\n'), ((39132, 39173), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (39158, 39173), False, 'from sklearn import metrics\n'), ((40224, 40267), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (40250, 40267), False, 'from sklearn import metrics\n'), ((40456, 40497), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (40482, 40497), False, 'from sklearn import metrics\n'), ((41039, 41082), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (41065, 41082), False, 'from sklearn import metrics\n'), ((41271, 41312), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (41297, 41312), False, 'from sklearn import metrics\n'), ((42248, 42291), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 
'y_pred'], {}), '(y_train, y_pred)\n', (42274, 42291), False, 'from sklearn import metrics\n'), ((42480, 42521), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (42506, 42521), False, 'from sklearn import metrics\n'), ((43007, 43050), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (43033, 43050), False, 'from sklearn import metrics\n'), ((43239, 43280), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (43265, 43280), False, 'from sklearn import metrics\n'), ((44289, 44332), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (44315, 44332), False, 'from sklearn import metrics\n'), ((44521, 44562), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (44547, 44562), False, 'from sklearn import metrics\n'), ((45098, 45141), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (45124, 45141), False, 'from sklearn import metrics\n'), ((45330, 45371), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (45356, 45371), False, 'from sklearn import metrics\n'), ((46303, 46346), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (46329, 46346), False, 'from sklearn import metrics\n'), ((46535, 46576), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (46561, 46576), False, 'from sklearn import metrics\n'), ((47676, 47719), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (47702, 47719), False, 'from sklearn import metrics\n'), ((47908, 47949), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (47934, 47949), False, 'from sklearn import metrics\n'), ((49473, 49516), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (49499, 49516), False, 'from sklearn import metrics\n'), ((49705, 49746), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (49731, 49746), False, 'from sklearn import metrics\n'), ((50832, 50875), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (50858, 50875), False, 'from sklearn import metrics\n'), ((51064, 51105), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (51090, 51105), False, 'from sklearn import metrics\n'), ((52656, 52699), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (52682, 52699), False, 'from sklearn import metrics\n'), ((52888, 52929), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (52914, 52929), False, 'from sklearn import metrics\n'), ((53958, 54001), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (53984, 54001), False, 'from sklearn import metrics\n'), 
((54190, 54231), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (54216, 54231), False, 'from sklearn import metrics\n'), ((55784, 55827), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (55810, 55827), False, 'from sklearn import metrics\n'), ((56016, 56057), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (56042, 56057), False, 'from sklearn import metrics\n'), ((57207, 57250), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (57233, 57250), False, 'from sklearn import metrics\n'), ((57439, 57480), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (57465, 57480), False, 'from sklearn import metrics\n'), ((59028, 59071), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (59054, 59071), False, 'from sklearn import metrics\n'), ((59260, 59301), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (59286, 59301), False, 'from sklearn import metrics\n'), ((62496, 62539), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (62522, 62539), False, 'from sklearn import metrics\n'), ((62638, 62680), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (62664, 62680), False, 'from sklearn import metrics\n'), ((62713, 62755), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (62739, 62755), False, 'from sklearn import metrics\n')]
|
# class for the truck object
# max of 16 packages
# travel at 18mph
# 3 trucks but only 2 drivers
# 8am earliest departure from hub
import Location
import Utility
import Package
# the truck object, initializes with parameters set in the assessment
# time_space complexity of O(1)
class Truck:
def __init__(self):
self.truck_number = 1
self.speed = 18
self.trip_odometer = 0.0
self.odometer = 0.0
self.time_out = 0.0
self.location: Location = Location.get_location(0)
self.max_cargo = 16
self.cargo = []
self.is_full = False
self.delivered = []
self.start_time = 0
# takes a package object and loads it into the truck objects cargo list
# updates packages values
# ensures that the maximum of packages the truck can hold is not exceeded
# time_space complexity of O(1)
def load_package(self, package: Package):
if len(self.cargo) < self.max_cargo:
self.cargo.append(package)
package.package_status = "En Route"
package.package_truck_number = self.truck_number
package.package_load_time = Utility.format_min_to_time(self.time_out + self.start_time)
else:
self.is_full = True
print(f"Truck is full did not load package #{package.package_id}")
# removes a package from the trucks cargo
# could be used if there was a transfer of packages between trucks or returned to hub without being delivered
# time_space complexity of O(1)
def remove_package(self, package):
self.cargo.remove(package)
# delivers a package from a trucks cargo
# updates package's info
# moves package data from cargo to delivered
    # time_space complexity of O(N)
def deliver_package(self, package_id):
delivered_at = self.start_time + self.time_out
# updates the relevant package data upon delivery
# time_space complexity of O(1)
def update_on_delivery(package):
package.package_delivered_at = delivered_at
package.package_status = "Delivered"
self.delivered.append(package)
self.remove_package(package)
[update_on_delivery(package) for package in self.cargo if package.package_id == package_id]
# resets truck data for the start of a route
# could be used if you wanted to see data from each run the truck makes as opposed to total data
# time_space complexity of O(1)
def start_route(self):
self.time_out = 0.0
self.location = Location.get_location(0)
self.trip_odometer = 0.0
self.cargo = []
self.is_full = False
self.delivered = []
# simulates the truck moving from location to location
# updates the location attribute, as well as the odometer's and timers
# time_space complexity of O(1)
def drive_truck(self, destination_id):
destination = Location.get_location(destination_id)
distance = Location.get_distance(self.location, destination)
self.time_out += (distance / self.speed) * 60
self.trip_odometer += distance
self.odometer += distance
self.location = destination
# boolean value for whether the truck has no more packages in cargo
# time_space complexity of O(1)
def truck_is_empty(self):
if len(self.cargo) == 0:
return True
|
[
"Utility.format_min_to_time",
"Location.get_distance",
"Location.get_location"
] |
[((521, 545), 'Location.get_location', 'Location.get_location', (['(0)'], {}), '(0)\n', (542, 545), False, 'import Location\n'), ((2643, 2667), 'Location.get_location', 'Location.get_location', (['(0)'], {}), '(0)\n', (2664, 2667), False, 'import Location\n'), ((3028, 3065), 'Location.get_location', 'Location.get_location', (['destination_id'], {}), '(destination_id)\n', (3049, 3065), False, 'import Location\n'), ((3086, 3135), 'Location.get_distance', 'Location.get_distance', (['self.location', 'destination'], {}), '(self.location, destination)\n', (3107, 3135), False, 'import Location\n'), ((1199, 1258), 'Utility.format_min_to_time', 'Utility.format_min_to_time', (['(self.time_out + self.start_time)'], {}), '(self.time_out + self.start_time)\n', (1225, 1258), False, 'import Utility\n')]
|
# -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""ModelCheckpoint callback defination."""
from .callbacks import Callback
from vega.core.common.class_factory import ClassFactory, ClassType
@ClassFactory.register(ClassType.CALLBACK)
class ModelCheckpoint(Callback):
"""Callback that saves the evaluated Performance."""
def before_train(self, logs=None):
"""Be called before the training process."""
self.is_chief = self.params['is_chief']
self.do_validation = self.params['do_validation']
def after_epoch(self, epoch, logs=None):
"""Be called after each epoch."""
if self.is_chief:
self.trainer._save_checkpoint(epoch)
if not self.trainer.cfg.get('save_best_model', False):
return
self.performance = logs.get('summary_perfs', None)
best_changed = self.performance['best_valid_perfs_changed']
if best_changed:
self.trainer.output_model()
|
[
"vega.core.common.class_factory.ClassFactory.register"
] |
[((581, 622), 'vega.core.common.class_factory.ClassFactory.register', 'ClassFactory.register', (['ClassType.CALLBACK'], {}), '(ClassType.CALLBACK)\n', (602, 622), False, 'from vega.core.common.class_factory import ClassFactory, ClassType\n')]
|
import pyftdc
import datetime
p = pyftdc.FTDCParser()
start = datetime.datetime.now()
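# parse every FTDC file under diagnostic.data (non-lazy) and measure the elapsed wall-clock time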
p.parse_dir('/home/jorge/diagnostic.data', lazy=False)
end = datetime.datetime.now()
t = end - start
print(t)
|
[
"pyftdc.FTDCParser",
"datetime.datetime.now"
] |
[((35, 54), 'pyftdc.FTDCParser', 'pyftdc.FTDCParser', ([], {}), '()\n', (52, 54), False, 'import pyftdc\n'), ((64, 87), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (85, 87), False, 'import datetime\n'), ((149, 172), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (170, 172), False, 'import datetime\n')]
|
from constructs import Construct
from aws_cdk import (
Duration,
aws_sqs as sqs,
aws_sns as sns,
aws_lambda as _lambda,
aws_lambda_event_sources as events,
)
lambda_timeout = Duration.seconds(15)
visibility_timeout = lambda_timeout.plus(Duration.seconds(5))
retention_period = Duration.minutes(60)
# for lambda dlq and destinations - maximum number of times to retry when the function returns an error,
# should be between 0 and 2, default 2.
lambda_retry_attempt = 2
# for sqs dlq - number of times the failed message can be dequeued from sqs before send to dead-letter queue,
# should be between 1 and 1000, default none.
sqs_max_receives = 3
def add_sns_event_source(scope: Construct, function: _lambda.Function, topic: sns.Topic):
"""
Add SNS topic as Lambda event source.
Args:
scope (Construct): the scope object, all child constructs are defined within this scope.
function: Lambda function to add event source to.
topic: SNS topic as the Lambda event source.
"""
sns_source = events.SnsEventSource(topic)
function.add_event_source(sns_source)
def add_sqs_event_source(scope: Construct, function: _lambda.Function, queue: sqs.Queue):
"""
Add SQS as Lambda event source.
Args:
scope (Construct): the scope object, all child constructs are defined within this scope.
function: Lambda function to add event source to.
queue: SQS queue as the Lambda event source.
"""
sqs_source = events.SqsEventSource(queue, batch_size=1)
alias = _lambda.Alias(scope, "alias", alias_name="CURRENT", version=function.current_version)
alias.add_event_source(sqs_source)
|
[
"aws_cdk.aws_lambda_event_sources.SqsEventSource",
"aws_cdk.Duration.seconds",
"aws_cdk.aws_lambda_event_sources.SnsEventSource",
"aws_cdk.Duration.minutes",
"aws_cdk.aws_lambda.Alias"
] |
[((196, 216), 'aws_cdk.Duration.seconds', 'Duration.seconds', (['(15)'], {}), '(15)\n', (212, 216), False, 'from aws_cdk import Duration, aws_sqs as sqs, aws_sns as sns, aws_lambda as _lambda, aws_lambda_event_sources as events\n'), ((298, 318), 'aws_cdk.Duration.minutes', 'Duration.minutes', (['(60)'], {}), '(60)\n', (314, 318), False, 'from aws_cdk import Duration, aws_sqs as sqs, aws_sns as sns, aws_lambda as _lambda, aws_lambda_event_sources as events\n'), ((258, 277), 'aws_cdk.Duration.seconds', 'Duration.seconds', (['(5)'], {}), '(5)\n', (274, 277), False, 'from aws_cdk import Duration, aws_sqs as sqs, aws_sns as sns, aws_lambda as _lambda, aws_lambda_event_sources as events\n'), ((1053, 1081), 'aws_cdk.aws_lambda_event_sources.SnsEventSource', 'events.SnsEventSource', (['topic'], {}), '(topic)\n', (1074, 1081), True, 'from aws_cdk import Duration, aws_sqs as sqs, aws_sns as sns, aws_lambda as _lambda, aws_lambda_event_sources as events\n'), ((1504, 1546), 'aws_cdk.aws_lambda_event_sources.SqsEventSource', 'events.SqsEventSource', (['queue'], {'batch_size': '(1)'}), '(queue, batch_size=1)\n', (1525, 1546), True, 'from aws_cdk import Duration, aws_sqs as sqs, aws_sns as sns, aws_lambda as _lambda, aws_lambda_event_sources as events\n'), ((1559, 1649), 'aws_cdk.aws_lambda.Alias', '_lambda.Alias', (['scope', '"""alias"""'], {'alias_name': '"""CURRENT"""', 'version': 'function.current_version'}), "(scope, 'alias', alias_name='CURRENT', version=function.\n current_version)\n", (1572, 1649), True, 'from aws_cdk import Duration, aws_sqs as sqs, aws_sns as sns, aws_lambda as _lambda, aws_lambda_event_sources as events\n')]
|
from sentence_transformers import SentenceTransformer
from process import processing_combined
import pickle as pkl
# Corpus with example sentences
def model_transformer(query_data):
df_sentences_list, df = processing_combined(query_data)
embedder = SentenceTransformer('bert-base-nli-mean-tokens')
corpus = df_sentences_list
corpus_embeddings = embedder.encode(corpus,show_progress_bar = True)
filename = 'finalized_model.sav'
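    # cache the computed embeddings to disk so they can be reloaded without re-encoding the corpus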
pkl.dump(corpus_embeddings, open(filename, 'wb'))
return embedder, corpus, df
|
[
"sentence_transformers.SentenceTransformer",
"process.processing_combined"
] |
[((212, 243), 'process.processing_combined', 'processing_combined', (['query_data'], {}), '(query_data)\n', (231, 243), False, 'from process import processing_combined\n'), ((259, 307), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['"""bert-base-nli-mean-tokens"""'], {}), "('bert-base-nli-mean-tokens')\n", (278, 307), False, 'from sentence_transformers import SentenceTransformer\n')]
|
from django.http import HttpResponse
from django.shortcuts import render_to_response
import json
from . import feature_vector
from . import dist
from . import top_k
import re
import codecs
#import sys
#import imp
# imp.reload(sys)
# sys.setdefaultencoding('utf-8') # Python 3 doesn't have this method; the default encoding is already UTF-8
pl_path="D:/Clothes Search System/PL/"
kinds_dic={'0':"up_clothes",'1':"down_clothes",'2':"dress"}
def get_faq(request):
return render_to_response('faq.html', {})
def get_liscense(request):
return render_to_response('liscense.html', {})
def get_about(request):
return render_to_response('about.html', {})
def get_protocol(request):
return render_to_response('protocol.html', {})
def get_uploadImage(request):
return render_to_response('uploadImage.html', {})
def search_similar_images(request):
#print('method search_similar_images')
response_dict = {}
if request.method == 'POST':
clothes_kind = kinds_dic[request.POST["kind"]];
        upload_image_path = save_file(request.FILES['upload_image'],clothes_kind) # save the uploaded image and return its path, UploadImages/
        upload_image_feature_vector = feature_vector.feature_vector_of_image(upload_image_path) # feature extraction
        distances = dist.dists(upload_image_feature_vector, pl_path+clothes_kind+'/'+clothes_kind+"_feature.txt")# json file, compute distances between the query image and every library image
        k = 20 # returns [(img_path,dists)...] img_path : .../kind/index
        top_k_clothes = top_k.top_k_dists(distances, k)   # return [(image_name)...], the k closest images; img_name : i_j.jpg
        image_size_file = open(pl_path+clothes_kind+'/'+clothes_kind+"_size.txt", 'r') # contains image width/height info
        image_size_dict = json.loads(image_size_file.read())    # convert the string to a dict
image_size_file.close()
        clothes_info_file = open(pl_path+clothes_kind+'/'+clothes_kind+"_info.txt", 'r') # clothes info dictionary file
clothes_info = clothes_info_file.read()
clothes_info_file.close()
if clothes_info[:3] == codecs.BOM_UTF8:
            clothes_info = clothes_info[3:] # all clothes info, strip the first three characters (UTF-8 BOM marker)
# clothes_info = clothes_info.encode('gbk')
# print clothes_info
similar_image_dict_list = []
similar_image_url_prefix = "http://172.16.58.3:8000/Images/"+clothes_kind+"/"
for image_name in top_k_clothes:
image_dict = {}
            #image_name = image_path.split('/')[-1] # split out the image name; format i_j.jpg, the j-th photo of the i-th garment
            clothes_index = image_name.split('_')[0] # split out the first index i of the image name
            similar_image_url = '%s%s' % (similar_image_url_prefix, image_name) #http://172.16.58.3:8000/Images/{kind}/image_name, only one sample photo is provided
            similar_image_size = image_size_dict[image_name] # a list
            image_dict['download_url'] = similar_image_url # image download link, hosted on this server
            image_dict['width'] = similar_image_size[0] #[1:5] when the size string has four digits
            image_dict['height'] = similar_image_size[1] #[6:10]
            info = getClotheInfo(clothes_index, clothes_info) # fetch from the clothes info store by index (tuple)
image_dict['shopping_url'] = info[-1]
image_dict['other_info'] = '\n'.join(info[:-1])
# image_dict['shopping_url'] = get_shopping_url(clothes_info, clothes_index)
# image_dict['other_info'] = get_other_info(clothes_info, clothes_index)
# print image_dict['shopping_url']
# print image_dict['other_info']
# print clothes_index
            similar_image_dict_list.append(image_dict) # append the image info dict to the response list
response_dict["status"] = 1
response_dict["data"] = similar_image_dict_list
    return HttpResponse(json.dumps(response_dict)) # return the image info; the images themselves are fetched via the download links
def getClotheInfo(clothes_id, all_clothes_info):
regex_expression = r'"id":' + clothes_id +r'.*?"brand":"(.*?)".*?"productName":"(.*?)".*?"material":"(.*?)".*?"price":"(.*?)".*?"buyUrl":"(.*?)"'
pattern = re.compile(regex_expression)
match = pattern.search(all_clothes_info)
if match:
cinfo=list(match.groups()) #tuple can't be assigned!!!
        cinfo[0]='品牌:' +cinfo[0]   # '品牌' = brand
        cinfo[1]='品名:' +cinfo[1]   # '品名' = product name
        cinfo[2]='材质:' +cinfo[2]   # '材质' = material
        cinfo[3]='价格:' +cinfo[3]   # '价格' = price
        return cinfo # return the info fields
else:
return ("Unknown", "Unknown", "Unknown", "Unknown", "http://item.jd.com/1547204870.html")
def save_file(file, clothes_kind): # save the uploaded file
''' Little helper to save a file
'''
filename = file._get_name()
# fd = open('%s/%s' % (MEDIA_ROOT, str(path) + str(filename)), 'wb')
#print(filename)
upload_image_path = pl_path+"upload_images/"+clothes_kind+"/"+str(filename)
fd = open(upload_image_path, 'wb')
for chunk in file.chunks():
fd.write(chunk)
fd.close()
return upload_image_path
# assert False
#TODO analyse image_name, get the type of wanted image, and treat them distingushly
def get_similar_image(request, clothes_kind, image_name): # image_name is a capture group from the request url; returns the requested image
response_dict = {}
image_path = pl_path+clothes_kind+'/'+clothes_kind+'_src/'+ image_name
try:
        image_data = open(image_path, 'rb').read() # read the image data
except Exception as e:
# raise e
print(e)
response_dict["status"] = 0
response_dict["data"] = "open image error"
return HttpResponse(json.dumps(response_dict))
# check image type
# image_type = image_name.split('.')[-1]
# print image_type
if image_name.endswith('jpeg') or image_name.endswith('jpg'):
return HttpResponse(image_data, content_type="image/jpeg")
else:
return HttpResponse(image_data, content_type="image/png")
'''
def get_clothes_info(path='D:\\clothes_info.txt'): # deprecated
target = open(path, 'r')
clothes_info_str = target.read()
target.close()
clothes_info_dic = json.loads(clothes_info_str)
return clothes_info_dic
def get_shopping_url(clothes_info, clothes_index): # deprecated
# regExp = r'\{.+\"id\":' + clothes_index + r',.+\"buyUrl\":\"(.+)\"\}'
regExp = r'\{[^\{\}]+\"id\":' + clothes_index + r',[^\{\}]+\"buyUrl\":\"([^\{\}]+)\"\}'
# print regExp
searchObj = re.search(regExp, clothes_info, re.I|re.M)
return searchObj.groups()[0];
def get_other_info(clothes_info, clothes_index): # deprecated
regExp = r'\{[^\{\}]+\"id\":' + clothes_index + r',[^\{\}]+\"brand\":\"([^\{\}\"]+)\"[^\{\}]+\"productName\":\"([^\{\}\"]+)\"[^\{\}]+\"material\":\"([^\{\}\"]+)\"[^\{\}]+\"price\":\"([^\{\}\"]+)\"\}'
searchObj = re.search(regExp, clothes_info, re.I|re.M)
other_info_dict = {}
other_info_dict['brand'] = searchObj.groups()[0]
other_info_dict['productName'] = searchObj.groups()[1]
other_info_dict['material'] = searchObj.groups()[2]
other_info_dict['price'] = searchObj.groups()[3]
return other_info_dict;
if __name__ == '__main__': # encoding check
f = open('clothes_info_1000_utf8.txt')
all_clothes_info = f.read()
f.close()
if all_clothes_info[:3] == codecs.BOM_UTF8:
all_clothes_info = all_clothes_info[3:]
all_clothes_info = all_clothes_info.encode('gbk')
print(getClotheInfo('1', all_clothes_info))
print(getClotheInfo('20', all_clothes_info))
print(getClotheInfo('39', all_clothes_info))
'''
|
[
"django.shortcuts.render_to_response",
"django.http.HttpResponse",
"json.dumps",
"re.compile"
] |
[((476, 510), 'django.shortcuts.render_to_response', 'render_to_response', (['"""faq.html"""', '{}'], {}), "('faq.html', {})\n", (494, 510), False, 'from django.shortcuts import render_to_response\n'), ((550, 589), 'django.shortcuts.render_to_response', 'render_to_response', (['"""liscense.html"""', '{}'], {}), "('liscense.html', {})\n", (568, 589), False, 'from django.shortcuts import render_to_response\n'), ((626, 662), 'django.shortcuts.render_to_response', 'render_to_response', (['"""about.html"""', '{}'], {}), "('about.html', {})\n", (644, 662), False, 'from django.shortcuts import render_to_response\n'), ((702, 741), 'django.shortcuts.render_to_response', 'render_to_response', (['"""protocol.html"""', '{}'], {}), "('protocol.html', {})\n", (720, 741), False, 'from django.shortcuts import render_to_response\n'), ((784, 826), 'django.shortcuts.render_to_response', 'render_to_response', (['"""uploadImage.html"""', '{}'], {}), "('uploadImage.html', {})\n", (802, 826), False, 'from django.shortcuts import render_to_response\n'), ((4092, 4120), 're.compile', 're.compile', (['regex_expression'], {}), '(regex_expression)\n', (4102, 4120), False, 'import re\n'), ((3819, 3844), 'json.dumps', 'json.dumps', (['response_dict'], {}), '(response_dict)\n', (3829, 3844), False, 'import json\n'), ((5821, 5872), 'django.http.HttpResponse', 'HttpResponse', (['image_data'], {'content_type': '"""image/jpeg"""'}), "(image_data, content_type='image/jpeg')\n", (5833, 5872), False, 'from django.http import HttpResponse\n'), ((5910, 5960), 'django.http.HttpResponse', 'HttpResponse', (['image_data'], {'content_type': '"""image/png"""'}), "(image_data, content_type='image/png')\n", (5922, 5960), False, 'from django.http import HttpResponse\n'), ((5617, 5642), 'json.dumps', 'json.dumps', (['response_dict'], {}), '(response_dict)\n', (5627, 5642), False, 'import json\n')]
|
import re
input = open("inputs/day4.txt", "r")
#credit to themanush on r/adventofcode I was very confused how to nicely read this in
lines = [line.replace("\n", " ") for line in input.read().split("\n\n")]
#part1
requiredItems = ["byr","iyr","eyr","hgt","hcl","ecl","pid"]
acceptedPP = 0
for line in lines:
if all(item in line for item in requiredItems):
acceptedPP+=1
print(acceptedPP)
#part2
acceptedPP2 = 0
for line in lines:
if all(item in line for item in requiredItems):
print("Original:", line)
fields = []
birthYear = re.search("byr:\\d{4}",line)
if (int(birthYear.group()[4:]) >= 1920 and int(birthYear.group()[4:]) <= 2002):
fields.insert(0,birthYear.group())
print("birth year is good")
issueYear = re.search("iyr:\\d{4}",line)
if (int(issueYear.group()[4:]) >= 2010 and int(issueYear.group()[4:]) <= 2020):
fields.insert(0,issueYear.group())
print("issue year is good")
expYear = re.search("eyr:\\d{4}",line)
if (int(expYear.group()[4:]) >= 2020 and int(expYear.group()[4:]) <= 2030):
fields.insert(0,expYear.group())
print("exp year is good")
height = re.search("hgt:\\d{2,3}[a-z]{2}",line)
if height:
value = re.search("\\d{2,3}", height.group())
if value:
if height.group().find("cm") != -1:
if int(value.group()) >= 150 and int(value.group()) <= 193:
print("height is good")
fields.insert(0,height.group())
else:
if int(value.group()) >= 59 and int(value.group()) <= 76:
print("height is good")
fields.insert(0,height.group())
hairColor = re.search("hcl:#[a-f0-9]{6}", line)
if hairColor:
print("hair color is good")
fields.insert(0,hairColor.group())
eyeColor = re.search("ecl:(amb|blu|brn|gry|grn|hzl|oth)",line)
if eyeColor:
print("eye color is good")
fields.insert(0,eyeColor.group())
passportID = re.search("pid:\\d{9}",line)
if passportID:
print("ID is good")
fields.insert(0,passportID.group())
if len(fields) == 7:
print("Accepted:", line)
acceptedPP2+=1
print("------")
print(acceptedPP2)
|
[
"re.search"
] |
[((570, 599), 're.search', 're.search', (['"""byr:\\\\d{4}"""', 'line'], {}), "('byr:\\\\d{4}', line)\n", (579, 599), False, 'import re\n'), ((794, 823), 're.search', 're.search', (['"""iyr:\\\\d{4}"""', 'line'], {}), "('iyr:\\\\d{4}', line)\n", (803, 823), False, 'import re\n'), ((1016, 1045), 're.search', 're.search', (['"""eyr:\\\\d{4}"""', 'line'], {}), "('eyr:\\\\d{4}', line)\n", (1025, 1045), False, 'import re\n'), ((1229, 1268), 're.search', 're.search', (['"""hgt:\\\\d{2,3}[a-z]{2}"""', 'line'], {}), "('hgt:\\\\d{2,3}[a-z]{2}', line)\n", (1238, 1268), False, 'import re\n'), ((1827, 1862), 're.search', 're.search', (['"""hcl:#[a-f0-9]{6}"""', 'line'], {}), "('hcl:#[a-f0-9]{6}', line)\n", (1836, 1862), False, 'import re\n'), ((1991, 2043), 're.search', 're.search', (['"""ecl:(amb|blu|brn|gry|grn|hzl|oth)"""', 'line'], {}), "('ecl:(amb|blu|brn|gry|grn|hzl|oth)', line)\n", (2000, 2043), False, 'import re\n'), ((2170, 2199), 're.search', 're.search', (['"""pid:\\\\d{9}"""', 'line'], {}), "('pid:\\\\d{9}', line)\n", (2179, 2199), False, 'import re\n')]
|
import random
import numpy as np
import matplotlib.pyplot as plt
from torch import tensor
from torch import cat
from torch import clamp
from torch.distributions import normal
from torch import nn
import torch.nn.functional as F
from torch import optim
from torch.utils.tensorboard import SummaryWriter
import torch
import os
print(os.environ)
import roboschool
import gym
model_name = "SAC-RoboschoolHopper-v1"
num_iterations = 3000000
learning_rate = 0.0003
discount_rate = 0.99
replay_buffer_max_size = 1000000
target_smoothing_coefficient = 0.0005
target_update_interval = 1
num_gradient_steps = 1
num_env_steps = 1
reward_scale = 5
minibatch_size = 256
writer = SummaryWriter(log_dir="./runs/v0-1mil-iter-256-node-hidden-layers-buffer-1mil")
# device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
device = torch.device("cpu")
cpu_device = torch.device("cpu")
# define actor network
class SACRoboschoolHopperActorNN(nn.Module):
def __init__(self):
super(SACRoboschoolHopperActorNN, self).__init__()
self.fc1 = nn.Linear(15, 256)
self.fc2 = nn.Linear(256, 256)
self.mean = nn.Linear(256, 3)
self.log_stdev = nn.Linear(256, 3)
self.normal_dist = normal.Normal(0, 1)
def forward(self, x_state):
x_state = F.relu(self.fc1(x_state))
x_state = F.relu(self.fc2(x_state))
mean = self.mean(x_state)
log_stdev = self.log_stdev(x_state)
action = mean + self.normal_dist.sample(sample_shape=log_stdev.shape) * torch.exp(log_stdev)
squashed_action = torch.tanh(action)
action_dist = normal.Normal(mean, torch.exp(log_stdev))
        log_prob_squashed_a = action_dist.log_prob(action) - torch.sum(torch.log(clamp(1 - squashed_action**2, min=1e-8)), dim=1) # tanh-squash correction; TODO check dims
return action, log_prob_squashed_a
# define critic network
class SACRoboschoolHopperCriticNN(nn.Module):
def __init__(self):
super(SACRoboschoolHopperCriticNN, self).__init__()
self.fc1 = nn.Linear(18, 100)
self.fc2 = nn.Linear(100, 100)
self.fc3 = nn.Linear(100, 3)
def forward(self, x_state, x_action):
x = cat((x_state, x_action), dim=1) # concatenate inputs along 0th dimension
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
# define soft state value network
class SACRoboschoolHopperStateValueNN(nn.Module):
def __init__(self):
super(SACRoboschoolHopperStateValueNN, self).__init__()
self.fc1 = nn.Linear(15, 100)
self.fc2 = nn.Linear(100, 100)
self.fc3 = nn.Linear(100, 1)
def forward(self, x_state):
x = F.relu(self.fc1(x_state))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
# Initialize parameter vectors ψ, ψ¯, θ, φ.
state_value_net = SACRoboschoolHopperStateValueNN().to(device)
state_value_target_net = SACRoboschoolHopperStateValueNN().to(device)
critic_net_1 = SACRoboschoolHopperCriticNN().to(device)
critic_net_2 = SACRoboschoolHopperCriticNN().to(device)
actor_net = SACRoboschoolHopperActorNN().to(device)
# make the state value target net parameters the same
state_value_target_net.load_state_dict(state_value_net.state_dict())
# initialize replay buffer D
replay_buffer = []
# initialize train and test environments
env = gym.make('RoboschoolHopper-v1')
curr_state = env.reset()
curr_state = tensor(curr_state).float().to(device)
test_env = gym.make('RoboschoolHopper-v1')
curr_test_state = test_env.reset()
greatest_avg_episode_rewards = -np.inf
# initialize optimizers for each network except target (parameters updated manually)
state_value_net_optimizer = optim.Adam(state_value_net.parameters(), lr=learning_rate)
critic_net_1_optimizer = optim.Adam(critic_net_1.parameters(), lr=learning_rate)
critic_net_2_optimizer = optim.Adam(critic_net_2.parameters(), lr=learning_rate)
actor_net_optimizer = optim.Adam(actor_net.parameters(), lr=learning_rate)
# for each iteration do
for t in range(num_iterations):
# for each environment step do
# (in practice, at most one env step per gradient step)
# at ∼ πφ(at|st)
    action, log_prob = actor_net(curr_state.view(1, -1,).float())  # actor_net returns (action, log_prob)
    action = action.detach().to(cpu_device).numpy().squeeze()
# st+1 ∼ p(st+1|st, at)
next_state, reward, done, _ = env.step(action)
reward = reward * reward_scale
# D ← D ∪ {(st, at, r(st, at), st+1)}
replay_buffer.append((curr_state.view(1, -1, ), tensor(action).to(device).view(1, -1, ), log_prob.to(device).view(1, -1, ),
tensor(reward).float().to(device).view(1, 1, ), tensor(next_state).to(device).view(1, -1, ),
tensor(done).to(device).view(1, 1, )))
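    # keep the replay buffer bounded: once it grows 10 past the max size, drop the 10 oldest transitions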
if len(replay_buffer) > replay_buffer_max_size + 10:
replay_buffer = replay_buffer[10:]
# for each gradient step do
for gradient_step in range(num_gradient_steps):
# Sample mini-batch of N transitions (s, a, r, s') from D
transitions_minibatch = random.choices(replay_buffer, k=minibatch_size)
minibatch_states, minibatch_actions, minibatch_action_log_probs, minibatch_rewards, minibatch_next_states, minibatch_dones = [cat(mb, dim=0) for mb in zip(*transitions_minibatch)]
        minibatch_states = minibatch_states.float()
        minibatch_next_states = minibatch_next_states.float()  # the target network also needs float32 inputs
# ψ ← ψ − λV ∇ˆψJV (ψ)
state_value_net.zero_grad()
# state_value_error = torch.mean(0.5 * torch.mean(state_value_net(minibatch_states) - torch.mean(torch.min(critic_net_1(minibatch_states, minibatch_actions),critic_net_2(minibatch_states, minibatch_actions)) - torch.log(actor_net(minibatch_states)))) ** 2) # TODO fix?
state_value_net_loss = torch.mean(0.5 * (state_value_net(minibatch_states) - (torch.min(critic_net_1(minibatch_states, minibatch_actions), critic_net_2(minibatch_states, minibatch_actions)) - torch.log(clamp(actor_net(minibatch_states), min=1e-8)))) ** 2) # TODO fix?
state_value_net_loss.backward()
state_value_net_optimizer.step()
writer.add_scalar('Loss/state_value_net', state_value_net_loss.detach().to(cpu_device).numpy().squeeze(), t)
# θi ← θi − λQ∇ˆθiJQ(θi) for i ∈ {1, 2}
critic_net_1.zero_grad()
critic_net_1_loss = torch.mean(0.5 * (critic_net_1(minibatch_states, minibatch_actions) - (minibatch_rewards + discount_rate*state_value_target_net(minibatch_next_states)*(-minibatch_dones.float() + 1))) ** 2)
critic_net_1_loss.backward()
critic_net_1_optimizer.step()
writer.add_scalar('Loss/critic_net_1', critic_net_1_loss.detach().to(cpu_device).numpy().squeeze(), t)
critic_net_2.zero_grad()
critic_net_2_loss = torch.mean(0.5 * (critic_net_2(minibatch_states, minibatch_actions) - (minibatch_rewards + discount_rate * state_value_target_net(minibatch_next_states)*(-minibatch_dones.float() + 1))) ** 2)
critic_net_2_loss.backward()
critic_net_2_optimizer.step()
writer.add_scalar('Loss/critic_net_2', critic_net_2_loss.detach().to(cpu_device).numpy().squeeze(), t)
# φ ← φ − λπ∇ˆφJπ(φ)
actor_net.zero_grad()
minibatch_actions_new, minibatch_action_log_probs_new = actor_net(minibatch_states)
actor_net_loss = torch.mean(minibatch_action_log_probs_new - torch.min(critic_net_1(minibatch_states, minibatch_actions_new), critic_net_2(minibatch_states, minibatch_actions_new))) # TODO fix?
actor_net_loss.backward()
actor_net_optimizer.step()
writer.add_scalar('Loss/actor_net', actor_net_loss.detach().to(cpu_device).numpy().squeeze(), t)
# print(actor_net_loss.grad_fn())
# ψ¯ ← τψ + (1 − τ )ψ¯
for state_value_target_net_parameter, state_value_net_parameter in zip(state_value_target_net.parameters(), state_value_net.parameters()):
state_value_target_net_parameter.data = target_smoothing_coefficient*state_value_net_parameter + (1 - target_smoothing_coefficient)*state_value_target_net_parameter
# end for
if t % (num_iterations // 1000) == 0 or t == num_iterations - 1:
print("iter", t)
torch.save(state_value_net.state_dict(), 'models/current/' + model_name + '-state_value_net.pkl')
torch.save(state_value_target_net.state_dict(), 'models/current/' + model_name + '-state_value_target_net.pkl')
torch.save(critic_net_1.state_dict(), 'models/current/' + model_name + '-critic_net_1.pkl')
torch.save(critic_net_2.state_dict(), 'models/current/' + model_name + '-critic_net_2.pkl')
torch.save(actor_net.state_dict(), 'models/current/' + model_name + '-actor_net.pkl')
if not done:
curr_state = tensor(next_state).float().to(device)
else:
curr_state = env.reset()
curr_state = tensor(curr_state).float().to(device)
if t % (num_iterations // 25) == 0 or t == num_iterations - 1:
render = False
num_eval_episodes = 10
test_obs = test_env.reset()
episode_rewards = []
episode_reward = 0
while len(episode_rewards) < num_eval_episodes:
            test_action, _ = actor_net(tensor(test_obs).view(1, -1).float().to(device))
            test_action = test_action.detach().to(cpu_device).numpy().squeeze()
test_obs, test_reward, test_done, _ = test_env.step(test_action)
episode_reward += test_reward
if test_done:
episode_rewards.append(episode_reward)
episode_reward = 0
test_obs = test_env.reset()
if render:
test_env.render()
avg_episode_rewards = np.mean(np.asarray(episode_rewards))
writer.add_scalar('Reward/test', avg_episode_rewards, t)
        if avg_episode_rewards > greatest_avg_episode_rewards:
            greatest_avg_episode_rewards = avg_episode_rewards  # remember the best evaluation score seen so far
            torch.save(actor_net.state_dict(), 'models/current/best/best-' + model_name + '-actor_net.pkl')
# end for
render = True
num_eval_episodes = 10
obs = env.reset()
episode_rewards = []
episode_reward = 0
while len(episode_rewards) < num_eval_episodes:
    action, _ = actor_net(tensor(obs).view(1, -1).float().to(device))
    action = action.detach().to(cpu_device).numpy().squeeze()
obs, reward, done, _ = env.step(action)
episode_reward += reward
if done:
episode_rewards.append(episode_reward)
episode_reward = 0
obs = env.reset()
if render:
env.render()
episode_rewards = np.asarray(episode_rewards)
episode_length_histogram = plt.hist(episode_rewards)
plt.title("Episode Rewards")
plt.xlabel("Total Reward")
plt.ylabel("Frequency")
plt.savefig("episode_rewards_hist.png")
plt.savefig("models/current/episode_rewards_hist.png")
print("Mean total episode reward:", np.mean(episode_rewards))
|
[
"matplotlib.pyplot.title",
"gym.make",
"matplotlib.pyplot.hist",
"numpy.asarray",
"random.choices",
"torch.cat",
"torch.distributions.normal.Normal",
"torch.nn.Linear",
"torch.exp",
"numpy.mean",
"torch.utils.tensorboard.SummaryWriter",
"torch.device",
"matplotlib.pyplot.ylabel",
"torch.tensor",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig",
"torch.tanh"
] |
[((672, 751), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {'log_dir': '"""./runs/v0-1mil-iter-256-node-hidden-layers-buffer-1mil"""'}), "(log_dir='./runs/v0-1mil-iter-256-node-hidden-layers-buffer-1mil')\n", (685, 751), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((836, 855), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (848, 855), False, 'import torch\n'), ((869, 888), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (881, 888), False, 'import torch\n'), ((3383, 3414), 'gym.make', 'gym.make', (['"""RoboschoolHopper-v1"""'], {}), "('RoboschoolHopper-v1')\n", (3391, 3414), False, 'import gym\n'), ((3503, 3534), 'gym.make', 'gym.make', (['"""RoboschoolHopper-v1"""'], {}), "('RoboschoolHopper-v1')\n", (3511, 3534), False, 'import gym\n'), ((10409, 10436), 'numpy.asarray', 'np.asarray', (['episode_rewards'], {}), '(episode_rewards)\n', (10419, 10436), True, 'import numpy as np\n'), ((10464, 10489), 'matplotlib.pyplot.hist', 'plt.hist', (['episode_rewards'], {}), '(episode_rewards)\n', (10472, 10489), True, 'import matplotlib.pyplot as plt\n'), ((10490, 10518), 'matplotlib.pyplot.title', 'plt.title', (['"""Episode Rewards"""'], {}), "('Episode Rewards')\n", (10499, 10518), True, 'import matplotlib.pyplot as plt\n'), ((10519, 10545), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Total Reward"""'], {}), "('Total Reward')\n", (10529, 10545), True, 'import matplotlib.pyplot as plt\n'), ((10546, 10569), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency"""'], {}), "('Frequency')\n", (10556, 10569), True, 'import matplotlib.pyplot as plt\n'), ((10570, 10609), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""episode_rewards_hist.png"""'], {}), "('episode_rewards_hist.png')\n", (10581, 10609), True, 'import matplotlib.pyplot as plt\n'), ((10610, 10664), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""models/current/episode_rewards_hist.png"""'], {}), "('models/current/episode_rewards_hist.png')\n", (10621, 10664), True, 'import matplotlib.pyplot as plt\n'), ((10701, 10725), 'numpy.mean', 'np.mean', (['episode_rewards'], {}), '(episode_rewards)\n', (10708, 10725), True, 'import numpy as np\n'), ((1060, 1078), 'torch.nn.Linear', 'nn.Linear', (['(15)', '(256)'], {}), '(15, 256)\n', (1069, 1078), False, 'from torch import nn\n'), ((1098, 1117), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(256)'], {}), '(256, 256)\n', (1107, 1117), False, 'from torch import nn\n'), ((1138, 1155), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(3)'], {}), '(256, 3)\n', (1147, 1155), False, 'from torch import nn\n'), ((1181, 1198), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(3)'], {}), '(256, 3)\n', (1190, 1198), False, 'from torch import nn\n'), ((1226, 1245), 'torch.distributions.normal.Normal', 'normal.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (1239, 1245), False, 'from torch.distributions import normal\n'), ((1572, 1590), 'torch.tanh', 'torch.tanh', (['action'], {}), '(action)\n', (1582, 1590), False, 'import torch\n'), ((2058, 2076), 'torch.nn.Linear', 'nn.Linear', (['(18)', '(100)'], {}), '(18, 100)\n', (2067, 2076), False, 'from torch import nn\n'), ((2096, 2115), 'torch.nn.Linear', 'nn.Linear', (['(100)', '(100)'], {}), '(100, 100)\n', (2105, 2115), False, 'from torch import nn\n'), ((2135, 2152), 'torch.nn.Linear', 'nn.Linear', (['(100)', '(3)'], {}), '(100, 3)\n', (2144, 2152), False, 'from torch import nn\n'), ((2208, 2239), 'torch.cat', 'cat', (['(x_state, x_action)'], {'dim': '(1)'}), '((x_state, x_action), dim=1)\n', (2211, 
2239), False, 'from torch import cat\n'), ((2580, 2598), 'torch.nn.Linear', 'nn.Linear', (['(15)', '(100)'], {}), '(15, 100)\n', (2589, 2598), False, 'from torch import nn\n'), ((2618, 2637), 'torch.nn.Linear', 'nn.Linear', (['(100)', '(100)'], {}), '(100, 100)\n', (2627, 2637), False, 'from torch import nn\n'), ((2657, 2674), 'torch.nn.Linear', 'nn.Linear', (['(100)', '(1)'], {}), '(100, 1)\n', (2666, 2674), False, 'from torch import nn\n'), ((5120, 5167), 'random.choices', 'random.choices', (['replay_buffer'], {'k': 'minibatch_size'}), '(replay_buffer, k=minibatch_size)\n', (5134, 5167), False, 'import random\n'), ((1633, 1653), 'torch.exp', 'torch.exp', (['log_stdev'], {}), '(log_stdev)\n', (1642, 1653), False, 'import torch\n'), ((5302, 5316), 'torch.cat', 'cat', (['mb'], {'dim': '(0)'}), '(mb, dim=0)\n', (5305, 5316), False, 'from torch import cat\n'), ((9651, 9678), 'numpy.asarray', 'np.asarray', (['episode_rewards'], {}), '(episode_rewards)\n', (9661, 9678), True, 'import numpy as np\n'), ((1525, 1545), 'torch.exp', 'torch.exp', (['log_stdev'], {}), '(log_stdev)\n', (1534, 1545), False, 'import torch\n'), ((3453, 3471), 'torch.tensor', 'tensor', (['curr_state'], {}), '(curr_state)\n', (3459, 3471), False, 'from torch import tensor\n'), ((8750, 8768), 'torch.tensor', 'tensor', (['next_state'], {}), '(next_state)\n', (8756, 8768), False, 'from torch import tensor\n'), ((8852, 8870), 'torch.tensor', 'tensor', (['curr_state'], {}), '(curr_state)\n', (8858, 8870), False, 'from torch import tensor\n'), ((4577, 4591), 'torch.tensor', 'tensor', (['action'], {}), '(action)\n', (4583, 4591), False, 'from torch import tensor\n'), ((4727, 4745), 'torch.tensor', 'tensor', (['next_state'], {}), '(next_state)\n', (4733, 4745), False, 'from torch import tensor\n'), ((4798, 4810), 'torch.tensor', 'tensor', (['done'], {}), '(done)\n', (4804, 4810), False, 'from torch import tensor\n'), ((1742, 1751), 'torch.tensor', 'tensor', (['(1)'], {}), '(1)\n', (1748, 1751), False, 'from torch import tensor\n'), ((4679, 4693), 'torch.tensor', 'tensor', (['reward'], {}), '(reward)\n', (4685, 4693), False, 'from torch import tensor\n'), ((10094, 10105), 'torch.tensor', 'tensor', (['obs'], {}), '(obs)\n', (10100, 10105), False, 'from torch import tensor\n'), ((9197, 9213), 'torch.tensor', 'tensor', (['test_obs'], {}), '(test_obs)\n', (9203, 9213), False, 'from torch import tensor\n')]
|
"""Embed a 3D scene
in a webpage with x3d"""
from vedo import dataurl, Plotter, Volume, Text3D
plt = Plotter(size=(800,600), bg='GhostWhite')
embryo = Volume(dataurl+'embryo.tif').isosurface().decimate(0.5)
coords = embryo.points()
embryo.cmap('PRGn', coords[:,1]) # add dummy colors along y
txt = Text3D(__doc__, font='Bongas', s=350, c='red2', depth=0.05)
txt.pos(2500, 300, 500)
plt.show(embryo, txt, txt.box(pad=250), axes=1, viewup='z', zoom=1.2)
# This exports the scene and generates 2 files:
# embryo.x3d and an example embryo.html to inspect in the browser
plt.export('embryo.x3d', binary=False)
print("Type: \n firefox embryo.html")
|
[
"vedo.Plotter",
"vedo.Text3D",
"vedo.Volume"
] |
[((102, 143), 'vedo.Plotter', 'Plotter', ([], {'size': '(800, 600)', 'bg': '"""GhostWhite"""'}), "(size=(800, 600), bg='GhostWhite')\n", (109, 143), False, 'from vedo import dataurl, Plotter, Volume, Text3D\n'), ((301, 360), 'vedo.Text3D', 'Text3D', (['__doc__'], {'font': '"""Bongas"""', 's': '(350)', 'c': '"""red2"""', 'depth': '(0.05)'}), "(__doc__, font='Bongas', s=350, c='red2', depth=0.05)\n", (307, 360), False, 'from vedo import dataurl, Plotter, Volume, Text3D\n'), ((153, 183), 'vedo.Volume', 'Volume', (["(dataurl + 'embryo.tif')"], {}), "(dataurl + 'embryo.tif')\n", (159, 183), False, 'from vedo import dataurl, Plotter, Volume, Text3D\n')]
|
import h5py
import os
import argparse
import numpy as np
import pandas as pd
import scanpy as sc
def cluster(args):
"""
    Clustering cells after computing PCA and neighborhood distances.
"""
input = args.input
out = args.out
dpi = args.dpi
figsize = args.figsize
figure_type = args.figure_type
show = args.show
project = args.project if (args.project == "") else ("_" + args.project)
resolution = args.resolution
n_neighbors = args.n_neighbors
n_pcs = args.n_pcs
#method = args.method
#metric = args.metric
color_gene = args.color_gene
key_added = args.key_added
# set scanpy parameters
sc.settings.verbosity = 3 # verbosity: errors (0), warnings (1), info (2), hints (3)
# in scanpy version 1.6.1 tutorial: sc.logging.print_header()
sc.logging.print_version_and_date()
sc.logging.print_versions()
    # default figsize=None means it doesn't change the seaborn-defined default parameters
sc.settings.set_figure_params(dpi=dpi, facecolor='white', figsize=figsize)
adata = sc.read_h5ad(input)
### Computing, embedding, and clustering the neighborhood graph
# defaults are: n_neighbors= 15, n_pcs=None
sc.pp.neighbors(adata, n_neighbors=n_neighbors, n_pcs=n_pcs)
sc.tl.umap(adata)
    # plot umap using raw data: normalized and logarithmized but not regressed out
# sc.pl.umap(adata, color=color, save="_on_raw_"+project+"."+figure_type)
# plot umap using scaled and corrected gene expression
# sc.pl.umap(adata, color=color, use_raw=False, save="_"+project+"."+figure_type)
    # cluster using leiden graph-clustering method
# default resolution=1.0
sc.tl.leiden(adata, resolution=resolution, key_added=key_added)
sc.pl.umap(adata, color=color_gene, show=show, save="_after_leiden"+project+"."+figure_type)
adata.write(out)
def main():
parser = argparse.ArgumentParser(description="Arguments for scRNA-seq Clustering")
# basic parameters
parser.add_argument("-i", "--input", type=str, help="the path of count_after_QC.h5ad file", default="count_after_QC.h5ad")
parser.add_argument("-d", "--dpi", type=int, help="the resolution of the output figure", default=80)
parser.add_argument("-f", "--figure_type", type=str, help="the export type of plots, e.g., png, pdf, or svg", default="pdf")
parser.add_argument("-p", "--project", type=str, help="the project name", default="")
parser.add_argument("-o", "--out", type=str, help="the file name to save the anndata object", default="after_leiden.h5ad")
parser.add_argument("-s", "--figsize", type=float, nargs=2, help="the size of output figure, use 2 numbers, e.g., 2 2")
parser.add_argument("-S", "--show", type=lambda x: (str(x).lower() in ['true', "1", "yes"]), help="block output figures on the screen by providing no, false, or 0")
# umap parmeters
parser.add_argument("-n", "--n_neighbors", type=int, help="the size of local neiborhood for manifold approximation", default=15)
parser.add_argument("-P", "--n_pcs", type=int, help="the number of PCs to use", default=None)
parser.add_argument("-m", "--method", type=str, help="the method for neighborhood graph, either ‘umap’, ‘gauss’, ‘rapids’", default="umap")
parser.add_argument("-M", "--metric", type=str, help="the metric for neighborhood graph, [‘cityblock’, ‘cosine’, ‘euclidean’, ‘l1’, ‘l2’, ‘manhattan’], Literal[‘braycurtis’, ‘canberra’, ‘chebyshev’, ‘correlation’, ‘dice’, ‘hamming’, ‘jaccard’, ‘kulsinski’, ‘mahalanobis’, ‘minkowski’, ‘rogerstanimoto’, ‘russellrao’, ‘seuclidean’, ‘sokalmichener’, ‘sokalsneath’, ‘sqeuclidean’, ‘yule’],", default="euclidean")
# leiden parameters
parser.add_argument("-r", "--resolution", type=float, help="the resolution for leiden", default=1.0)
# color parameters and key names to be stored in adata
parser.add_argument("-C", "--color_gene", type=str, nargs="*", help="define a list of genes (e.g., MAP2 TEME199 TMEM106B), a key of leiden (e.g., 'leiden' or other key_added like 'leiden_0.6'), or both as color hues in umap plot", default="leiden")
# parser.add_argument("-g", "--gene_list", type=str, nargs="+", action="store", dest="list", help="define a list of genes to show in umap, e.g., MAP2 TEME199 NIL", default=['leiden'])
parser.add_argument("-k", "--key_added", type=str, help="the key name of a ledien anaysis to be addeed to anndata", default='leiden')
parser.set_defaults(func=cluster)
args = parser.parse_args()
args.func(args)
print()
print(f"The arguments are {args}")
if __name__ == "__main__":
main()
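# Illustrative command line (assumption: this script is saved as cluster.py; the file names are the
# argparse defaults above and the key/colour names follow the key_added help text):
#   python cluster.py -i count_after_QC.h5ad -o after_leiden.h5ad -r 0.6 -k leiden_0.6 -C leiden_0.6 MAP2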
|
[
"scanpy.tl.umap",
"scanpy.logging.print_versions",
"argparse.ArgumentParser",
"scanpy.settings.set_figure_params",
"scanpy.pp.neighbors",
"scanpy.read_h5ad",
"scanpy.tl.leiden",
"scanpy.logging.print_version_and_date",
"scanpy.pl.umap"
] |
[((823, 858), 'scanpy.logging.print_version_and_date', 'sc.logging.print_version_and_date', ([], {}), '()\n', (856, 858), True, 'import scanpy as sc\n'), ((863, 890), 'scanpy.logging.print_versions', 'sc.logging.print_versions', ([], {}), '()\n', (888, 890), True, 'import scanpy as sc\n'), ((986, 1060), 'scanpy.settings.set_figure_params', 'sc.settings.set_figure_params', ([], {'dpi': 'dpi', 'facecolor': '"""white"""', 'figsize': 'figsize'}), "(dpi=dpi, facecolor='white', figsize=figsize)\n", (1015, 1060), True, 'import scanpy as sc\n'), ((1074, 1093), 'scanpy.read_h5ad', 'sc.read_h5ad', (['input'], {}), '(input)\n', (1086, 1093), True, 'import scanpy as sc\n'), ((1215, 1275), 'scanpy.pp.neighbors', 'sc.pp.neighbors', (['adata'], {'n_neighbors': 'n_neighbors', 'n_pcs': 'n_pcs'}), '(adata, n_neighbors=n_neighbors, n_pcs=n_pcs)\n', (1230, 1275), True, 'import scanpy as sc\n'), ((1280, 1297), 'scanpy.tl.umap', 'sc.tl.umap', (['adata'], {}), '(adata)\n', (1290, 1297), True, 'import scanpy as sc\n'), ((1691, 1754), 'scanpy.tl.leiden', 'sc.tl.leiden', (['adata'], {'resolution': 'resolution', 'key_added': 'key_added'}), '(adata, resolution=resolution, key_added=key_added)\n', (1703, 1754), True, 'import scanpy as sc\n'), ((1759, 1861), 'scanpy.pl.umap', 'sc.pl.umap', (['adata'], {'color': 'color_gene', 'show': 'show', 'save': "('_after_leiden' + project + '.' + figure_type)"}), "(adata, color=color_gene, show=show, save='_after_leiden' +\n project + '.' + figure_type)\n", (1769, 1861), True, 'import scanpy as sc\n'), ((1899, 1972), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Arguments for scRNA-seq Clustering"""'}), "(description='Arguments for scRNA-seq Clustering')\n", (1922, 1972), False, 'import argparse\n')]
|
import sys
from collections import OrderedDict
from makefun import wraps
try: # python 3+
from inspect import signature, Signature
except ImportError:
from funcsigs import signature, Signature
try: # python 3.5+
from typing import Tuple, Callable, Union, Iterable
except ImportError:
pass
from decopatch import function_decorator, DECORATED
from autoclass.utils import read_fields_from_init
@function_decorator
def autoargs(include=None, # type: Union[str, Tuple[str]]
exclude=None, # type: Union[str, Tuple[str]]
f=DECORATED
):
"""
Defines a decorator with parameters, to automatically assign the inputs of a function to self PRIOR to executing
the function. In other words:
```
@autoargs
def myfunc(a):
print('hello')
```
will create the equivalent of
```
def myfunc(a):
self.a = a
print('hello')
```
Initial code from http://stackoverflow.com/questions/3652851/what-is-the-best-way-to-do-automatic-attribute-assignment-in-python-and-is-it-a#answer-3653049
:param include: a tuple of attribute names to include in the auto-assignment. If None, all arguments will be
included by default
:param exclude: a tuple of attribute names to exclude from the auto-assignment. In such case, include should be None
:return:
"""
return autoargs_decorate(f, include=include, exclude=exclude)
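# Illustrative use of the parametrized form (the class below is made up purely for demonstration):
# `exclude` lists constructor arguments that should NOT be auto-assigned to self.
#
#     class Banana(object):
#         @autoargs(exclude=('cache',))
#         def __init__(self, color, cache=None):
#             pass
#
#     b = Banana('yellow')
#     # b.color == 'yellow'; b.cache is never set on the instance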
def autoargs_decorate(func, # type: Callable
include=None, # type: Union[str, Tuple[str]]
exclude=None # type: Union[str, Tuple[str]]
):
# type: (...) -> Callable
"""
Defines a decorator with parameters, to automatically assign the inputs of a function to self PRIOR to executing
the function. This is the inline way to apply the decorator
```
myfunc2 = autoargs_decorate(myfunc)
```
See autoargs for details.
:param func: the function to wrap
:param include: a tuple of attribute names to include in the auto-assignment. If None, all arguments will be
included by default
:param exclude: a tuple of attribute names to exclude from the auto-assignment. In such case, include should be None
:return:
"""
# retrieve and filter the names
selected_names, func_sig = read_fields_from_init(func, include=include, exclude=exclude, caller="@autoargs")
# finally create the new function (a wrapper)
return _autoargs_decorate(func, func_sig, selected_names)
def _autoargs_decorate(func, # type: Callable
func_sig, # type: Signature
att_names # type: Iterable[str]
):
"""
Creates a wrapper around the function `func` so that all attributes in `att_names` are set to `self`
BEFORE executing the function. The original function signature may be needed in some edge cases.
:param func:
:param func_sig:
:param att_names:
:return:
"""
@wraps(func)
def init_wrapper(self, *args, **kwargs):
# bind arguments with signature: not needed anymore in nominal case since we use `makefun.wraps`
# bound_values = func_sig.bind(self, *args, **kwargs)
# apply_defaults(bound_values)
# Assign to self each of the attributes
need_introspect = False
i = -1
for i, att_name in enumerate(att_names):
try:
setattr(self, att_name, kwargs[att_name])
except KeyError:
# this may happen when the att names are BEFORE a var positional
# Switch to introspection mode
need_introspect = True
break
if need_introspect and i >= 0:
bound_values = func_sig.bind(self, *args, **kwargs)
apply_defaults(bound_values)
# noinspection PyUnboundLocalVariable
arg_dict = bound_values.arguments
for att_name in att_names[i:]:
setattr(self, att_name, arg_dict[att_name])
# finally execute the constructor function
return func(self, *args, **kwargs)
# return wrapper
return init_wrapper
if sys.version_info >= (3, 0):
# the function exists, use it
def apply_defaults(bound_values):
bound_values.apply_defaults()
else:
# the `inspect` backport (`funcsigs`) does not implement the function
# TODO when funcsigs implements PR https://github.com/aliles/funcsigs/pull/30 remove this
def apply_defaults(bound_values):
arguments = bound_values.arguments
# Creating a new one and not modifying in-place for thread safety.
new_arguments = []
for name, param in bound_values._signature.parameters.items():
try:
new_arguments.append((name, arguments[name]))
except KeyError:
if param.default is not param.empty:
val = param.default
elif param.kind is param.VAR_POSITIONAL:
val = ()
elif param.kind is param.VAR_KEYWORD:
val = {}
else:
# BoundArguments was likely created by bind_partial
continue
new_arguments.append((name, val))
bound_values.arguments = OrderedDict(new_arguments)
|
[
"collections.OrderedDict",
"autoclass.utils.read_fields_from_init",
"makefun.wraps"
] |
[((2387, 2473), 'autoclass.utils.read_fields_from_init', 'read_fields_from_init', (['func'], {'include': 'include', 'exclude': 'exclude', 'caller': '"""@autoargs"""'}), "(func, include=include, exclude=exclude, caller=\n '@autoargs')\n", (2408, 2473), False, 'from autoclass.utils import read_fields_from_init\n'), ((3073, 3084), 'makefun.wraps', 'wraps', (['func'], {}), '(func)\n', (3078, 3084), False, 'from makefun import wraps\n'), ((5409, 5435), 'collections.OrderedDict', 'OrderedDict', (['new_arguments'], {}), '(new_arguments)\n', (5420, 5435), False, 'from collections import OrderedDict\n')]
|
from operator import itemgetter
from shapefile import Reader
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import cartopy.io.img_tiles as cimgt
import matplotlib.pyplot as plt
from metpy.plots import USCOUNTIES
from statsmodels.nonparametric.smoothers_lowess import lowess
# Constants
EF_COLORS = {
"EFU": "#e0e0e0",
"EF0": "#01B0F1",
"EF1": "#92D14F",
"EF2": "#FFFF00",
"EF3": "#FFC000",
"EF4": "#C00000",
"EF5": "#CB00CC"
}
PADDING = 0.15
# Reading the shapefile of DAT points
shp_pts = Reader("nws_dat_damage_pnts.shp")
info_needed = [
{"storm_time": record[2], "rating": record[9], "windspeed": record[11], "lat": record[14], "lon": record[15]}
for record in shp_pts.records()
]
# Setting up CartoPy plot
stamen_terrain = cimgt.Stamen('terrain-background')
extent = (
min(info_needed, key=itemgetter("lon"))["lon"] - PADDING,
max(info_needed, key=itemgetter("lon"))["lon"] + PADDING,
min(info_needed, key=itemgetter("lat"))["lat"] - PADDING,
max(info_needed, key=itemgetter("lat"))["lat"] + PADDING
)
fig: plt.Figure = plt.figure(figsize=(12, 6))
ax: plt.Axes = fig.add_subplot(1, 1, 1, projection=stamen_terrain.crs)
ax.set_extent(extent)
ax.add_image(stamen_terrain, 10)
ax.add_feature(cfeature.LAND.with_scale("50m"))
ax.add_feature(cfeature.OCEAN.with_scale("50m"))
ax.add_feature(USCOUNTIES.with_scale("500k"))
# Plotting it onto the map
all_lons = []
all_lats = []
for info in info_needed:
all_lons.append(info["lon"])
all_lats.append(info["lat"])
ax.scatter(info["lon"], info["lat"], c=EF_COLORS[info["rating"]], marker="v", transform=ccrs.PlateCarree())
non_linear_fit = lowess(all_lats, all_lons)
ax.plot(non_linear_fit[:, 0], non_linear_fit[:, 1], transform=ccrs.PlateCarree())
plt.show()
|
[
"matplotlib.pyplot.show",
"cartopy.feature.OCEAN.with_scale",
"metpy.plots.USCOUNTIES.with_scale",
"statsmodels.nonparametric.smoothers_lowess.lowess",
"cartopy.feature.LAND.with_scale",
"matplotlib.pyplot.figure",
"cartopy.crs.PlateCarree",
"cartopy.io.img_tiles.Stamen",
"operator.itemgetter",
"shapefile.Reader"
] |
[((537, 570), 'shapefile.Reader', 'Reader', (['"""nws_dat_damage_pnts.shp"""'], {}), "('nws_dat_damage_pnts.shp')\n", (543, 570), False, 'from shapefile import Reader\n'), ((785, 819), 'cartopy.io.img_tiles.Stamen', 'cimgt.Stamen', (['"""terrain-background"""'], {}), "('terrain-background')\n", (797, 819), True, 'import cartopy.io.img_tiles as cimgt\n'), ((1099, 1126), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (1109, 1126), True, 'import matplotlib.pyplot as plt\n'), ((1678, 1704), 'statsmodels.nonparametric.smoothers_lowess.lowess', 'lowess', (['all_lats', 'all_lons'], {}), '(all_lats, all_lons)\n', (1684, 1704), False, 'from statsmodels.nonparametric.smoothers_lowess import lowess\n'), ((1787, 1797), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1795, 1797), True, 'import matplotlib.pyplot as plt\n'), ((1270, 1301), 'cartopy.feature.LAND.with_scale', 'cfeature.LAND.with_scale', (['"""50m"""'], {}), "('50m')\n", (1294, 1301), True, 'import cartopy.feature as cfeature\n'), ((1318, 1350), 'cartopy.feature.OCEAN.with_scale', 'cfeature.OCEAN.with_scale', (['"""50m"""'], {}), "('50m')\n", (1343, 1350), True, 'import cartopy.feature as cfeature\n'), ((1367, 1396), 'metpy.plots.USCOUNTIES.with_scale', 'USCOUNTIES.with_scale', (['"""500k"""'], {}), "('500k')\n", (1388, 1396), False, 'from metpy.plots import USCOUNTIES\n'), ((1767, 1785), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (1783, 1785), True, 'import cartopy.crs as ccrs\n'), ((1639, 1657), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (1655, 1657), True, 'import cartopy.crs as ccrs\n'), ((856, 873), 'operator.itemgetter', 'itemgetter', (['"""lon"""'], {}), "('lon')\n", (866, 873), False, 'from operator import itemgetter\n'), ((918, 935), 'operator.itemgetter', 'itemgetter', (['"""lon"""'], {}), "('lon')\n", (928, 935), False, 'from operator import itemgetter\n'), ((980, 997), 'operator.itemgetter', 'itemgetter', (['"""lat"""'], {}), "('lat')\n", (990, 997), False, 'from operator import itemgetter\n'), ((1042, 1059), 'operator.itemgetter', 'itemgetter', (['"""lat"""'], {}), "('lat')\n", (1052, 1059), False, 'from operator import itemgetter\n')]
|
# Databricks notebook source
# MAGIC %md
# MAGIC
# MAGIC # Spark integration with Azure Cognitive Services
# MAGIC
# MAGIC At Spark + AI Summit 2019, Microsoft announced a new set of models in the SparkML ecosystem that make it easy to leverage the Azure Cognitive Services at terabyte scales. With only a few lines of code, developers can embed cognitive services within your existing distributed machine learning pipelines in Spark ML. Additionally, these contributions allow Spark users to chain or Pipeline services together with deep networks, gradient boosted trees, and any SparkML model and apply these hybrid models in elastic and serverless distributed systems.
# MAGIC
# MAGIC From image recognition to object detection using speech recognition, translation, and text-to-speech, Azure Cognitive Services makes it easy for developers to add intelligent capabilities to their applications in any scenario. This notebook demonstrates the integration of PySpark (using Azure Databricks) with Azure Cognitive Service [Text Analytics](https://azure.microsoft.com/en-us/services/cognitive-services/text-analytics/) to extract valuable information from text data.
# MAGIC
# MAGIC ## Prerequisites
# MAGIC
# MAGIC - Spark 2.4 environment
# MAGIC - You can use Azure Databricks for an integrated Spark environment
# MAGIC - Install required libraries in Spark
# MAGIC - [MMLSpark](https://mmlspark.blob.core.windows.net/website/index.html#install)
# MAGIC - [azure-cognitiveservices-language-textanalytics](https://pypi.org/project/azure-cognitiveservices-language-textanalytics/)
# MAGIC - Create [Azure Cognitive Services multi-service resource](https://docs.microsoft.com/en-us/azure/cognitive-services/cognitive-services-apis-create-account?tabs=multiservice%2Clinux)
# MAGIC - Import [Customers sample dataset](https://github.com/kawo123/azure-databricks/blob/master/data/customers.csv) into Spark environment
# MAGIC
# MAGIC ## References
# MAGIC
# MAGIC - [Spark and Azure Cognitive Services blog](https://azure.microsoft.com/en-us/blog/dear-spark-developers-welcome-to-azure-cognitive-services/)
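# COMMAND ----------
# Illustrative setup cell (assumes a Databricks cluster where `%pip` is available; the package name
# is taken from the prerequisites above). MMLSpark itself is typically attached as a cluster library
# via the install link in the prerequisites rather than installed inline, so it is not shown here.
# %pip install azure-cognitiveservices-language-textanalytics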
# COMMAND ----------
from azure.cognitiveservices.language.textanalytics import TextAnalyticsClient
from msrest.authentication import CognitiveServicesCredentials
from mmlspark.cognitive import TextSentiment
from pyspark.sql.functions import col
# COMMAND ----------
# Obtain Azure Text Analytics endpoint and key. Replace <<TODO>> below with your endpoint and key
textanalytics_endpoint = '<<TODO>>' # TODO
textanalytics_key = '<<TODO>>' # TODO
# Initialize Azure Text Analytics client
client = TextAnalyticsClient(textanalytics_endpoint, CognitiveServicesCredentials(textanalytics_key))
# COMMAND ----------
# Create sample text documents for analysis
docs = [
{ 'id': '1', 'language': 'en', 'text': 'This is awesome!' },
{ 'id': '2', 'language': 'en', 'text': 'This was a waste of my time. The speaker put me to sleep.' },
{ 'id': '3', 'language': 'en', 'text': None },
{ 'id': '4', 'language': 'en', 'text': 'Hello World' }
]
# Submit text documents for sentiment analysis
resp = client.sentiment(documents=docs)
# Print sentiment analysis results
for document in resp.documents:
print("Document Id: ", document.id, ", Sentiment Score: ", "{:.2f}".format(document.score))
# COMMAND ----------
# MAGIC %md
# MAGIC You should observe output similar to below
# MAGIC
# MAGIC ```
# MAGIC Document Id: 1 , Sentiment Score: 1.00
# MAGIC Document Id: 2 , Sentiment Score: 0.11
# MAGIC Document Id: 4 , Sentiment Score: 0.76
# MAGIC ```
# COMMAND ----------
# Read customers csv
df_customers = spark\
.read\
.option('header', True)\
.option('inferSchema', True)\
.csv('/FileStore/tables/customers.csv')
df_customers.show(2)
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC You should see a table with the following columns: `Customer grouping`, `Product ID`, `State`, `Customer category`, `Product market price`, `total market price`, `Notes`, `store comments`, `Customer Address`, `Gender`, `Discount`, `Date`, `Quantity`, `Discount_discrete`
# COMMAND ----------
# Define Sentiment Analysis pipeline
pipe_text_sentiment = (TextSentiment()
.setSubscriptionKey(textanalytics_key)
.setLocation('eastus')
.setLanguage('en')
.setTextCol('store comments')
.setOutputCol("StoreCommentSentimentObj")
.setErrorCol("Errors")
.setConcurrency(10)
)
# Process df_customers with the Sentiment Analysis pipeline
df_customers_sentiment = pipe_text_sentiment.transform(df_customers)
df_customers_sentiment.show(2)
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC After the customer dataframe is processed by the sentiment analysis pipeline, you should see 2 additional columns in the table: `Errors` and `StoreCommentSentimentObj`. The `Errors` column contains any error message that the text analytics pipeline encounters. The `StoreCommentSentimentObj` column contains an array of sentiment objects returned by the Text Analytics service. The sentiment object includes the sentiment score and any error messages that the Text Analytics engine encounters.
# COMMAND ----------
# Extract sentiment score from store comment sentiment complex objects
df_customers_sentiment_numeric = (df_customers_sentiment
.select('*', col('StoreCommentSentimentObj').getItem(0).getItem('score').alias('StoreCommentSentimentScore'))
.drop('Errors', 'StoreCommentSentimentObj')
)
df_customers_sentiment_numeric.show(2)
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC You should now see a new column `StoreCommentSentimentScore` which contains the numeric sentiment scores of store comments
|
[
"mmlspark.cognitive.TextSentiment",
"pyspark.sql.functions.col",
"msrest.authentication.CognitiveServicesCredentials"
] |
[((2663, 2710), 'msrest.authentication.CognitiveServicesCredentials', 'CognitiveServicesCredentials', (['textanalytics_key'], {}), '(textanalytics_key)\n', (2691, 2710), False, 'from msrest.authentication import CognitiveServicesCredentials\n'), ((5287, 5318), 'pyspark.sql.functions.col', 'col', (['"""StoreCommentSentimentObj"""'], {}), "('StoreCommentSentimentObj')\n", (5290, 5318), False, 'from pyspark.sql.functions import col\n'), ((4185, 4200), 'mmlspark.cognitive.TextSentiment', 'TextSentiment', ([], {}), '()\n', (4198, 4200), False, 'from mmlspark.cognitive import TextSentiment\n')]
|
import h5py
import os, sys, glob
import numpy as np
import plotly.offline as offline
from preprocessing import analysis_pp
from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils
from scipy.stats.stats import power_divergence
from scipy.stats import ttest_ind_from_stats
import csv
import scipy.signal as ss
import math
import time
from pandas import DataFrame
from scipy import optimize
import pandas as pd
import matplotlib.pyplot as plt
from collections import deque
import powerlaw
import pylab
from matplotlib.font_manager import FontProperties
from matplotlib import rc
from scipy import stats
from scipy.stats import skewnorm
import plotly.graph_objs as go
def generate_astro_single_plots(astro_plotter, astroA, output_folder):
output_experiment_path = astro_plotter.get_output_experiment_path(astroA, output_folder)
print('Plotting behaviours basic...')
#Behaviour basic
figs_basic_plots = astro_plotter.get_behaviour_basic_plots(astroA)
for fig_k in figs_basic_plots.keys():
saving_utils.save_plotly_fig(figs_basic_plots[fig_k], os.path.join(output_experiment_path, 'plots', 'behaviours_basic', '{}'.format(fig_k)), width=1000, height=400)
print('Plotting behaviour heatmaps...')
#Behaviour heatmaps
fig_heatmap_grids, fig_heatmap_dff_grids = astro_plotter.get_behaviour_contour_plots(astroA)
heatmap_grid_base_path = os.path.join(output_experiment_path, 'plots', 'behaviour_heatmaps')
for k in fig_heatmap_grids.keys():
saving_utils.save_plotly_fig(fig_heatmap_grids[k], os.path.join(heatmap_grid_base_path, k))
saving_utils.save_plotly_fig(fig_heatmap_dff_grids[k], os.path.join(heatmap_grid_base_path, k + 'dff'))
print('Plotting behaviour heatmaps (saturation)...')
fig_heatmap_grids, fig_heatmap_dff_grids = astro_plotter.get_behaviour_contour_threshold_plots(astroA, threshold=0.5)
heatmap_grid_base_path = os.path.join(output_experiment_path, 'plots', 'behaviour_heatmaps_saturation')
for k in fig_heatmap_grids.keys():
saving_utils.save_plotly_fig(fig_heatmap_grids[k], os.path.join(heatmap_grid_base_path, k))
saving_utils.save_plotly_fig(fig_heatmap_dff_grids[k], os.path.join(heatmap_grid_base_path, k + 'dff'))
print('Plotting borders...')
#Borders plot
fig_border = astro_plotter.get_border_plot(astroA)
saving_utils.save_plotly_fig(fig_border, os.path.join(output_experiment_path, 'plots' , 'borders', 'border'))
print('Plotting behaviour activity bar plot...')
behaviour_activity_path = os.path.join(output_experiment_path, 'plots', 'behaviour_activity', 'activity')
fig_behaviour_activity = astro_plotter.get_behaviour_activity_plot(astroA)
saving_utils.save_plotly_fig(fig_behaviour_activity, behaviour_activity_path, width=1200, height=800)
print('Plotting behaviour event size bar plot...')
behaviour_area_path = os.path.join(output_experiment_path, 'plots', 'behaviour_areas', 'areas')
fig_behaviour_area = astro_plotter.get_behaviour_area_plot(astroA)
saving_utils.save_plotly_fig(fig_behaviour_area, behaviour_area_path)
print('Plotting behaviour amplitude size bar plot...')
behaviour_amplitude_path = os.path.join(output_experiment_path, 'plots', 'signal_amplitudes', 'amplitudes')
fig_behaviour_amplitude = astro_plotter.get_behaviour_amplitude_bar_plot(astroA)
saving_utils.save_plotly_fig(fig_behaviour_amplitude, behaviour_amplitude_path)
def generate_astro_comparison_plots(astro_plotter, astroA_l, output_folder, name_tag, astroA_l_pairs=None, astroA_long_l=None, n_chunks=3):
output_experiment_path_all_comparison, _, _, astroA_l_s = astro_plotter.setup_comparison_all_vars(astroA_l, os.path.join(output_folder, name_tag))
print('Plotting sizes histogram dataset comparison for each behaviour')
bh_l = ['rest', 'stick_rest', 'running', 'stick_run_ind_15']
astroA_l_filt = []
bh_l_test = ['rest', 'running', 'stick_run_ind_15', 'stick_rest']
for astroA in astroA_l:
include = True
for bh in bh_l_test:
if bh not in astroA.indices_d.keys() or bh not in astroA.activity_ratios.keys():
include = False
print(':(', astroA.print_id, bh)
if include:
astroA_l_filt.append(astroA)
day_0_1_pairs = []
if astroA_l_pairs is not None:
for astroA_l_pair in astroA_l_pairs:
if astroA_l_pair[1].day == 1:
day_0_1_pairs.append(astroA_l_pair)
print('Comparing behaviour distribution plots...')
configs = [
{'measure': 'area', 'range': [None, 60], 'nbins' : 20, 'bh_l' : ['rest', 'stick_rest', 'running', 'stick_run_ind_15'], 'mode' : 'MOE'},
{'measure': 'dffMax2', 'range': [0.6, 5], 'nbins' : 20, 'bh_l' : ['rest', 'stick_rest', 'running', 'stick_run_ind_15'], 'mode' : 'MOE'},
{'measure': 'duration', 'range' : [None, 30], 'nbins' : 10, 'bh_l' : ['rest', 'stick_rest', 'running', 'stick_run_ind_15'], 'mode' : 'MOA'}
]
for config in configs:
behaviour_l = config['bh_l']
measure = config['measure']
min_measure, max_measure = config['range']
mode = config['mode']
n_bins = config['nbins']
confidence = True
try:
measure_name = aqua_utils.get_measure_names(measure)
path = os.path.join(output_experiment_path_all_comparison, 'plots', '{}_histogram_bh_comparison'.format(measure_name), 'behaviours-{}-nbins={}-min={}-max={}-conf={}-mode={}'.format('_'.join(behaviour_l), n_bins, min_measure, max_measure, confidence, mode))
plot, stats_d = astro_plotter.measure_distribution_bh_compare_plot(astroA_l, behaviour_l, measure=measure, num_bins=n_bins, min_measure=min_measure, max_measure=max_measure, measure_name=measure_name, confidence=confidence, with_stats=True, mode=mode)
if measure == 'duration':
plotly_utils.apply_fun_axis_fig(plot, lambda x : x / astroA_l[0].fr, axis='x')
if measure == 'area':
saving_utils.save_pth_plt_l_log([plot], [path], axis='x')
else:
saving_utils.save_plotly_fig(plot, path)
#saving_utils.save_pth_plt_l_log([plot], [path], axis='y')
#Save results in text file
for i, name in enumerate(stats_d['names']):
#Create folder
data_folder_path = path
try:
os.makedirs(path)
except:
pass
temp_d = {k : stats_d[k][i] for k in stats_d.keys()}
saving_utils.save_csv_dict(temp_d, os.path.join(data_folder_path, '{}.csv'.format(name)), key_order=['names', 'x', 'mean', 'conf_95', 'std'])
np.savetxt(os.path.join(data_folder_path, '{}-data.csv'.format(name)), np.array(temp_d['data']).transpose(), delimiter=",")
except Exception as e:
print('Exception: {}'.format(e))
#------------------------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------
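    # The two (before, after) windows below are expressed in frames: seconds around a behaviour
    # transition multiplied by the recording frame rate `fr` (3 s / 6 s and 2 s / 4 s respectively).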
delay_ranges_pairs = [[3*astroA_l[0].fr, 6*astroA_l[0].fr], [2*astroA_l[0].fr, 4*astroA_l[0].fr]]
delay_ranges_pairs = [[int(v[0]), int(v[1])] for v in delay_ranges_pairs]
before_range_3, after_range_6 = delay_ranges_pairs[0]
before_range_2, after_range_4 = delay_ranges_pairs[1]
print('Alt Proportion plots...')
# Rest to run plots
rest_to_run_setting = {
'before_bh':'rest_semi_exact',
'inds_bh':'running_exact_start',
'after_bh':'running_semi_exact',
'before_range' : before_range_3,
'after_range' : after_range_6,
'fit': False,
'delay_step_size': 10,
'confidence': True}
# Rest to run - PROPORTIONS
__save_astro_transition_plot(astro_plotter, astroA_l, setting=rest_to_run_setting, plot_type='proportions',
path=os.path.join(output_experiment_path_all_comparison, 'plots', 'rest_to_run_proportions'))
__save_astro_transition_plot(astro_plotter, astroA_l, setting=rest_to_run_setting, plot_type='measure', measure='dffMax2default',
path=os.path.join(output_experiment_path_all_comparison, 'plots', 'rest_to_run_amplitudes'))
__save_astro_transition_plot(astro_plotter, astroA_l, setting=rest_to_run_setting, plot_type='measure', measure='time_s',
path=os.path.join(output_experiment_path_all_comparison, 'plots', 'rest_to_run_durations'))
__save_astro_transition_plot(astro_plotter, astroA_l, setting=rest_to_run_setting, plot_type='measure', measure='area',
path=os.path.join(output_experiment_path_all_comparison, 'plots', 'rest_to_run_sizes'))
rest_to_run_setting['delay_step_size'] = 5
__save_astro_transition_plot(astro_plotter, astroA_l, setting=rest_to_run_setting, plot_type='behaviour', bh_measure='speed',
path=os.path.join(output_experiment_path_all_comparison, 'plots', 'rest_to_run_speed'))
# Run to rest plots
run_to_rest_setting = {
'before_bh':'running_semi_exact',
'inds_bh':'rest_start',
'after_bh':'rest_semi_exact',
'before_range' : before_range_3,
'after_range' : after_range_6,
'fit': False,
'delay_step_size': 10,
'confidence': True}
__save_astro_transition_plot(astro_plotter, astroA_l, setting=run_to_rest_setting, plot_type='proportions',
path=os.path.join(output_experiment_path_all_comparison, 'plots', 'run_to_rest_proportions'))
__save_astro_transition_plot(astro_plotter, astroA_l, setting=run_to_rest_setting, plot_type='measure', measure='dffMax2default',
path=os.path.join(output_experiment_path_all_comparison, 'plots', 'run_to_rest_amplitudes'))
__save_astro_transition_plot(astro_plotter, astroA_l, setting=run_to_rest_setting, plot_type='measure', measure='time_s',
path=os.path.join(output_experiment_path_all_comparison, 'plots', 'run_to_rest_durations'))
__save_astro_transition_plot(astro_plotter, astroA_l, setting=run_to_rest_setting, plot_type='measure', measure='area',
path=os.path.join(output_experiment_path_all_comparison, 'plots', 'run_to_rest_sizes'))
run_to_rest_setting['delay_step_size'] = 5
__save_astro_transition_plot(astro_plotter, astroA_l, setting=run_to_rest_setting, plot_type='behaviour', bh_measure='speed',
path=os.path.join(output_experiment_path_all_comparison, 'plots', 'run_to_rest_speed'))
# Run-stick-run plots
run_stick_run_setting = {
'before_bh':'running_semi_exact',
'inds_bh':'stick_exact_start',
'after_bh':'running_semi_exact',
'before_range' : before_range_2,
'after_range' : after_range_4,
'fit': False,
'delay_step_size': 10,
'confidence': True}
__save_astro_transition_plot(astro_plotter, astroA_l, setting=run_stick_run_setting, plot_type='proportions',
path=os.path.join(output_experiment_path_all_comparison, 'plots', 'run_stick_run_proportions'))
__save_astro_transition_plot(astro_plotter, astroA_l, setting=run_stick_run_setting, plot_type='measure', measure='dffMax2default',
path=os.path.join(output_experiment_path_all_comparison, 'plots', 'run_stick_run_amplitudes'))
__save_astro_transition_plot(astro_plotter, astroA_l, setting=run_stick_run_setting, plot_type='measure', measure='time_s',
path=os.path.join(output_experiment_path_all_comparison, 'plots', 'run_stick_run_durations'))
__save_astro_transition_plot(astro_plotter, astroA_l, setting=run_stick_run_setting, plot_type='measure', measure='area',
path=os.path.join(output_experiment_path_all_comparison, 'plots', 'run_stick_run_sizes'))
__save_astro_transition_plot(astro_plotter, astroA_l, setting=run_stick_run_setting, plot_type='behaviour', bh_measure='speed',
path=os.path.join(output_experiment_path_all_comparison, 'plots', 'run_stick_run_speed'))
#------------------------------------------------------------------------------------------------------------------
print('--------------------------------------------------------------------------------------------------')
print('Distribution of pixel values real vs fake...')
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'pixel_distribution')
x_l = []
y_l = []
name_l = [astroA.print_id for astroA in astroA_l]
for astroA in astroA_l:
grid = astroA.event_grids_1min['default']
grid = np.interp(grid, (grid.min(), grid.max()), (0, 1))
grid_flat = grid.flatten()
grid_flat_nz = grid_flat[grid_flat != 0]
hist, bin_edges = np.histogram(grid_flat_nz, bins=20, range=(0,1), density=True)
hist = hist * (bin_edges[1] - bin_edges[0])
print('HIST SUM', np.sum(hist))
x_l = bin_edges[:-1]
y_l.append(hist)
y_l_fmt = []
for i in range(len(y_l[0])):
y_l_fmt.append([y[i] for y in y_l])
plot_path = os.path.join(path, 'real')
fig, stats_d = plotly_utils.plot_scatter_error(x_l, y_l_fmt, x_title='Pixel intensity percentile', y_title='Frequency (Density)', exp_fit=True, with_details=True)
saving_utils.save_plotly_fig(fig, plot_path)
df_data = DataFrame(np.array(stats_d['data']).T, columns=x_l, index=name_l)
df_stats = DataFrame([stats_d['mean'], stats_d['conf_95'], stats_d['fit']], columns=x_l, index=['mean', 'conf_95', 'fit'])
df_data.to_csv(plot_path + '-data.csv')
df_stats.to_csv(plot_path +'-stats.csv')
sample_l_all = []
for astroA in astroA_l:
d = astro_plotter.get_individual_heatmaps_threshold_scaled(astroA, bh='default', threshold=1, num_samples=1, dff_mode=False, with_arr=True)
sample_l_all.append(d['arrs_d']['arr_r'][0])
x_l = []
y_l = []
for grid in sample_l_all:
grid = np.interp(grid, (grid.min(), grid.max()), (0, 1))
grid_flat = grid.flatten()
grid_flat_nz = grid_flat[grid_flat != 0]
#Normalize values to 1
grid_flat_nz /= np.max(grid_flat_nz)
hist, bin_edges = np.histogram(grid_flat_nz, bins=20, range=(0,1), density=True)
hist = hist * (bin_edges[1] - bin_edges[0])
print('HIST SUM', np.sum(hist))
x_l = bin_edges[:-1]
y_l.append(hist)
y_l_fmt = []
for i in range(len(y_l[0])):
y_l_fmt.append([y[i] for y in y_l])
plot_path = os.path.join(path, 'fake')
fig, stats_d = plotly_utils.plot_scatter_error(x_l, y_l_fmt, x_title='Pixel intensity percentile', y_title='Frequency (Density)', exp_fit=False, with_details=True)
saving_utils.save_plotly_fig(fig, plot_path)
df_data = DataFrame(np.array(stats_d['data']).T, columns=x_l)
df_stats = DataFrame([stats_d['mean'], stats_d['conf_95']], columns=x_l, index=['mean', 'conf_95'])
df_data.to_csv(plot_path + '-data.csv')
df_stats.to_csv(plot_path +'-stats.csv')
#------------------------------------------------------------------------------------------------------------------
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'power_law_fit_sizes_distribution')
path = path +'/'
saving_utils.generate_directory_path(path)
pylab.rcParams['xtick.major.pad']='8'
pylab.rcParams['ytick.major.pad']='8'
rc('font', family='sans-serif')
rc('font', size=10.0)
rc('text', usetex=False)
panel_label_font = FontProperties().copy()
panel_label_font.set_weight("bold")
panel_label_font.set_size(12.0)
panel_label_font.set_family("sans-serif")
fig, x, y_l, all_events_measure_l = astro_plotter.measure_distribution_plot(astroA_l, 'default', 'area', num_bins=10, min_measure=None, max_measure=None, measure_name='area', mode='MOE', with_measure_values=True)
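    # xmin below is the lower cutoff handed to powerlaw.Fit: only event sizes >= xmin are used when
    # fitting and comparing the power-law, lognormal and exponential tails plotted further down.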
xmin=5
data_np = np.array(all_events_measure_l)
fit = powerlaw.Fit(data_np, discrete=True, xmin=xmin)
####
fig = fit.plot_ccdf(linewidth=3, label='Empirical Data')
fit.power_law.plot_ccdf(ax=fig, color='r', linestyle='--', label='Power law fit')
fit.lognormal.plot_ccdf(ax=fig, color='g', linestyle='--', label='Lognormal fit')
fit.exponential.plot_ccdf(ax=fig, color='b', linestyle='--', label='Exponential fit')
####
fig.set_ylabel(u"p(X≥x)")
fig.set_xlabel("Size µm^2")
handles, labels = fig.get_legend_handles_labels()
fig.legend(handles, labels, loc=3)
figname = 'EmpiricalvsFits'
plt.savefig(os.path.join(path, figname+'.svg'), bbox_inches='tight')
plt.savefig(os.path.join(path, figname+'.png'), bbox_inches='tight')
#print('POWER LAW VS LOG NORMAL', fit.distribution_compare('power_law', 'lognormal'))
#print('POWER LAW VS EXPONENTIAL cutoff at {}µm**2'.format(xmin), fit.distribution_compare('power_law', 'exponential'))
#print('POWERLAW FUNCTION: ~x**(-{})'.format(fit.power_law.alpha))
#------------------------------------------------------------------------------------------------------------------
plt.ioff()
print('Plotting Size vs Time correlation plot...')
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'size_v_time_corr_ALL')
path = path+'/'
    print('Generating directory path', path + '/')
saving_utils.generate_directory_path(path)
areas_all = []
times_all = []
for astroA in astroA_l:
areas_all.extend(np.log(astroA.res_d['area']))
times_all.extend(astroA.res_d['time_s'])
areas_all = np.array(areas_all)
times_all = np.array(times_all)
r, p = stat_utils.get_pearsonr(times_all, areas_all)
df = pd.DataFrame({'Size': areas_all, 'Time': times_all})
title ='Size vs Time correlation plot'
text = 'r = {}, p < {}'.format(general_utils.truncate(r, 2), p)
for kind in ['reg', 'hex', 'kde']:
plotly_utils.seaborn_joint_grid(df, 'Size', 'Time', kind=kind, text=text)
plt.savefig(os.path.join(path, '{}.svg'.format(kind)))
plt.savefig(os.path.join(path, '{}.png'.format(kind)))
#------------------------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------
print('Plotting correlation of splitted plots in 3 parts...')
save_folder = os.path.join(output_experiment_path_all_comparison, 'data', 'split_correlation_all')
plot_folder = os.path.join(output_experiment_path_all_comparison, 'plots', 'split_correlation_all')
save_splits_pkl_path = os.path.join(save_folder, 'between_splits.pkl')
save_day_splits_pkl_path = os.path.join(save_folder, 'between_days.pkl')
save_random_pkl_path = os.path.join(save_folder, 'random.pkl')
save_bh_splits_pkl_path = os.path.join(save_folder, 'between_rest_run.pkl')
#1 random simulations
    #2 correlation between splits within the same recording: day 0 is split into 3 chunks and the
    #   chunks are correlated with each other (same for day 1), i.e. the variable is the split, not the day
    #3 correlation between splits across days: day 0 and day 1 are each split, and the chunks are
    #   compared between the two days
#'split_correlation_all'
    #4 correlation between splits across the rest-run behaviours (rest vs running within the same recording)
for bh in ['rest']:
#2
fig, res_splits_l = astro_plotter.get_between_split_split_xcorr(astroA_long_l, bh=bh, save_pkl_path=save_splits_pkl_path, n_chunks=n_chunks)
#3
fig_2, res_day_splits_l = astro_plotter.get_between_day_split_xcorr(day_0_1_pairs, bh=bh, save_pkl_path=save_day_splits_pkl_path, n_chunks=n_chunks)
#4
fig_3, res_bh_splits_l = astro_plotter.get_between_bh_split_xcorr(astroA_long_l, bh_pair=['rest','running'], save_pkl_path=save_bh_splits_pkl_path, n_chunks=n_chunks)
#1
if os.path.isfile(save_random_pkl_path):
random_l = saving_utils.load_pickle(save_random_pkl_path)
else:
random_l = []
for astroA in astroA_long_l:
random_l.extend(astro_plotter.get_random_corrs_self(astroA, bh, n_fake_samples=3))
if save_random_pkl_path is not None:
saving_utils.save_pickle(random_l, save_random_pkl_path)
x = ['Random', 'Self splits', 'Rest-Run splits', 'Day 0-1 Splits']
y = [random_l, res_splits_l, res_bh_splits_l, res_day_splits_l]
fig, stats_d = plotly_utils.plot_point_box_revised(x, y, title='Split correlations (between splits)- {}'.format(bh), x_title='', y_title='Xcorr value', with_stats=True)
saving_utils.save_plotly_fig(fig, os.path.join(plot_folder, 'splits'))
saving_utils.dict_to_csv(stats_d, os.path.join(plot_folder, 'splits' + '.csv'))
#saving_utils.save_csv_dict(stats_d, os.path.join(plot_folder, 'splits' + '.csv'), key_order=['x', 'mean', 'conf_95'])
results_dict = {x[i] : y[i] for i in range(len(x))}
saving_utils.dict_to_csv(results_dict, os.path.join(plot_folder, 'splits-data' + '.csv'))
#results_dict['x'] = x
#key_order = ['x']
#key_order.extend(x)
#saving_utils.save_csv_dict(results_dict, os.path.join(plot_folder, 'splits_data' + '.csv'), key_order=key_order)
#------------------------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------
    print('Plotting correlation of self-split plots...')
#STEP 1
#Take only long duration astrocytes
#Set maximum length of astrocyte duration to be 70min
#Then apply splits with xcorr
data_save_path = os.path.join(output_experiment_path_all_comparison, 'data', 'splits_self_all')
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'splits_self_all', 'self_all')
y_l_l = []
x_l = []
minute_frame_splits_l = [35, 30, 25, 20, 15, 10, 5, 2]
cut_duration = 70
param_str = 'cut_{}-'.format(cut_duration) + 'splits_{}-'.format('_'.join([str(m) for m in minute_frame_splits_l]))
name_l = []
for i, astroA in enumerate(astroA_long_l):
curr_save_path = os.path.join(data_save_path, 'id_{}-{}.pkl'.format(astroA.print_id, param_str))
res_d = astro_plotter.get_compare_full_self_results_alt(astroA, cut_duration_min=cut_duration, minute_frame_splits_l=minute_frame_splits_l, save_pkl_path=curr_save_path)
y_l_l.append(res_d['y'])
x_l.append(res_d['x'])
name_l.append(astroA.print_id)
fig, stats_d = plotly_utils.plot_scatter_mult_with_avg(x_l[0], y_l_l, None, name_l, mode='lines', title='Splits self', x_title='Splits (minutes)', y_title='Correlation',
xrange=None, yrange=None, confidence=True, with_stats=True, point_box=True, exclude_non_avg_conf=True)
print(path)
saving_utils.save_plotly_fig(fig, path)
df_data_m = DataFrame(stats_d['mean_l_l'], columns=stats_d['x'], index=stats_d['names'])
df_ci = DataFrame(stats_d['conf_95'], columns=stats_d['x'], index=stats_d['names'])
df_mean = DataFrame([stats_d['mean'], stats_d['mean_conf']], columns=stats_d['x'], index=['mean', 'conf_95'])
df_data_m.to_csv(path + '-data_means.csv')
df_ci.to_csv(path + '-data_ci.csv')
df_mean.to_csv(path + '-mean_and_CI.csv')
#------------------------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------
names_l = ['amplitude', 'size', 'duration']
measure_l = ['dffMax2', 'area', 'time_s' ]
names_l = ['Event number (per minute)', 'amplitude', 'size', 'duration']
measure_l = [None, 'dffMax2', 'area', 'time_s']
bh_list_pairs = [['rest', 'running'], ['rest', 'stick_rest'], ['running', 'stick_run_ind_15']]
bh_list_pairs_names = ['rest_run', 'rest_rest_stick', 'run_run_stick']
for j, bh_list_pair in enumerate(bh_list_pairs):
for i, measure in enumerate(measure_l):
plot_path = os.path.join(output_experiment_path_all_comparison, 'plots', 'transition_dots_{}'.format(bh_list_pairs_names[j]), '{}'.format('dots_'+names_l[i]))
if 'stick_rest' in bh_list_pair:
plot, stats_d = astro_plotter.get_measure_all_dot_plot(astroA_l_filt, measure, bh_list=bh_list_pair)
else:
plot, stats_d = astro_plotter.get_measure_all_dot_plot(astroA_l, measure, bh_list=bh_list_pair)
saving_utils.save_plotly_fig(plot, plot_path)
with open(os.path.join(plot_path + '-data.csv'), mode='w') as csv_file:
writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
stats_d['names']
l = ['']
l.extend(stats_d['x'])
l.extend(['conf_0', 'conf_1'])
writer.writerow(l)
for i in range(len(stats_d['names'])):
l = [stats_d['names'][i]]
l.extend(stats_d['mean_l_l'][i])
if 'conf_95' in stats_d:
l.extend(stats_d['conf_95'][i])
writer.writerow(l)
with open(os.path.join(plot_path + '.csv'), mode='w') as csv_file:
writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
writer.writerow('')
writer.writerow(['mean_0', 'mean_1', 'mean_conf_0', 'mean_conf_1'])
l = []
l.extend(stats_d['mean'])
l.extend(stats_d['mean_conf'])
writer.writerow(l)
#------------------------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'pdf_norm_fit')
estimates_d = {}
all_event_values = {}
for measure in ['dffMax2' , 'time_s']:
if measure == 'dffMax2':
num_bins = 200
max_filter_val = 3
elif measure == 'time_s':
num_bins = 30
max_filter_val = 2.91
estimates_d[measure] = {}
all_event_values[measure] = {}
for bh in ['rest', 'running']:
fig, x, y_l, all_events_measure_l = astro_plotter.measure_distribution_plot(astroA_l, bh, measure, num_bins=10, min_measure=None, max_measure=None, measure_name=aqua_utils.get_measure_names([measure]), mode='MOE', with_measure_values=True)
all_events_measure_l = np.array(all_events_measure_l)
all_events_measure_l = all_events_measure_l[all_events_measure_l < max_filter_val]
a_estimate, loc_estimate, scale_estimate = skewnorm.fit(all_events_measure_l)
x = np.linspace(np.min(all_events_measure_l), np.max(all_events_measure_l), 100)
p = skewnorm.pdf(x, a_estimate, loc_estimate, scale_estimate)
estimates_d[measure][bh] = [a_estimate, loc_estimate, scale_estimate, np.min(x), np.max(x)]
all_event_values[measure][bh] = np.copy(np.array(all_events_measure_l))
fig = plotly_utils.plot_scatter_histogram(x=x, y_hist=all_events_measure_l, y_scatter=p, num_bins=num_bins)
mean, var, skew, kurt = skewnorm.stats(a=a_estimate, loc=loc_estimate, scale=scale_estimate, moments='mvsk')
a, b = np.histogram(all_events_measure_l, bins=num_bins, range=(0, np.max(x)), density=True)
id_ = measure + '_' + bh
temp_d = {}
temp_d['Parameters'] = ["a={}".format(a_estimate), "loc={}".format(loc_estimate), "scale={}".format(scale_estimate)]
temp_d['Properties'] = ["MEAN={}".format(mean), "VAR={}".format(var), "SKEW={}".format(skew),"KURT={}".format(kurt)]
#print(temp_d)
saving_utils.save_csv_dict(temp_d, os.path.join(path, id_ + '.csv'), key_order=['Parameters', 'Properties'])
saving_utils.save_plotly_fig(fig, os.path.join(path, id_))
#print('skewnorm.pdf(x, a) = 2 * norm.pdf(x) * norm.cdf(a*x)')
#print('skewnorm.pdf(x, a, loc, scale) is identically equivalent to skewnorm.pdf(y, a) / scale with y = (x - loc) / scale')
with_values = True
for measure in ['dffMax2', 'time_s']:
est_rest = estimates_d[measure]['rest']
est_running = estimates_d[measure]['running']
if measure == 'dffMax2':
x_min = 0.6
x_max = 3
nbins = 100
elif measure == 'time_s':
x_min = 0
x_max = 2.91
else:
raise NotImplementedError()
x = np.linspace(x_min, x_max, 500)
if measure == 'duration' or measure == 'time_s':
tempset = set(list(all_event_values[measure]['rest'])).union(set(list(all_event_values[measure]['running'])))
tempset.add(0)
x_val_bins = np.sort(np.array(list(tempset)))
x_val_bins = x_val_bins[x_val_bins <= x_max]
x_val_bins = x_val_bins[x_val_bins >= x_min]
else:
x_val_bins = np.linspace(x_min, x_max, nbins)
#Add bin size / 2 to align better
x_val_diff = 0
if measure == 'duration' or measure == 'time_s':
x_val_diff = (x_val_bins[1] - x_val_bins[0]) / 2
p_rest = skewnorm.pdf(x, est_rest[0], est_rest[1], est_rest[2])
p_running = skewnorm.pdf(x, est_running[0], est_running[1], est_running[2])
if with_values:
vals_running, vals_x_running = np.histogram(all_event_values[measure]['running'][all_event_values[measure]['running'] < x_max], bins=x_val_bins, density=True)
vals_rest, vals_x_rest = np.histogram(all_event_values[measure]['rest'][all_event_values[measure]['rest'] < x_max], bins=x_val_bins, density=True)
#Shift by 1 so they look more aligned(due to large bin sizes)
#e.g. value at 0 is values between 0-(0+bin_size)
#We are essentially moving the point of values lets say [0, 1] to 0 and then with diff to 0.5
vals_running = vals_running[1:]
vals_rest = vals_rest[1:]
measure_name = aqua_utils.get_measure_names([measure])
fig = plotly_utils.plot_scatter_mult(x_l=[x, x, vals_x_rest + x_val_diff, vals_x_running + x_val_diff], y_l_l=[p_rest, p_running, vals_rest, vals_running], mode_l=['lines','lines', 'markers','markers'], name_l=['rest','running', 'rest-true', 'running-true'], confidence=False, with_stats=False, title='Skewed distribution: {}'.format(measure_name), x_title=measure_name, y_title='p(X)')
else:
measure_name = aqua_utils.get_measure_names([measure])
fig = plotly_utils.plot_scatter_mult(x_l=[x, x], y_l_l=[p_rest, p_running], name_l=['rest','running'], confidence=False, with_stats=False, title='Skewed distribution: {}'.format(measure_name), x_title=measure_name, y_title='p(X)')
id_ = 'measure={}-withvalues={}'.format(measure_name, with_values)
saving_utils.save_plotly_fig(fig, os.path.join(path, id_))
def generate_axon_plots(axon_plotter, AA_l, output_folder):
print('---TRANSITION PROPORTION DELAYS PLOT ALL---')
output_experiment_path_all_comparison = os.path.join(output_folder, 'axon_all')
delay_ranges_pairs = [[3*AA_l[0].fr, 6*AA_l[0].fr], [2*AA_l[0].fr, 4*AA_l[0].fr]]
delay_ranges_pairs = [[int(v[0]), int(v[1])] for v in delay_ranges_pairs]
before_range_3, after_range_6 = delay_ranges_pairs[0]
before_range_2, after_range_4 = delay_ranges_pairs[1]
print('Alt Proportion plots...')
rest_to_run_setting = {
'before_bh':'rest_semi_exact',
'inds_bh':'running_exact_start',
'after_bh':'running_semi_exact',
'before_range' : before_range_3,
'after_range' : after_range_6,
'fit': True,
'delay_step_size': 10,
'confidence': True}
__save_axon_transition_plot(axon_plotter=axon_plotter,
AA_l=AA_l,
setting=rest_to_run_setting,
plot_type='behaviour',
path=os.path.join(output_experiment_path_all_comparison, 'plots', f'rest_to_run_speed'),
bh_measure='speed')
__save_axon_transition_plot(axon_plotter=axon_plotter,
AA_l=AA_l,
setting=rest_to_run_setting,
plot_type='proportions_stick_filter',
path=os.path.join(output_experiment_path_all_comparison, 'plots', f'rest_to_run_vibrisastimtiming'),
bh_measure=None)
for aa_setting in ['axon']:
rest_to_run_setting['aa_setting'] = aa_setting
__save_axon_transition_plot(axon_plotter=axon_plotter,
AA_l=AA_l,
setting=rest_to_run_setting,
plot_type='proportions',
path=os.path.join(output_experiment_path_all_comparison, 'plots', f'rest_to_run_{aa_setting}_proportions'),
bh_measure=None)
run_to_rest_setting = {
'before_bh':'running_semi_exact',
'inds_bh':'rest_start',
'after_bh':'rest_semi_exact',
'before_range' : before_range_3,
'after_range' : after_range_6,
'fit': True,
'delay_step_size': 10,
'confidence': True
}
__save_axon_transition_plot(axon_plotter=axon_plotter,
AA_l=AA_l,
setting=run_to_rest_setting,
plot_type='behaviour',
path=os.path.join(output_experiment_path_all_comparison, 'plots', f'run_to_rest_speed'),
bh_measure='speed')
for aa_setting in ['axon']:
run_to_rest_setting['aa_setting'] = aa_setting
__save_axon_transition_plot(axon_plotter=axon_plotter,
AA_l=AA_l,
setting=run_to_rest_setting,
plot_type='proportions',
path=os.path.join(output_experiment_path_all_comparison, 'plots', f'run_to_rest_{aa_setting}_proportions'),
bh_measure=None)
run_stick_run_setting = {
'before_bh':'running_semi_exact',
'inds_bh':'stick_exact_start',
'after_bh':'running_semi_exact',
'before_range' : before_range_2,
'after_range' : after_range_4,
'fit': True,
'delay_step_size': 10,
'confidence': True
}
__save_axon_transition_plot(axon_plotter=axon_plotter,
AA_l=AA_l,
setting=run_stick_run_setting,
plot_type='behaviour',
path=os.path.join(output_experiment_path_all_comparison, 'plots', f'run_stick_run_speed'),
bh_measure='speed')
__save_axon_transition_plot(axon_plotter=axon_plotter,
AA_l=AA_l,
setting=run_stick_run_setting,
plot_type='proportions_stick_filter',
path=os.path.join(output_experiment_path_all_comparison, 'plots', f'run_stick_run_vibrisastimtiming'),
bh_measure=None)
for aa_setting in ['axon', 'astro']:
run_stick_run_setting['aa_setting'] = aa_setting
__save_axon_transition_plot(axon_plotter=axon_plotter,
AA_l=AA_l,
setting=run_stick_run_setting,
plot_type='proportions',
path=os.path.join(output_experiment_path_all_comparison, 'plots', f'run_stick_run_{aa_setting}_proportions'),
bh_measure=None)
def __save_astro_transition_plot(astro_plotter, astroA_l, setting, plot_type, path, measure=None, bh_measure=None):
measure_y_titles = {'dffMax2default' : 'Amplitude',
'time_s' : 'Duration (s)',
'area' : 'Size'}
bh_measure_y_titles = {'speed' : 'Speed (cm/s)'}
before_bh=setting['before_bh']
inds_bh = setting['inds_bh']
after_bh = setting['after_bh']
before_range = setting['before_range']
after_range = setting['after_range']
fit = setting['fit']
delay_step_size = setting['delay_step_size']
confidence = setting['confidence']
p = {'fit' : fit, 'delay_step_size' : delay_step_size, 'confidence' : confidence}
if plot_type == 'proportions':
fig_d, bin_stats = astro_plotter.get_transition_proportion_delays_plot_all_alt(astroA_l,
before_bh=before_bh, inds_bh=inds_bh, after_bh=after_bh,
before_range=before_range, after_range=after_range,
**p)
elif plot_type == 'measure':
assert measure is not None
fig_d, bin_stats = astro_plotter.get_transition_proportion_delays_plot_all_alt(astroA_l, before_bh=before_bh, inds_bh=inds_bh, after_bh=after_bh,
before_range=before_range, after_range=after_range,
measure=measure,
y_title=measure_y_titles[measure],
**p)
elif plot_type == 'behaviour':
assert bh_measure is not None
fig_d, bin_stats = astro_plotter.get_transition_bh_values_plot_all_alt(astroA_l,
before_bh=before_bh, inds_bh=inds_bh, after_bh=after_bh,
bh_measure=bh_measure,
before_range=before_range, after_range=after_range,
y_title=bh_measure_y_titles[bh_measure],
**p)
else:
        raise ValueError('Plot type must be "proportions", "measure" or "behaviour"')
fig_v = fig_d['event_avg_no_mult']
fig_id = os.path.join(path, 'range_{}_{}-step_{}'.format(before_range, after_range, delay_step_size))
saving_utils.save_plotly_fig(fig_v, fig_id)
saving_utils.save_csv_dict(bin_stats, path=fig_id + '.csv', key_order=['x', 'mean', 'std', 'confidence_95'])
all_data_dict = {bin_stats['x'][i]:bin_stats['y_all'][:, i] for i in range(len(bin_stats['x']))}
saving_utils.dict_to_csv(all_data_dict, name=fig_id + 'range_{}_{}-step_{}-data.csv'.format(before_range, after_range, delay_step_size), base_folder=path)
#DataFrame(bin_stats['y_all'], columns=bin_stats['x']).to_csv(data_csv_path, index=False)
def __save_axon_transition_plot(axon_plotter, AA_l, setting, plot_type, path, bh_measure=None):
bh_measure_y_titles = {'speed' : 'Speed (cm/s)'}
before_bh = setting['before_bh']
inds_bh = setting['inds_bh']
after_bh = setting['after_bh']
before_range = setting['before_range']
after_range = setting['after_range']
fit = setting['fit']
delay_step_size = setting['delay_step_size']
confidence = setting['confidence']
if 'aa_setting' in setting:
aa_setting = setting['aa_setting']
p = {'fit' : fit, 'delay_step_size' : delay_step_size, 'confidence' : confidence, 'setting' : aa_setting}
else:
p = {'fit' : fit, 'delay_step_size' : delay_step_size, 'confidence' : confidence}
if plot_type == 'proportions':
fig_d, bin_stats = axon_plotter.get_axon_transition_proportion_delays_plot_all(AA_l, before_bh=before_bh, inds_bh=inds_bh, after_bh=after_bh,
before_range=before_range, after_range=after_range,
**p)
elif plot_type == 'behaviour':
assert bh_measure is not None
fig_d, bin_stats = axon_plotter.get_transition_bh_values_plot_all_alt(AA_l,
before_bh=before_bh, inds_bh=inds_bh, after_bh=after_bh,
bh_measure=bh_measure,
before_range=before_range, after_range=after_range,
y_title=bh_measure_y_titles[bh_measure],
**p)
elif plot_type == 'proportions_stick_filter':
fig_d, bin_stats = axon_plotter.get_axon_transition_proportion_delays_STICK_FILTER_plot_all(AA_l, before_bh=before_bh, inds_bh=inds_bh, after_bh=after_bh,
before_range=before_range, after_range=after_range,
**p)
else:
raise ValueError('Invalid plot type')
fig_v = fig_d['event_avg_no_mult']
fig_id = os.path.join(path, 'range_{}_{}-step_{}'.format(before_range, after_range, delay_step_size))
saving_utils.save_plotly_fig(fig_v, fig_id)
saving_utils.save_csv_dict(bin_stats, path=fig_id + '.csv', key_order=['x', 'mean', 'std', 'confidence_95'])
|
[
"matplotlib.rc",
"scipy.stats.skewnorm.pdf",
"numpy.sum",
"powerlaw.Fit",
"scipy.stats.skewnorm.fit",
"analysis.general_utils.plotly_utils.plot_scatter_mult_with_avg",
"numpy.histogram",
"os.path.isfile",
"scipy.stats.skewnorm.stats",
"analysis.general_utils.plotly_utils.apply_fun_axis_fig",
"analysis.general_utils.saving_utils.save_pth_plt_l_log",
"os.path.join",
"pandas.DataFrame",
"matplotlib.font_manager.FontProperties",
"analysis.general_utils.saving_utils.load_pickle",
"analysis.general_utils.plotly_utils.plot_scatter_error",
"analysis.general_utils.stat_utils.get_pearsonr",
"numpy.max",
"analysis.general_utils.plotly_utils.seaborn_joint_grid",
"analysis.general_utils.plotly_utils.plot_scatter_histogram",
"analysis.general_utils.saving_utils.save_plotly_fig",
"numpy.linspace",
"analysis.general_utils.aqua_utils.get_measure_names",
"analysis.general_utils.general_utils.truncate",
"csv.writer",
"numpy.min",
"analysis.general_utils.saving_utils.save_csv_dict",
"analysis.general_utils.saving_utils.save_pickle",
"numpy.log",
"matplotlib.pyplot.ioff",
"os.makedirs",
"analysis.general_utils.saving_utils.generate_directory_path",
"numpy.array"
] |
[((1460, 1527), 'os.path.join', 'os.path.join', (['output_experiment_path', '"""plots"""', '"""behaviour_heatmaps"""'], {}), "(output_experiment_path, 'plots', 'behaviour_heatmaps')\n", (1472, 1527), False, 'import os, sys, glob\n'), ((1988, 2066), 'os.path.join', 'os.path.join', (['output_experiment_path', '"""plots"""', '"""behaviour_heatmaps_saturation"""'], {}), "(output_experiment_path, 'plots', 'behaviour_heatmaps_saturation')\n", (2000, 2066), False, 'import os, sys, glob\n'), ((2623, 2702), 'os.path.join', 'os.path.join', (['output_experiment_path', '"""plots"""', '"""behaviour_activity"""', '"""activity"""'], {}), "(output_experiment_path, 'plots', 'behaviour_activity', 'activity')\n", (2635, 2702), False, 'import os, sys, glob\n'), ((2786, 2891), 'analysis.general_utils.saving_utils.save_plotly_fig', 'saving_utils.save_plotly_fig', (['fig_behaviour_activity', 'behaviour_activity_path'], {'width': '(1200)', 'height': '(800)'}), '(fig_behaviour_activity,\n behaviour_activity_path, width=1200, height=800)\n', (2814, 2891), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((2970, 3043), 'os.path.join', 'os.path.join', (['output_experiment_path', '"""plots"""', '"""behaviour_areas"""', '"""areas"""'], {}), "(output_experiment_path, 'plots', 'behaviour_areas', 'areas')\n", (2982, 3043), False, 'import os, sys, glob\n'), ((3119, 3188), 'analysis.general_utils.saving_utils.save_plotly_fig', 'saving_utils.save_plotly_fig', (['fig_behaviour_area', 'behaviour_area_path'], {}), '(fig_behaviour_area, behaviour_area_path)\n', (3147, 3188), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((3280, 3365), 'os.path.join', 'os.path.join', (['output_experiment_path', '"""plots"""', '"""signal_amplitudes"""', '"""amplitudes"""'], {}), "(output_experiment_path, 'plots', 'signal_amplitudes', 'amplitudes'\n )\n", (3292, 3365), False, 'import os, sys, glob\n'), ((3450, 3529), 'analysis.general_utils.saving_utils.save_plotly_fig', 'saving_utils.save_plotly_fig', (['fig_behaviour_amplitude', 'behaviour_amplitude_path'], {}), '(fig_behaviour_amplitude, behaviour_amplitude_path)\n', (3478, 3529), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((12665, 12751), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', '"""pixel_distribution"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n 'pixel_distribution')\n", (12677, 12751), False, 'import os, sys, glob\n'), ((13405, 13431), 'os.path.join', 'os.path.join', (['path', '"""real"""'], {}), "(path, 'real')\n", (13417, 13431), False, 'import os, sys, glob\n'), ((13451, 13608), 'analysis.general_utils.plotly_utils.plot_scatter_error', 'plotly_utils.plot_scatter_error', (['x_l', 'y_l_fmt'], {'x_title': '"""Pixel intensity percentile"""', 'y_title': '"""Frequency (Density)"""', 'exp_fit': '(True)', 'with_details': '(True)'}), "(x_l, y_l_fmt, x_title=\n 'Pixel intensity percentile', y_title='Frequency (Density)', exp_fit=\n True, with_details=True)\n", (13482, 13608), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((13603, 13647), 'analysis.general_utils.saving_utils.save_plotly_fig', 
'saving_utils.save_plotly_fig', (['fig', 'plot_path'], {}), '(fig, plot_path)\n', (13631, 13647), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((13744, 13860), 'pandas.DataFrame', 'DataFrame', (["[stats_d['mean'], stats_d['conf_95'], stats_d['fit']]"], {'columns': 'x_l', 'index': "['mean', 'conf_95', 'fit']"}), "([stats_d['mean'], stats_d['conf_95'], stats_d['fit']], columns=\n x_l, index=['mean', 'conf_95', 'fit'])\n", (13753, 13860), False, 'from pandas import DataFrame\n'), ((14830, 14856), 'os.path.join', 'os.path.join', (['path', '"""fake"""'], {}), "(path, 'fake')\n", (14842, 14856), False, 'import os, sys, glob\n'), ((14876, 15034), 'analysis.general_utils.plotly_utils.plot_scatter_error', 'plotly_utils.plot_scatter_error', (['x_l', 'y_l_fmt'], {'x_title': '"""Pixel intensity percentile"""', 'y_title': '"""Frequency (Density)"""', 'exp_fit': '(False)', 'with_details': '(True)'}), "(x_l, y_l_fmt, x_title=\n 'Pixel intensity percentile', y_title='Frequency (Density)', exp_fit=\n False, with_details=True)\n", (14907, 15034), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((15030, 15074), 'analysis.general_utils.saving_utils.save_plotly_fig', 'saving_utils.save_plotly_fig', (['fig', 'plot_path'], {}), '(fig, plot_path)\n', (15058, 15074), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((15157, 15249), 'pandas.DataFrame', 'DataFrame', (["[stats_d['mean'], stats_d['conf_95']]"], {'columns': 'x_l', 'index': "['mean', 'conf_95']"}), "([stats_d['mean'], stats_d['conf_95']], columns=x_l, index=['mean',\n 'conf_95'])\n", (15166, 15249), False, 'from pandas import DataFrame\n'), ((15470, 15570), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', '"""power_law_fit_sizes_distribution"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n 'power_law_fit_sizes_distribution')\n", (15482, 15570), False, 'import os, sys, glob\n'), ((15592, 15634), 'analysis.general_utils.saving_utils.generate_directory_path', 'saving_utils.generate_directory_path', (['path'], {}), '(path)\n', (15628, 15634), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((15724, 15755), 'matplotlib.rc', 'rc', (['"""font"""'], {'family': '"""sans-serif"""'}), "('font', family='sans-serif')\n", (15726, 15755), False, 'from matplotlib import rc\n'), ((15760, 15781), 'matplotlib.rc', 'rc', (['"""font"""'], {'size': '(10.0)'}), "('font', size=10.0)\n", (15762, 15781), False, 'from matplotlib import rc\n'), ((15786, 15810), 'matplotlib.rc', 'rc', (['"""text"""'], {'usetex': '(False)'}), "('text', usetex=False)\n", (15788, 15810), False, 'from matplotlib import rc\n'), ((16225, 16255), 'numpy.array', 'np.array', (['all_events_measure_l'], {}), '(all_events_measure_l)\n', (16233, 16255), True, 'import numpy as np\n'), ((16266, 16313), 'powerlaw.Fit', 'powerlaw.Fit', (['data_np'], {'discrete': '(True)', 'xmin': 'xmin'}), '(data_np, discrete=True, xmin=xmin)\n', (16278, 16313), False, 'import powerlaw\n'), ((17405, 17415), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (17413, 17415), True, 'import matplotlib.pyplot as plt\n'), ((17482, 17570), 'os.path.join', 
'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', '"""size_v_time_corr_ALL"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n 'size_v_time_corr_ALL')\n", (17494, 17570), False, 'import os, sys, glob\n'), ((17642, 17684), 'analysis.general_utils.saving_utils.generate_directory_path', 'saving_utils.generate_directory_path', (['path'], {}), '(path)\n', (17678, 17684), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((17872, 17891), 'numpy.array', 'np.array', (['areas_all'], {}), '(areas_all)\n', (17880, 17891), True, 'import numpy as np\n'), ((17908, 17927), 'numpy.array', 'np.array', (['times_all'], {}), '(times_all)\n', (17916, 17927), True, 'import numpy as np\n'), ((17939, 17984), 'analysis.general_utils.stat_utils.get_pearsonr', 'stat_utils.get_pearsonr', (['times_all', 'areas_all'], {}), '(times_all, areas_all)\n', (17962, 17984), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((17995, 18047), 'pandas.DataFrame', 'pd.DataFrame', (["{'Size': areas_all, 'Time': times_all}"], {}), "({'Size': areas_all, 'Time': times_all})\n", (18007, 18047), True, 'import pandas as pd\n'), ((18738, 18826), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""data"""', '"""split_correlation_all"""'], {}), "(output_experiment_path_all_comparison, 'data',\n 'split_correlation_all')\n", (18750, 18826), False, 'import os, sys, glob\n'), ((18841, 18930), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', '"""split_correlation_all"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n 'split_correlation_all')\n", (18853, 18930), False, 'import os, sys, glob\n'), ((18954, 19001), 'os.path.join', 'os.path.join', (['save_folder', '"""between_splits.pkl"""'], {}), "(save_folder, 'between_splits.pkl')\n", (18966, 19001), False, 'import os, sys, glob\n'), ((19033, 19078), 'os.path.join', 'os.path.join', (['save_folder', '"""between_days.pkl"""'], {}), "(save_folder, 'between_days.pkl')\n", (19045, 19078), False, 'import os, sys, glob\n'), ((19106, 19145), 'os.path.join', 'os.path.join', (['save_folder', '"""random.pkl"""'], {}), "(save_folder, 'random.pkl')\n", (19118, 19145), False, 'import os, sys, glob\n'), ((19176, 19225), 'os.path.join', 'os.path.join', (['save_folder', '"""between_rest_run.pkl"""'], {}), "(save_folder, 'between_rest_run.pkl')\n", (19188, 19225), False, 'import os, sys, glob\n'), ((22168, 22246), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""data"""', '"""splits_self_all"""'], {}), "(output_experiment_path_all_comparison, 'data', 'splits_self_all')\n", (22180, 22246), False, 'import os, sys, glob\n'), ((22258, 22353), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', '"""splits_self_all"""', '"""self_all"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n 'splits_self_all', 'self_all')\n", (22270, 22353), False, 'import os, sys, glob\n'), ((23049, 23321), 'analysis.general_utils.plotly_utils.plot_scatter_mult_with_avg', 'plotly_utils.plot_scatter_mult_with_avg', (['x_l[0]', 'y_l_l', 'None', 'name_l'], {'mode': '"""lines"""', 'title': '"""Splits self"""', 'x_title': '"""Splits (minutes)"""', 'y_title': '"""Correlation"""', 'xrange': 'None', 'yrange': 'None', 'confidence': '(True)', 'with_stats': 
'(True)', 'point_box': '(True)', 'exclude_non_avg_conf': '(True)'}), "(x_l[0], y_l_l, None, name_l, mode=\n 'lines', title='Splits self', x_title='Splits (minutes)', y_title=\n 'Correlation', xrange=None, yrange=None, confidence=True, with_stats=\n True, point_box=True, exclude_non_avg_conf=True)\n", (23088, 23321), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((23348, 23387), 'analysis.general_utils.saving_utils.save_plotly_fig', 'saving_utils.save_plotly_fig', (['fig', 'path'], {}), '(fig, path)\n', (23376, 23387), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((23404, 23480), 'pandas.DataFrame', 'DataFrame', (["stats_d['mean_l_l']"], {'columns': "stats_d['x']", 'index': "stats_d['names']"}), "(stats_d['mean_l_l'], columns=stats_d['x'], index=stats_d['names'])\n", (23413, 23480), False, 'from pandas import DataFrame\n'), ((23493, 23568), 'pandas.DataFrame', 'DataFrame', (["stats_d['conf_95']"], {'columns': "stats_d['x']", 'index': "stats_d['names']"}), "(stats_d['conf_95'], columns=stats_d['x'], index=stats_d['names'])\n", (23502, 23568), False, 'from pandas import DataFrame\n'), ((23583, 23686), 'pandas.DataFrame', 'DataFrame', (["[stats_d['mean'], stats_d['mean_conf']]"], {'columns': "stats_d['x']", 'index': "['mean', 'conf_95']"}), "([stats_d['mean'], stats_d['mean_conf']], columns=stats_d['x'],\n index=['mean', 'conf_95'])\n", (23592, 23686), False, 'from pandas import DataFrame\n'), ((26454, 26530), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', '"""pdf_norm_fit"""'], {}), "(output_experiment_path_all_comparison, 'plots', 'pdf_norm_fit')\n", (26466, 26530), False, 'import os, sys, glob\n'), ((32022, 32061), 'os.path.join', 'os.path.join', (['output_folder', '"""axon_all"""'], {}), "(output_folder, 'axon_all')\n", (32034, 32061), False, 'import os, sys, glob\n'), ((39300, 39343), 'analysis.general_utils.saving_utils.save_plotly_fig', 'saving_utils.save_plotly_fig', (['fig_v', 'fig_id'], {}), '(fig_v, fig_id)\n', (39328, 39343), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((39348, 39460), 'analysis.general_utils.saving_utils.save_csv_dict', 'saving_utils.save_csv_dict', (['bin_stats'], {'path': "(fig_id + '.csv')", 'key_order': "['x', 'mean', 'std', 'confidence_95']"}), "(bin_stats, path=fig_id + '.csv', key_order=['x',\n 'mean', 'std', 'confidence_95'])\n", (39374, 39460), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((42268, 42311), 'analysis.general_utils.saving_utils.save_plotly_fig', 'saving_utils.save_plotly_fig', (['fig_v', 'fig_id'], {}), '(fig_v, fig_id)\n', (42296, 42311), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((42316, 42428), 'analysis.general_utils.saving_utils.save_csv_dict', 'saving_utils.save_csv_dict', (['bin_stats'], {'path': "(fig_id + '.csv')", 'key_order': "['x', 'mean', 'std', 'confidence_95']"}), "(bin_stats, path=fig_id + '.csv', key_order=['x',\n 'mean', 'std', 'confidence_95'])\n", (42342, 42428), False, 'from analysis.general_utils import aqua_utils, saving_utils, 
plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((2470, 2536), 'os.path.join', 'os.path.join', (['output_experiment_path', '"""plots"""', '"""borders"""', '"""border"""'], {}), "(output_experiment_path, 'plots', 'borders', 'border')\n", (2482, 2536), False, 'import os, sys, glob\n'), ((3784, 3821), 'os.path.join', 'os.path.join', (['output_folder', 'name_tag'], {}), '(output_folder, name_tag)\n', (3796, 3821), False, 'import os, sys, glob\n'), ((13083, 13146), 'numpy.histogram', 'np.histogram', (['grid_flat_nz'], {'bins': '(20)', 'range': '(0, 1)', 'density': '(True)'}), '(grid_flat_nz, bins=20, range=(0, 1), density=True)\n', (13095, 13146), True, 'import numpy as np\n'), ((14461, 14481), 'numpy.max', 'np.max', (['grid_flat_nz'], {}), '(grid_flat_nz)\n', (14467, 14481), True, 'import numpy as np\n'), ((14508, 14571), 'numpy.histogram', 'np.histogram', (['grid_flat_nz'], {'bins': '(20)', 'range': '(0, 1)', 'density': '(True)'}), '(grid_flat_nz, bins=20, range=(0, 1), density=True)\n', (14520, 14571), True, 'import numpy as np\n'), ((16861, 16897), 'os.path.join', 'os.path.join', (['path', "(figname + '.svg')"], {}), "(path, figname + '.svg')\n", (16873, 16897), False, 'import os, sys, glob\n'), ((16934, 16970), 'os.path.join', 'os.path.join', (['path', "(figname + '.png')"], {}), "(path, figname + '.png')\n", (16946, 16970), False, 'import os, sys, glob\n'), ((18127, 18155), 'analysis.general_utils.general_utils.truncate', 'general_utils.truncate', (['r', '(2)'], {}), '(r, 2)\n', (18149, 18155), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((18207, 18280), 'analysis.general_utils.plotly_utils.seaborn_joint_grid', 'plotly_utils.seaborn_joint_grid', (['df', '"""Size"""', '"""Time"""'], {'kind': 'kind', 'text': 'text'}), "(df, 'Size', 'Time', kind=kind, text=text)\n", (18238, 18280), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((20292, 20328), 'os.path.isfile', 'os.path.isfile', (['save_random_pkl_path'], {}), '(save_random_pkl_path)\n', (20306, 20328), False, 'import os, sys, glob\n'), ((29356, 29386), 'numpy.linspace', 'np.linspace', (['x_min', 'x_max', '(500)'], {}), '(x_min, x_max, 500)\n', (29367, 29386), True, 'import numpy as np\n'), ((30065, 30119), 'scipy.stats.skewnorm.pdf', 'skewnorm.pdf', (['x', 'est_rest[0]', 'est_rest[1]', 'est_rest[2]'], {}), '(x, est_rest[0], est_rest[1], est_rest[2])\n', (30077, 30119), False, 'from scipy.stats import skewnorm\n'), ((30140, 30203), 'scipy.stats.skewnorm.pdf', 'skewnorm.pdf', (['x', 'est_running[0]', 'est_running[1]', 'est_running[2]'], {}), '(x, est_running[0], est_running[1], est_running[2])\n', (30152, 30203), False, 'from scipy.stats import skewnorm\n'), ((1626, 1665), 'os.path.join', 'os.path.join', (['heatmap_grid_base_path', 'k'], {}), '(heatmap_grid_base_path, k)\n', (1638, 1665), False, 'import os, sys, glob\n'), ((1730, 1777), 'os.path.join', 'os.path.join', (['heatmap_grid_base_path', "(k + 'dff')"], {}), "(heatmap_grid_base_path, k + 'dff')\n", (1742, 1777), False, 'import os, sys, glob\n'), ((2165, 2204), 'os.path.join', 'os.path.join', (['heatmap_grid_base_path', 'k'], {}), '(heatmap_grid_base_path, k)\n', (2177, 2204), False, 'import os, sys, glob\n'), ((2269, 2316), 'os.path.join', 'os.path.join', (['heatmap_grid_base_path', "(k + 'dff')"], {}), "(heatmap_grid_base_path, k + 
'dff')\n", (2281, 2316), False, 'import os, sys, glob\n'), ((5371, 5408), 'analysis.general_utils.aqua_utils.get_measure_names', 'aqua_utils.get_measure_names', (['measure'], {}), '(measure)\n', (5399, 5408), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((8193, 8284), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', '"""rest_to_run_proportions"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n 'rest_to_run_proportions')\n", (8205, 8284), False, 'import os, sys, glob\n'), ((8434, 8524), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', '"""rest_to_run_amplitudes"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n 'rest_to_run_amplitudes')\n", (8446, 8524), False, 'import os, sys, glob\n'), ((8661, 8750), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', '"""rest_to_run_durations"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n 'rest_to_run_durations')\n", (8673, 8750), False, 'import os, sys, glob\n'), ((8885, 8970), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', '"""rest_to_run_sizes"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n 'rest_to_run_sizes')\n", (8897, 8970), False, 'import os, sys, glob\n'), ((9159, 9244), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', '"""rest_to_run_speed"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n 'rest_to_run_speed')\n", (9171, 9244), False, 'import os, sys, glob\n'), ((9763, 9854), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', '"""run_to_rest_proportions"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n 'run_to_rest_proportions')\n", (9775, 9854), False, 'import os, sys, glob\n'), ((9999, 10089), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', '"""run_to_rest_amplitudes"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n 'run_to_rest_amplitudes')\n", (10011, 10089), False, 'import os, sys, glob\n'), ((10226, 10315), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', '"""run_to_rest_durations"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n 'run_to_rest_durations')\n", (10238, 10315), False, 'import os, sys, glob\n'), ((10450, 10535), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', '"""run_to_rest_sizes"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n 'run_to_rest_sizes')\n", (10462, 10535), False, 'import os, sys, glob\n'), ((10724, 10809), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', '"""run_to_rest_speed"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n 'run_to_rest_speed')\n", (10736, 10809), False, 'import os, sys, glob\n'), ((11343, 11436), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', '"""run_stick_run_proportions"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n 'run_stick_run_proportions')\n", (11355, 11436), False, 'import os, sys, glob\n'), ((11583, 11675), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', '"""run_stick_run_amplitudes"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n 
'run_stick_run_amplitudes')\n", (11595, 11675), False, 'import os, sys, glob\n'), ((11814, 11905), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', '"""run_stick_run_durations"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n 'run_stick_run_durations')\n", (11826, 11905), False, 'import os, sys, glob\n'), ((12042, 12129), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', '"""run_stick_run_sizes"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n 'run_stick_run_sizes')\n", (12054, 12129), False, 'import os, sys, glob\n'), ((12273, 12360), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', '"""run_stick_run_speed"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n 'run_stick_run_speed')\n", (12285, 12360), False, 'import os, sys, glob\n'), ((13224, 13236), 'numpy.sum', 'np.sum', (['hist'], {}), '(hist)\n', (13230, 13236), True, 'import numpy as np\n'), ((13673, 13698), 'numpy.array', 'np.array', (["stats_d['data']"], {}), "(stats_d['data'])\n", (13681, 13698), True, 'import numpy as np\n'), ((14649, 14661), 'numpy.sum', 'np.sum', (['hist'], {}), '(hist)\n', (14655, 14661), True, 'import numpy as np\n'), ((15100, 15125), 'numpy.array', 'np.array', (["stats_d['data']"], {}), "(stats_d['data'])\n", (15108, 15125), True, 'import numpy as np\n'), ((15835, 15851), 'matplotlib.font_manager.FontProperties', 'FontProperties', ([], {}), '()\n', (15849, 15851), False, 'from matplotlib.font_manager import FontProperties\n'), ((17777, 17805), 'numpy.log', 'np.log', (["astroA.res_d['area']"], {}), "(astroA.res_d['area'])\n", (17783, 17805), True, 'import numpy as np\n'), ((20353, 20399), 'analysis.general_utils.saving_utils.load_pickle', 'saving_utils.load_pickle', (['save_random_pkl_path'], {}), '(save_random_pkl_path)\n', (20377, 20399), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((20637, 20693), 'analysis.general_utils.saving_utils.save_pickle', 'saving_utils.save_pickle', (['random_l', 'save_random_pkl_path'], {}), '(random_l, save_random_pkl_path)\n', (20661, 20693), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((21062, 21097), 'os.path.join', 'os.path.join', (['plot_folder', '"""splits"""'], {}), "(plot_folder, 'splits')\n", (21074, 21097), False, 'import os, sys, glob\n'), ((21150, 21194), 'os.path.join', 'os.path.join', (['plot_folder', "('splits' + '.csv')"], {}), "(plot_folder, 'splits' + '.csv')\n", (21162, 21194), False, 'import os, sys, glob\n'), ((21432, 21481), 'os.path.join', 'os.path.join', (['plot_folder', "('splits-data' + '.csv')"], {}), "(plot_folder, 'splits-data' + '.csv')\n", (21444, 21481), False, 'import os, sys, glob\n'), ((25036, 25081), 'analysis.general_utils.saving_utils.save_plotly_fig', 'saving_utils.save_plotly_fig', (['plot', 'plot_path'], {}), '(plot, plot_path)\n', (25064, 25081), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((27219, 27249), 'numpy.array', 'np.array', (['all_events_measure_l'], {}), '(all_events_measure_l)\n', (27227, 27249), True, 'import numpy as np\n'), ((27400, 27434), 'scipy.stats.skewnorm.fit', 'skewnorm.fit', (['all_events_measure_l'], {}), '(all_events_measure_l)\n', 
(27412, 27434), False, 'from scipy.stats import skewnorm\n'), ((27557, 27614), 'scipy.stats.skewnorm.pdf', 'skewnorm.pdf', (['x', 'a_estimate', 'loc_estimate', 'scale_estimate'], {}), '(x, a_estimate, loc_estimate, scale_estimate)\n', (27569, 27614), False, 'from scipy.stats import skewnorm\n'), ((27821, 27926), 'analysis.general_utils.plotly_utils.plot_scatter_histogram', 'plotly_utils.plot_scatter_histogram', ([], {'x': 'x', 'y_hist': 'all_events_measure_l', 'y_scatter': 'p', 'num_bins': 'num_bins'}), '(x=x, y_hist=all_events_measure_l,\n y_scatter=p, num_bins=num_bins)\n', (27856, 27926), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((27959, 28047), 'scipy.stats.skewnorm.stats', 'skewnorm.stats', ([], {'a': 'a_estimate', 'loc': 'loc_estimate', 'scale': 'scale_estimate', 'moments': '"""mvsk"""'}), "(a=a_estimate, loc=loc_estimate, scale=scale_estimate,\n moments='mvsk')\n", (27973, 28047), False, 'from scipy.stats import skewnorm\n'), ((29814, 29846), 'numpy.linspace', 'np.linspace', (['x_min', 'x_max', 'nbins'], {}), '(x_min, x_max, nbins)\n', (29825, 29846), True, 'import numpy as np\n'), ((30280, 30412), 'numpy.histogram', 'np.histogram', (["all_event_values[measure]['running'][all_event_values[measure]['running'] <\n x_max]"], {'bins': 'x_val_bins', 'density': '(True)'}), "(all_event_values[measure]['running'][all_event_values[measure]\n ['running'] < x_max], bins=x_val_bins, density=True)\n", (30292, 30412), True, 'import numpy as np\n'), ((30445, 30571), 'numpy.histogram', 'np.histogram', (["all_event_values[measure]['rest'][all_event_values[measure]['rest'] < x_max]"], {'bins': 'x_val_bins', 'density': '(True)'}), "(all_event_values[measure]['rest'][all_event_values[measure][\n 'rest'] < x_max], bins=x_val_bins, density=True)\n", (30457, 30571), True, 'import numpy as np\n'), ((30931, 30970), 'analysis.general_utils.aqua_utils.get_measure_names', 'aqua_utils.get_measure_names', (['[measure]'], {}), '([measure])\n', (30959, 30970), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((31420, 31459), 'analysis.general_utils.aqua_utils.get_measure_names', 'aqua_utils.get_measure_names', (['[measure]'], {}), '([measure])\n', (31448, 31459), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((31829, 31852), 'os.path.join', 'os.path.join', (['path', 'id_'], {}), '(path, id_)\n', (31841, 31852), False, 'import os, sys, glob\n'), ((32921, 33007), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', 'f"""rest_to_run_speed"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n f'rest_to_run_speed')\n", (32933, 33007), False, 'import os, sys, glob\n'), ((33257, 33355), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', 'f"""rest_to_run_vibrisastimtiming"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n f'rest_to_run_vibrisastimtiming')\n", (33269, 33355), False, 'import os, sys, glob\n'), ((34426, 34512), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', 'f"""run_to_rest_speed"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n f'run_to_rest_speed')\n", (34438, 34512), False, 'import os, sys, glob\n'), ((35600, 35688), 'os.path.join', 
'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', 'f"""run_stick_run_speed"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n f'run_stick_run_speed')\n", (35612, 35688), False, 'import os, sys, glob\n'), ((35936, 36036), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', 'f"""run_stick_run_vibrisastimtiming"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n f'run_stick_run_vibrisastimtiming')\n", (35948, 36036), False, 'import os, sys, glob\n'), ((5997, 6074), 'analysis.general_utils.plotly_utils.apply_fun_axis_fig', 'plotly_utils.apply_fun_axis_fig', (['plot', '(lambda x: x / astroA_l[0].fr)'], {'axis': '"""x"""'}), "(plot, lambda x: x / astroA_l[0].fr, axis='x')\n", (6028, 6074), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((6127, 6184), 'analysis.general_utils.saving_utils.save_pth_plt_l_log', 'saving_utils.save_pth_plt_l_log', (['[plot]', '[path]'], {'axis': '"""x"""'}), "([plot], [path], axis='x')\n", (6158, 6184), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((6219, 6259), 'analysis.general_utils.saving_utils.save_plotly_fig', 'saving_utils.save_plotly_fig', (['plot', 'path'], {}), '(plot, path)\n', (6247, 6259), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((25192, 25269), 'csv.writer', 'csv.writer', (['csv_file'], {'delimiter': '""","""', 'quotechar': '"""\\""""', 'quoting': 'csv.QUOTE_MINIMAL'}), '(csv_file, delimiter=\',\', quotechar=\'"\', quoting=csv.QUOTE_MINIMAL)\n', (25202, 25269), False, 'import csv\n'), ((25850, 25927), 'csv.writer', 'csv.writer', (['csv_file'], {'delimiter': '""","""', 'quotechar': '"""\\""""', 'quoting': 'csv.QUOTE_MINIMAL'}), '(csv_file, delimiter=\',\', quotechar=\'"\', quoting=csv.QUOTE_MINIMAL)\n', (25860, 25927), False, 'import csv\n'), ((27476, 27504), 'numpy.min', 'np.min', (['all_events_measure_l'], {}), '(all_events_measure_l)\n', (27482, 27504), True, 'import numpy as np\n'), ((27506, 27534), 'numpy.max', 'np.max', (['all_events_measure_l'], {}), '(all_events_measure_l)\n', (27512, 27534), True, 'import numpy as np\n'), ((27697, 27706), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (27703, 27706), True, 'import numpy as np\n'), ((27708, 27717), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (27714, 27717), True, 'import numpy as np\n'), ((27771, 27801), 'numpy.array', 'np.array', (['all_events_measure_l'], {}), '(all_events_measure_l)\n', (27779, 27801), True, 'import numpy as np\n'), ((28572, 28604), 'os.path.join', 'os.path.join', (['path', "(id_ + '.csv')"], {}), "(path, id_ + '.csv')\n", (28584, 28604), False, 'import os, sys, glob\n'), ((28692, 28715), 'os.path.join', 'os.path.join', (['path', 'id_'], {}), '(path, id_)\n', (28704, 28715), False, 'import os, sys, glob\n'), ((33757, 33862), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', 'f"""rest_to_run_{aa_setting}_proportions"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n f'rest_to_run_{aa_setting}_proportions')\n", (33769, 33862), False, 'import os, sys, glob\n'), ((34917, 35022), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', 'f"""run_to_rest_{aa_setting}_proportions"""'], {}), 
"(output_experiment_path_all_comparison, 'plots',\n f'run_to_rest_{aa_setting}_proportions')\n", (34929, 35022), False, 'import os, sys, glob\n'), ((36456, 36563), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', 'f"""run_stick_run_{aa_setting}_proportions"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n f'run_stick_run_{aa_setting}_proportions')\n", (36468, 36563), False, 'import os, sys, glob\n'), ((6539, 6556), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (6550, 6556), False, 'import os, sys, glob\n'), ((25105, 25142), 'os.path.join', 'os.path.join', (["(plot_path + '-data.csv')"], {}), "(plot_path + '-data.csv')\n", (25117, 25142), False, 'import os, sys, glob\n'), ((25768, 25800), 'os.path.join', 'os.path.join', (["(plot_path + '.csv')"], {}), "(plot_path + '.csv')\n", (25780, 25800), False, 'import os, sys, glob\n'), ((27105, 27144), 'analysis.general_utils.aqua_utils.get_measure_names', 'aqua_utils.get_measure_names', (['[measure]'], {}), '([measure])\n', (27133, 27144), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((28140, 28149), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (28146, 28149), True, 'import numpy as np\n'), ((6920, 6944), 'numpy.array', 'np.array', (["temp_d['data']"], {}), "(temp_d['data'])\n", (6928, 6944), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# @Time : 2020/12/24 3:48 PM
# @Author : Kevin
from src.chat import dataset
from src.chat.seq2seq import ChatSeq2Seq
from torch.optim import Adam
import torch.nn.functional as F
import torch
from src import config
from tqdm import tqdm
from src.lib import device,chat_answer_word_sequence_model
from torch import nn
def train(epoch):
    # 1. Prepare the data
dataloader = dataset.get_dataloader()
    # 2. Build the model
seq2seq = ChatSeq2Seq().to(device)
optimizer=Adam(seq2seq.parameters(),lr=0.001)
former_acc=0.
seq2seq.train()
bar=tqdm(enumerate(dataloader),ascii=True,desc="training...")
    # 3. Train
for index, (asks, answers, ask_lens, answer_lens) in bar:
asks=asks.to(device)
answers=answers.to(device)
optimizer.zero_grad()
decoder_outputs_softmax, decoder_hidden_state = seq2seq(asks, answers,ask_lens,answer_lens)
# [batch size,seq len]>[batch size*seq len]
answers=answers.view(-1)
# decoder_outputs[batch size,seq len,dict size]>[batch size*seq len,dict size]
        # -1 keeps the remaining dimension as-is
decoder_outputs_softmax=decoder_outputs_softmax.view(decoder_outputs_softmax.size(0)*decoder_outputs_softmax.size(1),-1)
        # keep the hidden size (dict size) dimension
        # loss: outputs are 2-D, labels are 1-D
loss=F.cross_entropy(decoder_outputs_softmax,answers,ignore_index=chat_answer_word_sequence_model.PAD)
loss.backward()
        # Gradient clipping: clip overly large gradients to avoid exploding gradients
        # The trailing underscore means the parameters are modified in place
nn.utils.clip_grad_norm_(seq2seq.parameters(),config.caht_train_grad_clip_max)
optimizer.step()
        # Compute the accuracy
acc = decoder_outputs_softmax.max(dim=-1)[-1]
acc = acc.eq(answers).float().mean()
bar.set_description(f"eporch:{epoch}\tindex:{index}\tloss:{loss.item()}\t正确率:{acc}")
if acc>former_acc:
torch.save(seq2seq.state_dict(), config.chat_seq_2_seq_model_path)
torch.save(optimizer.state_dict(), config.chat_seq_optimizer_model_path)
if epoch%10==0:
torch.save(seq2seq.state_dict(), config.chat_seq_2_seq_model_path+str(epoch))
torch.save(optimizer.state_dict(), config.chat_seq_optimizer_model_path+str(epoch))
former_acc=acc
return former_acc
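# A minimal sketch (not part of the original training script) of the
# flatten-then-cross-entropy pattern used in train() above: decoder outputs of
# shape [batch size, seq len, dict size] and targets of shape
# [batch size, seq len] are both flattened so padded positions can be masked
# via ignore_index. The sizes and pad_index below are illustrative assumptions,
# not the project's real vocabulary size or PAD token.
def _cross_entropy_shape_demo():
    batch_size, seq_len, dict_size, pad_index = 4, 7, 100, 0
    outputs = torch.randn(batch_size, seq_len, dict_size)
    targets = torch.randint(1, dict_size, (batch_size, seq_len))
    targets[:, -2:] = pad_index  # pretend the last two positions are padding
    loss = F.cross_entropy(outputs.view(batch_size * seq_len, dict_size),
                           targets.view(-1),
                           ignore_index=pad_index)
    return loss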
if __name__ == '__main__':
epoch=30
acc=[]
    for i in range(epoch):
        former_acc=train(i)
        acc.append(former_acc)
    print(acc)
# eval()
|
[
"src.chat.dataset.get_dataloader",
"torch.nn.functional.cross_entropy",
"src.chat.seq2seq.ChatSeq2Seq"
] |
[((393, 417), 'src.chat.dataset.get_dataloader', 'dataset.get_dataloader', ([], {}), '()\n', (415, 417), False, 'from src.chat import dataset\n'), ((1300, 1404), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['decoder_outputs_softmax', 'answers'], {'ignore_index': 'chat_answer_word_sequence_model.PAD'}), '(decoder_outputs_softmax, answers, ignore_index=\n chat_answer_word_sequence_model.PAD)\n', (1315, 1404), True, 'import torch.nn.functional as F\n'), ((445, 458), 'src.chat.seq2seq.ChatSeq2Seq', 'ChatSeq2Seq', ([], {}), '()\n', (456, 458), False, 'from src.chat.seq2seq import ChatSeq2Seq\n')]
|
from flask_sqlalchemy import SQLAlchemy
from flask_admin import Admin
db = SQLAlchemy()
admin = Admin(template_mode='bootstrap3')
def register_extensions(app):
db.init_app(app)
admin.init_app(app)
from app.admin_ import register_modelviews
register_modelviews(admin, app)
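# A minimal usage sketch (not part of the original module): register_extensions
# is intended to be called from an application factory. The factory below is a
# hypothetical illustration only, not the project's actual create_app.
def _example_create_app():
    from flask import Flask
    app = Flask(__name__)
    register_extensions(app)
    return app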
|
[
"flask_admin.Admin",
"app.admin_.register_modelviews",
"flask_sqlalchemy.SQLAlchemy"
] |
[((77, 89), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', ([], {}), '()\n', (87, 89), False, 'from flask_sqlalchemy import SQLAlchemy\n'), ((98, 131), 'flask_admin.Admin', 'Admin', ([], {'template_mode': '"""bootstrap3"""'}), "(template_mode='bootstrap3')\n", (103, 131), False, 'from flask_admin import Admin\n'), ((260, 291), 'app.admin_.register_modelviews', 'register_modelviews', (['admin', 'app'], {}), '(admin, app)\n', (279, 291), False, 'from app.admin_ import register_modelviews\n')]
|
# 2018/11/01~2018/07/12
# <NAME>, <EMAIL>.
"""
graphML.py Module for basic GSP and graph machine learning functions.
Functionals
LSIGF: Applies a linear shift-invariant graph filter
spectralGF: Applies a linear shift-invariant graph filter in spectral form
NVGF: Applies a node-variant graph filter
EVGF: Applies an edge-variant graph filter
learnAttentionGSO: Computes the GSO following the attention mechanism
graphAttention: Applies a graph attention layer
Filtering Layers (nn.Module)
GraphFilter: Creates a graph convolutional layer using LSI graph filters
SpectralGF: Creates a graph convolutional layer using LSI graph filters in
spectral form
NodeVariantGF: Creates a graph filtering layer using node-variant graph filters
EdgeVariantGF: Creates a graph filtering layer using edge-variant graph filters
GraphAttentional: Creates a layer using graph attention mechanisms
Activation Functions - Nonlinearities (nn.Module)
MaxLocalActivation: Creates a localized max activation function layer
MedianLocalActivation: Creates a localized median activation function layer
NoActivation: Creates a layer for no activation function
Summarizing Functions - Pooling (nn.Module)
NoPool: No summarizing function.
MaxPoolLocal: Max-summarizing function
"""
import math
import numpy as np
import torch
import torch.nn as nn
import utils.graphUtils.graphTools as graphTools
zeroTolerance = 1e-9 # Values below this number are considered zero.
infiniteNumber = 1e12 # infinity equals this number
# WARNING: Only scalar bias.
def LSIGF(h, S, x, b=None):
"""
LSIGF(filter_taps, GSO, input, bias=None) Computes the output of a linear
shift-invariant graph filter on input and then adds bias.
Denote as G the number of input features, F the number of output features,
E the number of edge features, K the number of filter taps, N the number of
nodes, S_{e} in R^{N x N} the GSO for edge feature e, x in R^{G x N} the
input data where x_{g} in R^{N} is the graph signal representing feature
g, and b in R^{F x N} the bias vector, with b_{f} in R^{N} representing the
bias for feature f.
Then, the LSI-GF is computed as
y_{f} = \sum_{e=1}^{E}
\sum_{k=0}^{K-1}
\sum_{g=1}^{G}
[h_{f,g,e}]_{k} S_{e}^{k} x_{g}
+ b_{f}
for f = 1, ..., F.
Inputs:
filter_taps (torch.tensor): array of filter taps; shape:
output_features x edge_features x filter_taps x input_features
GSO (torch.tensor): graph shift operator; shape:
edge_features x number_nodes x number_nodes
input (torch.tensor): input signal; shape:
batch_size x input_features x number_nodes
bias (torch.tensor): shape: output_features x number_nodes
if the same bias is to be applied to all nodes, set number_nodes = 1
so that b_{f} vector becomes b_{f} \mathbf{1}_{N}
Outputs:
output: filtered signals; shape:
batch_size x output_features x number_nodes
"""
# The basic idea of what follows is to start reshaping the input and the
# GSO so the filter coefficients go just as a very plain and simple
# linear operation, so that all the derivatives and stuff on them can be
# easily computed.
# h is output_features x edge_weights x filter_taps x input_features
    # S is edge_weights x number_nodes x number_nodes
# x is batch_size x input_features x number_nodes
# b is output_features x number_nodes
# Output:
# y is batch_size x output_features x number_nodes
# Get the parameter numbers:
F = h.shape[0]
E = h.shape[1]
K = h.shape[2]
G = h.shape[3]
assert S.shape[0] == E
N = S.shape[1]
assert S.shape[2] == N
B = x.shape[0]
assert x.shape[1] == G
assert x.shape[2] == N
# Or, in the notation we've been using:
# h in F x E x K x G
# S in E x N x N
# x in B x G x N
# b in F x N
# y in B x F x N
# Now, we have x in B x G x N and S in E x N x N, and we want to come up
# with matrix multiplication that yields z = x * S with shape
# B x E x K x G x N.
# For this, we first add the corresponding dimensions
x = x.reshape([B, 1, G, N])
S = S.reshape([1, E, N, N])
z = x.reshape([B, 1, 1, G, N]).repeat(1, E, 1, 1, 1) # This is for k = 0
# We need to repeat along the E dimension, because for k=0, S_{e} = I for
# all e, and therefore, the same signal values have to be used along all
# edge feature dimensions.
for k in range(1,K):
x = torch.matmul(x, S) # B x E x G x N
xS = x.reshape([B, E, 1, G, N]) # B x E x 1 x G x N
z = torch.cat((z, xS), dim = 2) # B x E x k x G x N
# This output z is of size B x E x K x G x N
# Now we have the x*S_{e}^{k} product, and we need to multiply with the
# filter taps.
# We multiply z on the left, and h on the right, the output is to be
# B x N x F (the multiplication is not along the N dimension), so we reshape
# z to be B x N x E x K x G and reshape it to B x N x EKG (remember we
# always reshape the last dimensions), and then make h be E x K x G x F and
# reshape it to EKG x F, and then multiply
y = torch.matmul(z.permute(0, 4, 1, 2, 3).reshape([B, N, E*K*G]),
h.reshape([F, E*K*G]).permute(1, 0)).permute(0, 2, 1)
    # And permute again to bring it from B x N x F to B x F x N.
# Finally, add the bias
if b is not None:
y = y + b
return y
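# A minimal shape-check sketch for LSIGF (not part of the original module): the
# tensor sizes below are illustrative assumptions, chosen only to show how the
# filter taps, GSO, and input signal must be laid out.
def _exampleLSIGF():
    B, F, G, E, K, N = 2, 4, 3, 1, 5, 10  # hypothetical dimensions
    h = torch.randn(F, E, K, G)  # filter taps
    S = torch.randn(E, N, N)  # graph shift operator
    x = torch.randn(B, G, N)  # input graph signals
    y = LSIGF(h, S, x)  # output: B x F x N
    assert y.shape == (B, F, N)
    return y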
def spectralGF(h, V, VH, x, b=None):
"""
spectralGF(filter_coeff, eigenbasis, eigenbasis_hermitian, input, bias=None)
Computes the output of a linear shift-invariant graph filter in spectral
form applying filter_coefficients on the graph fourier transform of the
input .
Denote as G the number of input features, F the number of output features,
E the number of edge features, N the number of nodes, S_{e} in R^{N x N}
the GSO for edge feature e with S_{e} = V_{e} Lambda_{e} V_{e}^{H} as
eigendecomposition, x in R^{G x N} the input data where x_{g} in R^{N} is
the graph signal representing feature g, and b in R^{F x N} the bias vector,
with b_{f} in R^{N} representing the bias for feature f.
Then, the LSI-GF in spectral form is computed as
y_{f} = \sum_{e=1}^{E}
\sum_{g=1}^{G}
V_{e} diag(h_{f,g,e}) V_{e}^{H} x_{g}
+ b_{f}
for f = 1, ..., F, with h_{f,g,e} in R^{N} the filter coefficients for
output feature f, input feature g and edge feature e.
Inputs:
filter_coeff (torch.tensor): array of filter coefficients; shape:
output_features x edge_features x input_features x number_nodes
eigenbasis (torch.tensor): eigenbasis of the graph shift operator;shape:
edge_features x number_nodes x number_nodes
eigenbasis_hermitian (torch.tensor): hermitian of the eigenbasis; shape:
edge_features x number_nodes x number_nodes
input (torch.tensor): input signal; shape:
batch_size x input_features x number_nodes
bias (torch.tensor): shape: output_features x number_nodes
if the same bias is to be applied to all nodes, set number_nodes = 1
so that b_{f} vector becomes b_{f} \mathbf{1}_{N}
Outputs:
output: filtered signals; shape:
batch_size x output_features x number_nodes
Obs.: While we consider most GSOs to be normal (so that the eigenbasis is
an orthonormal basis), this function would also work if V^{-1} is used as
input instead of V^{H}
"""
# The decision to input both V and V_H is to avoid any time spent in
# permuting/inverting the matrix. Because this depends on the graph and not
# the data, it can be done faster if we just input it.
# h is output_features x edge_weights x input_features x number_nodes
    # V is edge_weights x number_nodes x number_nodes
    # VH is edge_weights x number_nodes x number_nodes
# x is batch_size x input_features x number_nodes
# b is output_features x number_nodes
# Output:
# y is batch_size x output_features x number_nodes
# Get the parameter numbers:
F = h.shape[0]
E = h.shape[1]
G = h.shape[2]
N = h.shape[3]
assert V.shape[0] == VH.shape[0] == E
assert V.shape[1] == VH.shape[1] == V.shape[2] == VH.shape[2] == N
B = x.shape[0]
assert x.shape[1] == G
assert x.shape[2] == N
# Or, in the notation I've been using:
# h in F x E x G x N
# V in E x N x N
# VH in E x N x N
# x in B x G x N
# b in F x N
# y in B x F x N
# We will do proper matrix multiplication in this case (algebraic
# multiplication using column vectors instead of CS notation using row
# vectors).
# We will multiply separate VH with x, and V with diag(h).
# First, to multiply VH with x, we need to add one dimension for each one
# of them (dimension E for x and dimension B for VH)
x = x.reshape([B, 1, G, N]).permute(0, 1, 3, 2) # B x 1 x N x G
VH = VH.reshape([1, E, N, N]) # 1 x E x N x N
# Now we multiply. Note that we also permute to make it B x E x G x N
# instead of B x E x N x G because we want to multiply for a specific e and
# g, there we do not want to sum (yet) over G.
VHx = torch.matmul(VH, x).permute(0, 1, 3, 2) # B x E x G x N
# Now we want to multiply V * diag(h), both are matrices. So first, we
# add the necessary dimensions (B and G for V and an extra N for h to make
# it a matrix from a vector)
V = V.reshape([1, E, 1, N, N]) # 1 x E x 1 x N x N
# We note that multiplying by a diagonal matrix to the right is equivalent
# to an elementwise multiplication in which each column is multiplied by
# a different number, so we will do this to make it faster (elementwise
# multiplication is faster than matrix multiplication). We need to repeat
# the vector we have columnwise.
diagh = h.reshape([F, E, G, 1, N]).repeat(1, 1, 1, N, 1) # F x E x G x N x N
# And now we do elementwise multiplication
Vdiagh = V * diagh # F x E x G x N x N
# Finally, we make the multiplication of these two matrices. First, we add
# the corresponding dimensions
Vdiagh = Vdiagh.reshape([1, F, E, G, N, N]) # 1 x F x E x G x N x N
VHx = VHx.reshape([B, 1, E, G, N, 1]) # B x 1 x E x G x N x 1
# And do matrix multiplication to get all the corresponding B,F,E,G vectors
VdiaghVHx = torch.matmul(Vdiagh, VHx) # B x F x E x G x N x 1
# Get rid of the last dimension which we do not need anymore
y = VdiaghVHx.squeeze(5) # B x F x E x G x N
# Sum over G
y = torch.sum(y, dim = 3) # B x F x E x N
# Sum over E
y = torch.sum(y, dim = 2) # B x F x N
# Finally, add the bias
if b is not None:
y = y + b
return y
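# A minimal usage sketch for spectralGF (not part of the original module): it
# builds a symmetric (hence normal) GSO so the eigenbasis is real and
# orthonormal, and V^H reduces to the transpose; all dimensions are
# illustrative assumptions.
def _exampleSpectralGF():
    B, F, G, E, N = 2, 4, 3, 1, 10  # hypothetical dimensions
    Snp = np.random.randn(N, N)
    Snp = 0.5 * (Snp + Snp.T)  # symmetrize to get a normal GSO
    _, Vnp = np.linalg.eigh(Snp)  # orthonormal eigenbasis
    V = torch.tensor(Vnp, dtype=torch.float32).reshape(E, N, N)
    VH = V.permute(0, 2, 1)  # Hermitian of the (real) eigenbasis is its transpose
    h = torch.randn(F, E, G, N)  # spectral filter coefficients
    x = torch.randn(B, G, N)
    y = spectralGF(h, V, VH, x)  # output: B x F x N
    assert y.shape == (B, F, N)
    return y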
def NVGF(h, S, x, b=None):
"""
NVGF(filter_taps, GSO, input, bias=None) Computes the output of a
node-variant graph filter on input and then adds bias.
Denote as G the number of input features, F the number of output features,
E the number of edge features, K the number of shifts, N the number of
nodes, S_{e} in R^{N x N} the GSO for edge feature e, x in R^{G x N} the
input data where x_{g} in R^{N} is the graph signal representing feature
g, and b in R^{F x N} the bias vector, with b_{f} in R^{N} representing the
bias for feature f. Denote as h_{k}^{efg} in R^{N} the vector with the N
filter taps corresponding to the efg filter for shift k.
Then, the NV-GF is computed as
y_{f} = \sum_{e=1}^{E}
\sum_{k=0}^{K-1}
\sum_{g=1}^{G}
diag(h_{k}^{efg}) S_{e}^{k} x_{g}
+ b_{f}
for f = 1, ..., F.
Inputs:
filter_taps (torch.tensor): array of filter taps; shape:
output_features x edge_features x filter_taps x input_features
x number_nodes
GSO (torch.tensor): graph shift operator; shape:
edge_features x number_nodes x number_nodes
input (torch.tensor): input signal; shape:
batch_size x input_features x number_nodes
bias (torch.tensor): shape: output_features x number_nodes
if the same bias is to be applied to all nodes, set number_nodes = 1
so that b_{f} vector becomes b_{f} \mathbf{1}_{N}
Outputs:
output: filtered signals; shape:
batch_size x output_features x number_nodes
"""
# h is output_features x edge_weights x filter_taps x input_features
# x number_nodes
    # S is edge_weights x number_nodes x number_nodes
# x is batch_size x input_features x number_nodes
# b is output_features x number_nodes
# Output:
# y is batch_size x output_features x number_nodes
# Get the parameter numbers:
F = h.shape[0]
E = h.shape[1]
K = h.shape[2]
G = h.shape[3]
N = h.shape[4]
assert S.shape[0] == E
assert S.shape[1] == S.shape[2] == N
B = x.shape[0]
assert x.shape[1] == G
assert x.shape[2] == N
# Or, in the notation I've been using:
# h in F x E x K x G x N
# S in E x N x N
# x in B x G x N
# b in F x N
# y in B x F x N
# Now, we have x in B x G x N and S in E x N x N, and we want to come up
# with matrix multiplication that yields z = x * S with shape
# B x E x K x G x N.
# For this, we first add the corresponding dimensions
xr = x.reshape([B, 1, G, N])
Sr = S.reshape([1, E, N, N])
z = xr.reshape([B, 1, 1, G, N]).repeat(1, E, 1, 1, 1) # This is for k = 0
# We need to repeat along the E dimension, because for k=0, S_{e} = I for
# all e, and therefore, the same signal values have to be used along all
# edge feature dimensions.
for k in range(1,K):
xr = torch.matmul(xr, Sr) # B x E x G x N
xS = xr.reshape([B, E, 1, G, N]) # B x E x 1 x G x N
z = torch.cat((z, xS), dim = 2) # B x E x k x G x N
# This output z is of size B x E x K x G x N
# Now we have the x*S_{e}^{k} product, and we need to multiply with the
# filter taps.
# This multiplication with filter taps is ``element wise'' on N since for
# each node we have a different element
# First, add the extra dimension (F for z, and B for h)
z = z.reshape([B, 1, E, K, G, N])
h = h.reshape([1, F, E, K, G, N])
# Now let's do elementwise multiplication
zh = z * h
# And sum over the dimensions E, K, G to get B x F x N
y = torch.sum(zh, dim = 4) # Sum over G
y = torch.sum(y, dim = 3) # Sum over K
y = torch.sum(y, dim = 2) # Sum over E
# Finally, add the bias
if b is not None:
y = y + b
return y
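# A minimal shape-check sketch for NVGF (not part of the original module): the
# only difference with LSIGF is that the filter taps carry an extra node
# dimension; all sizes are illustrative assumptions.
def _exampleNVGF():
    B, F, G, E, K, N = 2, 4, 3, 1, 5, 10  # hypothetical dimensions
    h = torch.randn(F, E, K, G, N)  # node-variant filter taps
    S = torch.randn(E, N, N)  # graph shift operator
    x = torch.randn(B, G, N)  # input graph signals
    y = NVGF(h, S, x)  # output: B x F x N
    assert y.shape == (B, F, N)
    return y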
def EVGF(S, x, b=None):
"""
EVGF(filter_matrices, input, bias=None) Computes the output of an
edge-variant graph filter on input and then adds bias.
Denote as G the number of input features, F the number of output features,
E the number of edge features, K the number of shifts, N the number of
nodes, Phi_{efg} in R^{N x N} the filter matrix for edge feature e, output
feature f and input feature g (recall that Phi_{efg}^{k} has the same
sparsity pattern as the graph, except for Phi_{efg}^{0} which is expected to
be a diagonal matrix), x in R^{G x N} the input data where x_{g} in R^{N} is
the graph signal representing feature g, and b in R^{F x N} the bias vector,
with b_{f} in R^{N} representing the bias for feature f.
Then, the EV-GF is computed as
y_{f} = \sum_{e=1}^{E}
\sum_{k=0}^{K-1}
\sum_{g=1}^{G}
Phi_{efg}^{k:0} x_{g}
+ b_{f}
for f = 1, ..., F, with Phi_{efg}^{k:0} = Phi_{efg}^{k} Phi_{efg}^{k-1} ...
Phi_{efg}^{0}.
Inputs:
filter_matrices (torch.tensor): array of filter matrices; shape:
output_features x edge_features x filter_taps x input_features
x number_nodes x number_nodes
input (torch.tensor): input signal; shape:
batch_size x input_features x number_nodes
bias (torch.tensor): shape: output_features x number_nodes
if the same bias is to be applied to all nodes, set number_nodes = 1
so that b_{f} vector becomes b_{f} \mathbf{1}_{N}
Outputs:
output: filtered signals; shape:
batch_size x output_features x number_nodes
"""
# We just need to multiply by the filter_matrix recursively, and then
# add for all E, G, and K features.
# S is output_features x edge_features x filter_taps x input_features
# x number_nodes x number_nodes
# x is batch_size x input_features x number_nodes
# b is output_features x number_nodes
# Output:
# y is batch_size x output_features x number_nodes
# Get the parameter numbers:
F = S.shape[0]
E = S.shape[1]
K = S.shape[2]
G = S.shape[3]
N = S.shape[4]
assert S.shape[5] == N
B = x.shape[0]
assert x.shape[1] == G
assert x.shape[2] == N
# Or, in the notation I've been using:
# S in F x E x K x G x N x N
# x in B x G x N
# b in F x N
# y in B x F x N
# We will be doing matrix multiplications in the algebraic way, trying to
# multiply the N x N matrix corresponding to the appropriate e, f, k and g
# dimensions, with the respective x vector (N x 1 column vector)
# For this, we first add the corresponding dimensions (for x we add
# dimensions F, E and the last dimension for column vector)
x = x.reshape([B, 1, 1, G, N, 1])
# When we do index_select along dimension K we get rid of this dimension
Sk = torch.index_select(S, 2, torch.tensor(0).to(S.device)).squeeze(2)
# Sk in F x E x G x N x N
# And we add one further dimension for the batch size B
Sk = Sk.unsqueeze(0) # 1 x F x E x G x N x N
# Matrix multiplication
x = torch.matmul(Sk, x) # B x F x E x G x N x 1
# And we collect this for every k in a vector z, along the K dimension
z = x.reshape([B, F, E, 1, G, N, 1]).squeeze(6) # B x F x E x 1 x G x N
# Now we do all the matrix multiplication
for k in range(1,K):
# Extract the following k
Sk = torch.index_select(S, 2, torch.tensor(k).to(S.device)).squeeze(2)
# Sk in F x E x G x N x N
# Give space for the batch dimension B
Sk = Sk.unsqueeze(0) # 1 x F x E x G x N x N
# Multiply with the previously cumulative Sk * x
x = torch.matmul(Sk, x) # B x F x E x G x N x 1
# Get rid of the last dimension (of a column vector)
Sx = x.reshape([B, F, E, 1, G, N, 1]).squeeze(6) # B x F x E x 1 x G x N
# Add to the z
z = torch.cat((z, Sx), dim = 2) # B x F x E x k x G x N
# Sum over G
z = torch.sum(z, dim = 4)
# Sum over K
z = torch.sum(z, dim = 3)
# Sum over E
y = torch.sum(z, dim = 2)
if b is not None:
y = y + b
return y
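# A minimal shape-check sketch for EVGF (not part of the original module): note
# that the first argument gathers the full set of edge-variant filter matrices,
# one N x N matrix per (f, e, k, g) combination; all sizes are illustrative
# assumptions.
def _exampleEVGF():
    B, F, G, E, K, N = 2, 4, 3, 1, 5, 10  # hypothetical dimensions
    Phi = torch.randn(F, E, K, G, N, N)  # edge-variant filter matrices
    x = torch.randn(B, G, N)  # input graph signals
    y = EVGF(Phi, x)  # output: B x F x N
    assert y.shape == (B, F, N)
    return y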
def learnAttentionGSO(x, a, W, S, negative_slope=0.2):
"""
learnAttentionGSO(x, a, W, S) Computes the GSO following the attention
mechanism
Denote as G the number of input features, F the number of output features,
E the number of edge features, P the number of attention heads, Ji the
number of nodes in N_{i}, the neighborhood of node i, and N the number of
nodes. Let x_{i} in R^{G} be the feature associated to node i,
    W^{ep} in R^{F x G} the weight matrix associated to edge feature e and
attention head p, and a^{ep} in R^{2F} the mixing vector. Let
alpha_{ij}^{ep} in R the attention coefficient between nodes i and j, for
edge feature e and attention head p, and let s_{ij}^{e} be the value of
feature e of the edge connecting nodes i and j.
Each elements of the new GSO is alpha_{ij}^{ep} computed as
alpha_{ij}^{ep} = softmax_{j} ( LeakyReLU_{beta} (
(a^{ep})^T [cat(W^{ep}x_{i}, W^{ep} x_{j})]
))
for all j in N_{i}, and where beta is the negative slope of the leaky ReLU.
Inputs:
x (torch.tensor): input;
shape: batch_size x input_features x number_nodes
a (torch.tensor): mixing parameter; shape:
number_heads x edge_features x 2 * output_features
W (torch.tensor): linear parameter; shape:
number_heads x edge_features x output_features x input_features
S (torch.tensor): graph shift operator; shape:
edge_features x number_nodes x number_nodes
negative_slope (float): negative slope of the leaky relu (default: 0.2)
Outputs:
aij: output GSO; shape:
batch_size x number_heads x edge_features x number_nodes x number_nodes
"""
B = x.shape[0] # batch_size
G = x.shape[1] # input_features
N = x.shape[2] # number_nodes
P = a.shape[0] # number_heads
E = a.shape[1] # edge_features
assert W.shape[0] == P
assert W.shape[1] == E
F = W.shape[2] # output_features
assert a.shape[2] == int(2*F)
G = W.shape[3] # input_features
assert S.shape[0] == E
assert S.shape[1] == S.shape[2] == N
# Add ones of the GSO at all edge feature levels so that the node always
# has access to itself. The fact that it's one is not so relevant, because
# the attention coefficient that is learned would compensate for this
S = S + torch.eye(N).reshape([1,N,N]).repeat(E,1,1).to(S.device)
# WARNING:
# (If the GSOs already have self-connections, then these will be added a 1,
# which might be a problem if the self-connection is a -1. I will have to
# think of this more carefully)
# W is of size P x E x F x G
# a is of size P x E x 2F
# Compute Wx for all nodes
x = x.reshape([B, 1, 1, G, N])
W = W.reshape([1, P, E, F, G])
Wx = torch.matmul(W, x) # B x P x E x F x N
# Now, do a_1^T Wx, and a_2^T Wx to get a tensor of shape B x P x E x 1 x N
# because we're applying the inner product on the F dimension.
    a1 = torch.index_select(a, 2, torch.arange(F).to(x.device)) # P x E x F
    a2 = torch.index_select(a, 2, torch.arange(F, 2*F).to(x.device)) # P x E x F
a1Wx = torch.matmul(a1.reshape([1, P, E, 1, F]), Wx) # B x P x E x 1 x N
a2Wx = torch.matmul(a2.reshape([1, P, E, 1, F]), Wx) # B x P x E x 1 x N
# And then, use this to sum them accordingly and create a B x P x E x N x N
# matrix.
aWx = a1Wx + a2Wx.permute(0, 1, 2, 4, 3) # B x P x E x N x N
# Obs.: In this case, we have one column vector and one row vector; then,
# what the sum does, is to repeat the column and the row, respectively,
# until both matrices are of the same size, and then adds up, which is
# precisely what we want to do
# Apply the LeakyRelu
eij = nn.functional.leaky_relu(aWx, negative_slope = negative_slope)
# B x P x E x N x N
# Each element of this N x N matrix is, precisely, e_ij (eq. 1) in the GAT
# paper.
# And apply the softmax. For the softmax, we do not want to consider
# the places where there are no neighbors, so we need to set them to -infty
# so that they will be assigned a zero.
# First, get places where we have edges
maskEdges = torch.sum(torch.abs(S.data), dim = 0)
# Make it a binary matrix
maskEdges = (maskEdges > zeroTolerance).type(x.dtype)
# Make it -infinity where there are zeros
infinityMask = (1-maskEdges) * infiniteNumber
# Compute the softmax plus the -infinity (we first force the places where
# there is no edge to be zero, and then we add -infinity to them)
aij = nn.functional.softmax(eij*maskEdges - infinityMask, dim = 4)
# B x P x E x N x N
# This will give me a matrix of all the alpha_ij coefficients.
# Re-inforce the zeros just to be sure
return aij * maskEdges # B x P x E x N x N
def graphAttention(x, a, W, S, negative_slope=0.2):
"""
graphAttention(x, a, W, S) Computes attention following GAT layer taking
into account multiple edge features.
Denote as G the number of input features, F the number of output features,
E the number of edge features, P the number of attention heads, Ji the
number of nodes in N_{i}, the neighborhood of node i, and N the number of
nodes. Let x_{i} in R^{G} be the feature associated to node i,
    W^{ep} in R^{F x G} the weight matrix associated to edge feature e and
attention head p, and a^{ep} in R^{2F} the mixing vector. Let
alpha_{ij}^{ep} in R the attention coefficient between nodes i and j, for
edge feature e and attention head p, and let s_{ij}^{e} be the value of
feature e of the edge connecting nodes i and j.
Let y_{i}^{p} in R^{F} be the output of the graph attention at node i for
attention head p. It is computed as
y_{i}^{p} = \sum_{e=1}^{E}
\sum_{j in N_{i}}
s_{ij}^{e} alpha_{ij}^{ep} W^{ep} x_{j}
with
alpha_{ij}^{ep} = softmax_{j} ( LeakyReLU_{beta} (
(a^{ep})^T [cat(W^{ep}x_{i}, W^{ep} x_{j})]
))
for all j in N_{i}, and where beta is the negative slope of the leaky ReLU.
Inputs:
x (torch.tensor): input;
shape: batch_size x input_features x number_nodes
a (torch.tensor): mixing parameter; shape:
number_heads x edge_features x 2 * output_features
W (torch.tensor): linear parameter; shape:
number_heads x edge_features x output_features x input_features
S (torch.tensor): graph shift operator; shape:
edge_features x number_nodes x number_nodes
negative_slope (float): negative slope of the leaky relu (default: 0.2)
Outputs:
y: output; shape:
batch_size x number_heads x output_features x number_nodes
"""
B = x.shape[0] # batch_size
G = x.shape[1] # input_features
N = x.shape[2] # number_nodes
P = a.shape[0] # number_heads
E = a.shape[1] # edge_features
assert W.shape[0] == P
assert W.shape[1] == E
F = W.shape[2] # output_features
assert a.shape[2] == int(2*F)
G = W.shape[3] # input_features
assert S.shape[0] == E
assert S.shape[1] == S.shape[2] == N
# First, we need to learn the attention GSO
aij = learnAttentionGSO(x, a, W, S, negative_slope = negative_slope)
# B x P x E x N x N
# Then, we need to compute the high-level features
# W is of size P x E x F x G
# a is of size P x E x 2F
# Compute Wx for all nodes
x = x.reshape([B, 1, 1, G, N])
W = W.reshape([1, P, E, F, G])
Wx = torch.matmul(W, x) # B x P x E x F x N
# Finally, we just need to apply this matrix to the Wx which we have already
# computed, and done.
y = torch.matmul(Wx, S.reshape([1, 1, E, N, N]) * aij) # B x P x E x F x N
# And sum over all edges
return torch.sum(y, dim = 2) # B x P x F x N
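# Illustrative shape check for graphAttention (not part of the original
# library): the sizes, the _exampleGraphAttention name, and the random binary
# GSO below are assumptions used only to document the tensor dimensions.
def _exampleGraphAttention():
    B, G, F, P, E, N = 5, 3, 4, 2, 1, 10
    x = torch.rand(B, G, N) # batch of graph signals
    a = torch.rand(P, E, 2 * F) # mixing parameters
    W = torch.rand(P, E, F, G) # linear parameters
    S = (torch.rand(E, N, N) > 0.5).type(x.dtype) # random binary GSO
    y = graphAttention(x, a, W, S)
    assert y.shape == (B, P, F, N) # one set of output features per head
    return y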
class MaxLocalActivation(nn.Module):
# <NAME>, <EMAIL>, 2019/03/15
"""
MaxLocalActivation creates a localized activation function layer on graphs
Initialization:
MaxLocalActivation(K)
Inputs:
K (int): number of hops (>0)
Output:
torch.nn.Module for a localized max activation function layer
Add graph shift operator:
MaxLocalActivation.addGSO(GSO) Before applying the filter, we need to
    define the GSO that we are going to use. This allows changing the GSO
while using the same filtering coefficients (as long as the number of
edge features is the same; but the number of nodes can change).
Inputs:
GSO (torch.tensor): graph shift operator; shape:
edge_features x number_nodes x number_nodes
Forward call:
y = MaxLocalActivation(x)
Inputs:
x (torch.tensor): input data; shape:
batch_size x dim_features x number_nodes
Outputs:
y (torch.tensor): activated data; shape:
batch_size x dim_features x number_nodes
"""
def __init__(self, K):
super().__init__()
assert K > 0 # range has to be greater than 0
self.K = K
self.S = None # no GSO assigned yet
self.N = None # no GSO assigned yet (N learned from the GSO)
        self.neighborhood = None # no neighborhoods calculated yet
# Create parameters:
self.weight = nn.parameter.Parameter(torch.Tensor(1,self.K+1))
# Initialize parameters
self.reset_parameters()
def addGSO(self, S):
# Every S has 3 dimensions.
assert len(S.shape) == 3
# S is of shape E x N x N
self.N = S.shape[1]
assert S.shape[2] == self.N
self.S = S
# The neighborhood matrix has to be a tensor of shape
# nOutputNodes x maxNeighborhoodSize
neighborhood = []
maxNeighborhoodSizes = []
for k in range(1,self.K+1):
# For each hop (0,1,...) in the range K
thisNeighborhood = graphTools.computeNeighborhood(
np.array(self.S), k, outputType='matrix')
# compute the k-hop neighborhood
neighborhood.append(torch.tensor(thisNeighborhood))
maxNeighborhoodSizes.append(thisNeighborhood.shape[1])
self.maxNeighborhoodSizes = maxNeighborhoodSizes
self.neighborhood = neighborhood
def forward(self, x):
# x should be of shape batchSize x dimNodeSignals x N
batchSize = x.shape[0]
dimNodeSignals = x.shape[1]
assert x.shape[2] == self.N
# And given that the self.neighborhood is already a torch.tensor matrix
# we can just go ahead and get it.
# So, x is of shape B x F x N. But we need it to be of shape
# B x F x N x maxNeighbor. Why? Well, because we need to compute the
# maximum between the value of each node and those of its neighbors.
# And we do this by applying a torch.max across the rows (dim = 3) so
# that we end up again with a B x F x N, but having computed the max.
# How to fill those extra dimensions? Well, what we have is neighborhood
# matrix, and we are going to use torch.gather to bring the right
# values (torch.index_select, while more straightforward, only works
# along a single dimension).
# Each row of the matrix neighborhood determines all the neighbors of
# each node: the first row contains all the neighbors of the first node,
# etc.
# The values of the signal at those nodes are contained in the dim = 2
# of x. So, just for now, let's ignore the batch and feature dimensions
# and imagine we have a column vector: N x 1. We have to pick some of
# the elements of this vector and line them up alongside each row
# so that then we can compute the maximum along these rows.
# When we torch.gather along dimension 0, we are selecting which row to
# pick according to each column. Thus, if we have that the first row
# of the neighborhood matrix is [1, 2, 0] means that we want to pick
# the value at row 1 of x, at row 2 of x in the next column, and at row
# 0 of the last column. For these values to be the appropriate ones, we
# have to repeat x as columns to build our b x F x N x maxNeighbor
# matrix.
xK = x # xK is a tensor aggregating the 0-hop (x), 1-hop, ..., K-hop
        # max's; it is initialized with the 0-hop neigh. (x itself)
xK = xK.unsqueeze(3) # extra dimension added for concatenation ahead
x = x.unsqueeze(3) # B x F x N x 1
# And the neighbors that we need to gather are the same across the batch
# and feature dimensions, so we need to repeat the matrix along those
# dimensions
for k in range(1,self.K+1):
x_aux = x.repeat([1, 1, 1, self.maxNeighborhoodSizes[k-1]])
gatherNeighbor = self.neighborhood[k-1].reshape(
[1,
1,
self.N,
self.maxNeighborhoodSizes[k-1]]
)
gatherNeighbor = gatherNeighbor.repeat([batchSize,
dimNodeSignals,
1,
1])
# And finally we're in position of getting all the neighbors in line
xNeighbors = torch.gather(x_aux, 2, gatherNeighbor.long())
# B x F x nOutput x maxNeighbor
# Note that this gather function already reduces the dimension to
# nOutputNodes.
# And proceed to compute the maximum along this dimension
v, _ = torch.max(xNeighbors, dim = 3)
v = v.unsqueeze(3) # to concatenate with xK
xK = torch.cat((xK,v),3)
out = torch.matmul(xK,self.weight.unsqueeze(2))
# multiply each k-hop max by corresponding weight
out = out.reshape([batchSize,dimNodeSignals,self.N])
return out
def reset_parameters(self):
# Taken from _ConvNd initialization of parameters:
stdv = 1. / math.sqrt(self.K)
self.weight.data.uniform_(-stdv, stdv)
def extra_repr(self):
if self.neighborhood is not None:
reprString = "neighborhood stored"
else:
reprString = "NO neighborhood stored"
return reprString
class MedianLocalActivation(nn.Module):
# <NAME>, <EMAIL>, 2019/03/27
"""
MedianLocalActivation creates a localized activation function layer on
graphs
Initialization:
MedianLocalActivation(K)
Inputs:
K (int): number of hops (>0)
Output:
torch.nn.Module for a localized median activation function layer
Add graph shift operator:
MedianLocalActivation.addGSO(GSO) Before applying the filter, we need
    to define the GSO that we are going to use. This allows changing the
GSO while using the same filtering coefficients (as long as the number
of edge features is the same; but the number of nodes can change).
This function also calculates the 0-,1-,...,K-hop neighborhoods of every
node
Inputs:
GSO (torch.tensor): graph shift operator; shape:
edge_features x number_nodes x number_nodes
Forward call:
y = MedianLocalActivation(x)
Inputs:
x (torch.tensor): input data; shape:
batch_size x dim_features x number_nodes
Outputs:
y (torch.tensor): activated data; shape:
batch_size x dim_features x number_nodes
"""
def __init__(self, K):
super().__init__()
assert K > 0 # range has to be greater than 0
self.K = K
self.S = None # no GSO assigned yet
self.N = None # no GSO assigned yet (N learned from the GSO)
        self.neighborhood = None # no neighborhoods calculated yet
        self.masks = None # no mask yet
# Create parameters:
self.weight = nn.parameter.Parameter(torch.Tensor(1,self.K+1))
# Initialize parameters
self.reset_parameters()
def addGSO(self, S):
# Every S has 3 dimensions.
assert len(S.shape) == 3
# S is of shape E x N x N
self.N = S.shape[1]
assert S.shape[2] == self.N
self.S = S
# The neighborhood matrix has to be a tensor of shape
# nOutputNodes x maxNeighborhoodSize
neighborhood = []
for k in range(1,self.K+1):
# For each hop (0,1,...) in the range K
thisNeighborhood = graphTools.computeNeighborhood(
np.array(self.S), k, outputType='list')
# compute the k-hop neighborhood
neighborhood.append(thisNeighborhood)
self.neighborhood = neighborhood
def forward(self, x):
# x should be of shape batchSize x dimNodeSignals x N
batchSize = x.shape[0]
dimNodeSignals = x.shape[1]
assert x.shape[2] == self.N
xK = x # xK is a tensor aggregating the 0-hop (x), 1-hop, ..., K-hop
               # medians
# It is initialized with the 0-hop neigh. (x itself)
xK = xK.unsqueeze(3) # extra dimension added for concatenation ahead
#x = x.unsqueeze(3) # B x F x N x 1
for k in range(1,self.K+1):
kHopNeighborhood = self.neighborhood[k-1]
# Fetching k-hop neighborhoods of all nodes
kHopMedian = torch.empty(0)
# Initializing the vector that will contain the k-hop median for
# every node
for n in range(self.N):
# Iterating over the nodes
# This step is necessary because here the neighborhoods are
# lists of lists. It is impossible to pad them and feed them as
# a matrix, as this would impact the outcome of the median
# operation
nodeNeighborhood = torch.tensor(np.array(kHopNeighborhood[n]))
neighborhoodLen = len(nodeNeighborhood)
gatherNode = nodeNeighborhood.reshape([1, 1, neighborhoodLen])
gatherNode = gatherNode.repeat([batchSize, dimNodeSignals, 1])
# Reshaping the node neighborhood for the gather operation
xNodeNeighbors = torch.gather(x, 2, gatherNode.long())
# Gathering signal values in the node neighborhood
nodeMedian,_ = torch.median(xNodeNeighbors, dim = 2,
keepdim=True)
# Computing the median in the neighborhood
kHopMedian = torch.cat([kHopMedian,nodeMedian],2)
# Concatenating k-hop medians node by node
kHopMedian = kHopMedian.unsqueeze(3) # Extra dimension for
# concatenation with the previous (k-1)-hop median tensor
xK = torch.cat([xK,kHopMedian],3)
out = torch.matmul(xK,self.weight.unsqueeze(2))
# Multiplying each k-hop median by corresponding trainable weight
out = out.reshape([batchSize,dimNodeSignals,self.N])
return out
def reset_parameters(self):
# Taken from _ConvNd initialization of parameters:
stdv = 1. / math.sqrt(self.K)
self.weight.data.uniform_(-stdv, stdv)
def extra_repr(self):
if self.neighborhood is not None:
reprString = "neighborhood stored"
else:
reprString = "NO neighborhood stored"
return reprString
class NoActivation(nn.Module):
"""
NoActivation creates an activation layer that does nothing
It is for completeness, to be able to switch between linear models
and nonlinear models, without altering the entire architecture model
Initialization:
NoActivation()
Output:
torch.nn.Module for an empty activation layer
Forward call:
y = NoActivation(x)
Inputs:
x (torch.tensor): input data; shape:
batch_size x dim_features x number_nodes
Outputs:
y (torch.tensor): activated data; shape:
batch_size x dim_features x number_nodes
"""
def __init__(self):
super().__init__()
def forward(self, x):
return x
def extra_repr(self):
reprString = "No Activation Function"
return reprString
class NoPool(nn.Module):
"""
This is a pooling layer that actually does no pooling. It has the same input
structure and methods of MaxPoolLocal() for consistency. Basically, this
allows us to change from pooling to no pooling without necessarily creating
a new architecture.
In any case, we're pretty sure this function should never ship, and pooling
can be avoided directly when defining the architecture.
"""
def __init__(self, nInputNodes, nOutputNodes, nHops):
super().__init__()
self.nInputNodes = nInputNodes
self.nOutputNodes = nOutputNodes
self.nHops = nHops
self.neighborhood = None
def addGSO(self, GSO):
# This is necessary to keep the form of the other pooling strategies
# within the SelectionGNN framework. But we do not care about any GSO.
pass
def forward(self, x):
# x should be of shape batchSize x dimNodeSignals x nInputNodes
assert x.shape[2] == self.nInputNodes
# Check that there are at least the same number of nodes that
# we will keep (otherwise, it would be unpooling, instead of
# pooling)
assert x.shape[2] >= self.nOutputNodes
# And do not do anything
return x
def extra_repr(self):
reprString = "in_dim=%d, out_dim=%d, number_hops = %d, " % (
self.nInputNodes, self.nOutputNodes, self.nHops)
reprString += "no neighborhood needed"
return reprString
class MaxPoolLocal(nn.Module):
"""
MaxPoolLocal Creates a pooling layer on graphs by selecting nodes
Initialization:
MaxPoolLocal(in_dim, out_dim, number_hops)
Inputs:
in_dim (int): number of nodes at the input
out_dim (int): number of nodes at the output
number_hops (int): number of hops to pool information
Output:
torch.nn.Module for a local max-pooling layer.
Observation: The selected nodes for the output are always the top ones.
Add graph shift operator:
GraphFilter.addGSO(GSO) Before being used, we need to define the GSO
that will determine the neighborhood that we are going to pool.
Inputs:
GSO (torch.tensor): graph shift operator; shape:
edge_features x number_nodes x number_nodes
Forward call:
v = MaxPoolLocal(x)
Inputs:
x (torch.tensor): input data; shape:
batch_size x dim_features x in_dim
Outputs:
y (torch.tensor): pooled data; shape:
batch_size x dim_features x out_dim
"""
def __init__(self, nInputNodes, nOutputNodes, nHops):
super().__init__()
self.nInputNodes = nInputNodes
self.nOutputNodes = nOutputNodes
self.nHops = nHops
self.neighborhood = None
def addGSO(self, S):
# Every S has 3 dimensions.
assert len(S.shape) == 3
# S is of shape E x N x N (And I don't care about E, because the
# computeNeighborhood function takes care of it)
self.N = S.shape[1]
assert S.shape[2] == self.N
# Get the device (before operating with S and losing it, it's cheaper
# to store the device now, than to duplicate S -i.e. keep a numpy and a
# tensor copy of S)
device = S.device
# Move the GSO to cpu and to np.array so it can be handled by the
# computeNeighborhood function
S = np.array(S.cpu())
# Compute neighborhood
neighborhood = graphTools.computeNeighborhood(S, self.nHops,
self.nOutputNodes,
self.nInputNodes,'matrix')
# And move the neighborhood back to a tensor
neighborhood = torch.tensor(neighborhood).to(device)
# The neighborhood matrix has to be a tensor of shape
# nOutputNodes x maxNeighborhoodSize
assert neighborhood.shape[0] == self.nOutputNodes
assert neighborhood.max() <= self.nInputNodes
# Store all the relevant information
self.maxNeighborhoodSize = neighborhood.shape[1]
self.neighborhood = neighborhood
def forward(self, x):
# x should be of shape batchSize x dimNodeSignals x nInputNodes
batchSize = x.shape[0]
dimNodeSignals = x.shape[1]
assert x.shape[2] == self.nInputNodes
# Check that there are at least the same number of nodes that
# we will keep (otherwise, it would be unpooling, instead of
# pooling)
assert x.shape[2] >= self.nOutputNodes
# And given that the self.neighborhood is already a torch.tensor matrix
# we can just go ahead and get it.
# So, x is of shape B x F x N. But we need it to be of shape
# B x F x N x maxNeighbor. Why? Well, because we need to compute the
# maximum between the value of each node and those of its neighbors.
# And we do this by applying a torch.max across the rows (dim = 3) so
# that we end up again with a B x F x N, but having computed the max.
# How to fill those extra dimensions? Well, what we have is neighborhood
# matrix, and we are going to use torch.gather to bring the right
# values (torch.index_select, while more straightforward, only works
# along a single dimension).
# Each row of the matrix neighborhood determines all the neighbors of
# each node: the first row contains all the neighbors of the first node,
# etc.
# The values of the signal at those nodes are contained in the dim = 2
# of x. So, just for now, let's ignore the batch and feature dimensions
# and imagine we have a column vector: N x 1. We have to pick some of
# the elements of this vector and line them up alongside each row
# so that then we can compute the maximum along these rows.
# When we torch.gather along dimension 0, we are selecting which row to
# pick according to each column. Thus, if we have that the first row
# of the neighborhood matrix is [1, 2, 0] means that we want to pick
# the value at row 1 of x, at row 2 of x in the next column, and at row
# 0 of the last column. For these values to be the appropriate ones, we
# have to repeat x as columns to build our b x F x N x maxNeighbor
# matrix.
x = x.unsqueeze(3) # B x F x N x 1
x = x.repeat([1, 1, 1, self.maxNeighborhoodSize]) # BxFxNxmaxNeighbor
# And the neighbors that we need to gather are the same across the batch
# and feature dimensions, so we need to repeat the matrix along those
# dimensions
gatherNeighbor = self.neighborhood.reshape([1, 1,
self.nOutputNodes,
self.maxNeighborhoodSize])
gatherNeighbor = gatherNeighbor.repeat([batchSize, dimNodeSignals, 1,1])
# And finally we're in position of getting all the neighbors in line
xNeighbors = torch.gather(x, 2, gatherNeighbor)
# B x F x nOutput x maxNeighbor
# Note that this gather function already reduces the dimension to
# nOutputNodes.
# And proceed to compute the maximum along this dimension
v, _ = torch.max(xNeighbors, dim = 3)
return v
def extra_repr(self):
reprString = "in_dim=%d, out_dim=%d, number_hops = %d, " % (
self.nInputNodes, self.nOutputNodes, self.nHops)
if self.neighborhood is not None:
reprString += "neighborhood stored"
else:
reprString += "NO neighborhood stored"
return reprString
class GraphFilter(nn.Module):
"""
GraphFilter Creates a (linear) layer that applies a graph filter
Initialization:
GraphFilter(in_features, out_features, filter_taps,
edge_features=1, bias=True)
Inputs:
in_features (int): number of input features (each feature is a graph
signal)
out_features (int): number of output features (each feature is a
graph signal)
filter_taps (int): number of filter taps
edge_features (int): number of features over each edge
bias (bool): add bias vector (one bias per feature) after graph
filtering
Output:
torch.nn.Module for a graph filtering layer (also known as graph
convolutional layer).
Observation: Filter taps have shape
out_features x edge_features x filter_taps x in_features
Add graph shift operator:
GraphFilter.addGSO(GSO) Before applying the filter, we need to define
        the GSO that we are going to use. This allows changing the GSO while
using the same filtering coefficients (as long as the number of edge
features is the same; but the number of nodes can change).
Inputs:
GSO (torch.tensor): graph shift operator; shape:
edge_features x number_nodes x number_nodes
Forward call:
y = GraphFilter(x)
Inputs:
x (torch.tensor): input data; shape:
batch_size x in_features x number_nodes
Outputs:
y (torch.tensor): output; shape:
batch_size x out_features x number_nodes
"""
def __init__(self, G, F, K, E = 1, bias = True):
# K: Number of filter taps
# GSOs will be added later.
# This combines both weight scalars and weight vectors.
# Bias will always be shared and scalar.
# Initialize parent
super().__init__()
# Save parameters:
self.G = G
self.F = F
self.K = K
self.E = E
self.S = None # No GSO assigned yet
# Create parameters:
self.weight = nn.parameter.Parameter(torch.Tensor(F, E, K, G))
if bias:
self.bias = nn.parameter.Parameter(torch.Tensor(F, 1))
else:
self.register_parameter('bias', None)
# Initialize parameters
self.reset_parameters()
def reset_parameters(self):
# Taken from _ConvNd initialization of parameters:
stdv = 1. / math.sqrt(self.G * self.K)
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def addGSO(self, S):
# Every S has 3 dimensions.
assert len(S.shape) == 3
# S is of shape E x N x N
assert S.shape[0] == self.E
self.N = S.shape[1]
assert S.shape[2] == self.N
self.S = S
def forward(self, x):
# x is of shape: batchSize x dimInFeatures x numberNodesIn
B = x.shape[0]
F = x.shape[1]
Nin = x.shape[2]
# And now we add the zero padding
if Nin < self.N:
x = torch.cat((x,
torch.zeros(B, F, self.N-Nin)\
.type(x.dtype).to(x.device)
), dim = 2)
# Compute the filter output
u = LSIGF(self.weight, self.S, x, self.bias)
# So far, u is of shape batchSize x dimOutFeatures x numberNodes
# And we want to return a tensor of shape
# batchSize x dimOutFeatures x numberNodesIn
# since the nodes between numberNodesIn and numberNodes are not required
if Nin < self.N:
u = torch.index_select(u, 2, torch.arange(Nin).to(u.device))
return u
def extra_repr(self):
reprString = "in_features=%d, out_features=%d, " % (
self.G, self.F) + "filter_taps=%d, " % (
self.K) + "edge_features=%d, " % (self.E) +\
"bias=%s, " % (self.bias is not None)
if self.S is not None:
reprString += "GSO stored"
else:
reprString += "no GSO stored"
return reprString
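# Illustrative usage sketch for GraphFilter (not part of the original
# library): the sizes and the _exampleGraphFilter name are assumptions; the
# GSO is just a random matrix with a single edge feature.
def _exampleGraphFilter():
    N, G, F, K, B = 10, 3, 5, 4, 20
    S = torch.rand(1, N, N) # GSO of shape edge_features x N x N, with E = 1
    gfl = GraphFilter(G, F, K, E=1, bias=True)
    gfl.addGSO(S) # the GSO has to be assigned before the first forward call
    x = torch.rand(B, G, N)
    y = gfl(x) # batch_size x out_features x number_nodes
    assert y.shape == (B, F, N)
    return y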
class GraphFilterRNN(nn.Module):
"""
GraphFilterRNN Creates a (linear) layer that applies a graph filter
with Hidden Markov Model
Initialization:
GraphFilterRNN(in_features, out_features, hidden_features, filter_taps,
edge_features=1, bias=True)
Inputs:
in_features (int): number of input features (each feature is a graph
signal)
out_features (int): number of output features (each feature is a
graph signal)
hidden_features (int): number of hidden features (each feature is a
graph signal)
filter_taps (int): number of filter taps
edge_features (int): number of features over each edge
bias (bool): add bias vector (one bias per feature) after graph
filtering
Output:
torch.nn.Module for a graph filtering layer (also known as graph
convolutional layer).
Observation: Filter taps have shape
out_features x edge_features x filter_taps x in_features
Add graph shift operator:
GraphFilter.addGSO(GSO) Before applying the filter, we need to define
        the GSO that we are going to use. This allows changing the GSO while
using the same filtering coefficients (as long as the number of edge
features is the same; but the number of nodes can change).
Inputs:
GSO (torch.tensor): graph shift operator; shape:
edge_features x number_nodes x number_nodes
Forward call:
y = GraphFilter(x)
Inputs:
x (torch.tensor): input data; shape:
batch_size x in_features x number_nodes
Outputs:
y (torch.tensor): output; shape:
batch_size x out_features x number_nodes
"""
def __init__(self, G, H, F, K, E=1, bias=True):
# K: Number of filter taps
# GSOs will be added later.
# This combines both weight scalars and weight vectors.
# Bias will always be shared and scalar.
# Initialize parent
super().__init__()
# Save parameters:
self.G = G # in_features
self.F = F # out_features
self.H = H # hidden_features
self.K = K # filter_taps
self.E = E # edge_features
self.S = None # No GSO assigned yet
# Create parameters:
self.weight_A = nn.parameter.Parameter(torch.Tensor(H, E, K, G))
self.weight_B = nn.parameter.Parameter(torch.Tensor(H, E, K, H))
self.weight_U = nn.parameter.Parameter(torch.Tensor(F, E, K, H))
if bias:
self.bias_A = nn.parameter.Parameter(torch.Tensor(H, 1))
self.bias_B = nn.parameter.Parameter(torch.Tensor(H, 1))
self.bias_U = nn.parameter.Parameter(torch.Tensor(F, 1))
        else:
            self.register_parameter('bias_A', None)
            self.register_parameter('bias_B', None)
            self.register_parameter('bias_U', None)
# Initialize parameters
self.reset_parameters()
def reset_parameters(self):
# Taken from _ConvNd initialization of parameters:
stdv_a = 1. / math.sqrt(self.G * self.K)
self.weight_A.data.uniform_(-stdv_a, stdv_a)
if self.bias_A is not None:
self.bias_A.data.uniform_(-stdv_a, stdv_a)
stdv_b = 1. / math.sqrt(self.H * self.K)
self.weight_B.data.uniform_(-stdv_b, stdv_b)
if self.bias_B is not None:
self.bias_B.data.uniform_(-stdv_b, stdv_b)
stdv_u = 1. / math.sqrt(self.H * self.K)
self.weight_U.data.uniform_(-stdv_u, stdv_u)
if self.bias_U is not None:
self.bias_U.data.uniform_(-stdv_u, stdv_u)
def addGSO(self, S):
# Every S has 3 dimensions.
assert len(S.shape) == 3
# S is of shape E x N x N
assert S.shape[0] == self.E
self.N = S.shape[1]
assert S.shape[2] == self.N
self.S = S
def forward(self, x, h):
# x is of shape: batchSize x dimInFeatures x numberNodesIn
B = x.shape[0]
F = x.shape[1]
Nin = x.shape[2]
# And now we add the zero padding
if Nin < self.N:
x = torch.cat((x,
torch.zeros(B, F, self.N - Nin) \
.type(x.dtype).to(x.device)
), dim=2)
# Compute the filter output
u_a = LSIGF(self.weight_A, self.S, x, self.bias_A)
u_b = LSIGF(self.weight_B, self.S, h, self.bias_B)
h = u_a + u_b
u = LSIGF(self.weight_U, self.S, h, self.bias_U)
# So far, u is of shape batchSize x dimOutFeatures x numberNodes
# And we want to return a tensor of shape
# batchSize x dimOutFeatures x numberNodesIn
# since the nodes between numberNodesIn and numberNodes are not required
if Nin < self.N:
u = torch.index_select(u, 2, torch.arange(Nin).to(u.device))
return u
def extra_repr(self):
        reprString = "in_features=%d, out_features=%d, hidden_features=%d, " % (
            self.G, self.F, self.H) + "filter_taps=%d, " % (
            self.K) + "edge_features=%d, " % (self.E) + \
            "bias=%s, " % (self.bias_A is not None)
if self.S is not None:
reprString += "GSO stored"
else:
reprString += "no GSO stored"
return reprString
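# Illustrative usage sketch for GraphFilterRNN (not part of the original
# library): a single recurrent step with an explicit (zero) hidden state.
# All sizes and the _exampleGraphFilterRNN name are assumptions.
def _exampleGraphFilterRNN():
    N, G, H, F, K, B = 10, 3, 6, 5, 4, 20
    S = torch.rand(1, N, N) # E = 1 edge feature
    gfrnn = GraphFilterRNN(G, H, F, K, E=1, bias=True)
    gfrnn.addGSO(S)
    x = torch.rand(B, G, N) # input graph signals
    h = torch.zeros(B, H, N) # initial hidden state
    y = gfrnn(x, h) # batch_size x out_features x number_nodes
    assert y.shape == (B, F, N)
    return y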
class SpectralGF(nn.Module):
"""
SpectralGF Creates a (linear) layer that applies a LSI graph filter in the
spectral domain using a cubic spline if needed.
Initialization:
GraphFilter(in_features, out_features, filter_coeff,
edge_features=1, bias=True)
Inputs:
in_features (int): number of input features (each feature is a graph
signal)
out_features (int): number of output features (each feature is a
graph signal)
filter_coeff (int): number of filter spectral coefficients
edge_features (int): number of features over each edge
bias (bool): add bias vector (one bias per feature) after graph
filtering
Output:
torch.nn.Module for a graph filtering layer (also known as graph
convolutional layer) implemented in the spectral domain.
Observation: Filter taps have shape
out_features x edge_features x in_features x filter_coeff
Add graph shift operator:
SpectralGF.addGSO(GSO) Before applying the filter, we need to define
        the GSO that we are going to use. This allows changing the GSO while
using the same filtering coefficients (as long as the number of edge
features is the same; but the number of nodes can change).
Inputs:
GSO (torch.tensor): graph shift operator; shape:
edge_features x number_nodes x number_nodes
Forward call:
y = SpectralGF(x)
Inputs:
x (torch.tensor): input data; shape:
batch_size x in_features x number_nodes
Outputs:
y (torch.tensor): output; shape:
batch_size x out_features x number_nodes
"""
def __init__(self, G, F, M, E = 1, bias = True):
# GSOs will be added later.
# Bias will always be shared and scalar.
# Initialize parent
super().__init__()
# Save parameters:
self.G = G
self.F = F
self.M = M
self.E = E
self.S = None # No GSO assigned yet
# Create parameters:
self.weight = nn.parameter.Parameter(torch.Tensor(F, E, G, M))
if bias:
self.bias = nn.parameter.Parameter(torch.Tensor(F, 1))
else:
self.register_parameter('bias', None)
# Initialize parameters
self.reset_parameters()
def reset_parameters(self):
# Taken from _ConvNd initialization of parameters:
stdv = 1. / math.sqrt(self.G * self.M)
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def addGSO(self, S):
# Every S has to have 3 dimensions.
assert len(S.shape) == 3
# S is of shape E x N x N
assert S.shape[0] == self.E
self.N = S.shape[1]
assert S.shape[2] == self.N
self.S = S # Save S
# Now we need to compute the eigendecomposition and save it
# To compute the eigendecomposition, we use numpy.
# So, first, get S in numpy format.
Snp = np.array(S.data.cpu())
# We will compute the eigendecomposition for each edge feature, so we
# create the E x N x N space for V, VH and Lambda (we need lambda for
# the spline kernel)
V = np.zeros([self.E, self.N, self.N])
VH = np.zeros([self.E, self.N, self.N])
Lambda = np.zeros([self.E, self.N])
# Here we save the resulting spline kernel matrix
splineKernel = np.zeros([self.E, self.N, self.M])
for e in range(self.E):
# Compute the eigendecomposition
Lambda[e,:], V[e,:,:] = np.linalg.eig(Snp[e,:,:])
# Compute the hermitian
VH[e,:,:] = V[e,:,:].conj().T
# Compute the splineKernel basis matrix
splineKernel[e,:,:] = graphTools.splineBasis(self.M, Lambda[e,:])
# Transform everything to tensors of appropriate type on appropriate
# device, and store them.
self.V = torch.tensor(V).type(S.dtype).to(S.device) # E x N x N
self.VH = torch.tensor(VH).type(S.dtype).to(S.device) # E x N x N
self.splineKernel = torch.tensor(splineKernel)\
.type(S.dtype).to(S.device)
# E x N x M
# Once we have computed the splineKernel, we do not need to save the
# eigenvalues.
def forward(self, x):
# x is of shape: batchSize x dimInFeatures x numberNodesIn
B = x.shape[0]
F = x.shape[1]
Nin = x.shape[2]
# Check if we have enough spectral filter coefficients as needed, or if
# we need to fill out the rest using the spline kernel.
if self.M == self.N:
self.h = self.weight # F x E x G x N (because N = M)
else:
# Adjust dimensions for proper algebraic matrix multiplication
splineKernel = self.splineKernel.reshape([1,self.E,self.N,self.M])
# We will multiply a 1 x E x N x M matrix with a F x E x M x G
# matrix to get the proper F x E x N x G coefficients
self.h = torch.matmul(splineKernel, self.weight.permute(0,1,3,2))
# And now we rearrange it to the same shape that the function takes
self.h = self.h.permute(0,1,3,2) # F x E x G x N
# And now we add the zero padding (if this comes from a pooling
# operation)
if Nin < self.N:
zeroPad = torch.zeros(B, F, self.N-Nin).type(x.dtype).to(x.device)
x = torch.cat((x, zeroPad), dim = 2)
# Compute the filter output
u = spectralGF(self.h, self.V, self.VH, x, self.bias)
# So far, u is of shape batchSize x dimOutFeatures x numberNodes
# And we want to return a tensor of shape
# batchSize x dimOutFeatures x numberNodesIn
# since the nodes between numberNodesIn and numberNodes are not required
if Nin < self.N:
u = torch.index_select(u, 2, torch.arange(Nin).to(u.device))
return u
def extra_repr(self):
        reprString = "in_features=%d, out_features=%d, " % (
                self.G, self.F) + "filter_coeff=%d, " % (
                self.M) + "edge_features=%d, " % (self.E) +\
                "bias=%s, " % (self.bias is not None)
if self.S is not None:
reprString += "GSO stored"
else:
reprString += "no GSO stored"
return reprString
class NodeVariantGF(nn.Module):
"""
NodeVariantGF Creates a filtering layer that applies a node-variant graph
filter
Initialization:
NodeVariantGF(in_features, out_features, shift_taps, node_taps
edge_features=1, bias=True)
Inputs:
in_features (int): number of input features (each feature is a graph
signal)
out_features (int): number of output features (each feature is a
graph signal)
shift_taps (int): number of filter taps for shifts
node_taps (int): number of filter taps for nodes
edge_features (int): number of features over each edge
bias (bool): add bias vector (one bias per feature) after graph
filtering
Output:
torch.nn.Module for a graph filtering layer using node-variant graph
filters.
Observation: Filter taps have shape
out_features x edge_features x shift_taps x in_features x node_taps
Add graph shift operator:
NodeVariantGF.addGSO(GSO) Before applying the filter, we need to define
        the GSO that we are going to use. This allows changing the GSO while
using the same filtering coefficients (as long as the number of edge
features is the same; but the number of nodes can change).
Inputs:
GSO (torch.tensor): graph shift operator; shape:
edge_features x number_nodes x number_nodes
Forward call:
y = NodeVariantGF(x)
Inputs:
x (torch.tensor): input data; shape:
batch_size x in_features x number_nodes
Outputs:
y (torch.tensor): output; shape:
batch_size x out_features x number_nodes
"""
def __init__(self, G, F, K, M, E = 1, bias = True):
# G: Number of input features
# F: Number of output features
# K: Number of filter shift taps
# M: Number of filter node taps
# GSOs will be added later.
# Bias will always be shared and scalar.
# Initialize parent
super().__init__()
# Save parameters:
self.G = G
self.F = F
self.K = K
self.M = M
self.E = E
self.S = None # No GSO assigned yet
# Create parameters:
self.weight = nn.parameter.Parameter(torch.Tensor(F, E, K, G, M))
if bias:
self.bias = nn.parameter.Parameter(torch.Tensor(F, 1))
else:
self.register_parameter('bias', None)
# Initialize parameters
self.reset_parameters()
def reset_parameters(self):
# Taken from _ConvNd initialization of parameters:
stdv = 1. / math.sqrt(self.G * self.K * self.M)
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def addGSO(self, S):
# Every S has 3 dimensions.
assert len(S.shape) == 3
# S is of shape E x N x N
assert S.shape[0] == self.E
self.N = S.shape[1]
assert S.shape[2] == self.N
self.S = S
npS = np.array(S.data.cpu()) # Save the GSO as a numpy array because we
# are going to compute the neighbors.
# And now we have to fill up the parameter vector, from M to N
if self.M < self.N:
# The first elements of M (ordered with whatever order we want)
# are the ones associated to independent node taps.
copyNodes = [m for m in range(self.M)]
# The rest of the nodes will copy one of these M node taps.
# The way we do this is: if they are connected to one of the M
            # independent nodes, just copy it. If they are not connected,
# look at the neighbors, neighbors, and so on, until we reach one
# of the independent nodes.
# Ties are broken by selecting the node with the smallest index
# (which, due to the ordering, is the most important node of all
# the available ones)
neighborList = graphTools.computeNeighborhood(npS, 1,
nb = self.M)
# This gets the list of 1-hop neighbors for all nodes.
# Find the nodes that have no neighbors
nodesWithNoNeighbors = [n for n in range(self.N) \
if len(neighborList[n]) == 0]
# If there are still nodes that didn't find a neighbor
K = 1 # K-hop neighbor we have looked so far
while len(nodesWithNoNeighbors) > 0:
# Looks for the next hop
K += 1
                # Get the neighbors one further hop away
thisNeighborList = graphTools.computeNeighborhood(npS,
K,
nb = self.M)
# Check if we now have neighbors for those that didn't have
# before
for n in nodesWithNoNeighbors:
# Get the neighbors of the node
thisNodeList = thisNeighborList[n]
# If there are neighbors
if len(thisNodeList) > 0:
# Add them to the list
neighborList[n] = thisNodeList
# Recheck if all nodes have non-empty neighbors
nodesWithNoNeighbors = [n for n in range(self.N) \
if len(neighborList[n]) == 0]
# Now we have obtained the list of independent nodes connected to
# all nodes, we keep the one with highest score. And since the
# matrix is already properly ordered, this means keeping the
# smallest index in the neighborList.
for m in range(self.M, self.N):
copyNodes.append(min(neighborList[m]))
# And, finally create the indices of nodes to copy
self.copyNodes = torch.tensor(copyNodes).to(S.device)
elif self.M == self.N:
# In this case, all parameters go into the vector h
self.copyNodes = torch.arange(self.M).to(S.device)
else:
            # This is the rare case in which self.M > self.N, for example, if
# we train in a larger network and deploy in a smaller one. Since
# the matrix is ordered by score, we just keep the first N
# weights
self.copyNodes = torch.arange(self.N).to(S.device)
# OBS.: self.weight is updated on each training step, so we cannot
# define the self.h vector (i.e. the vector with N elements) here,
# because otherwise it wouldn't be updated every time. So we need, in
# the for, to use index_select on the actual weights, to create the
# vector h that is later feed into the NVGF computation.
def forward(self, x):
# x is of shape: batchSize x dimInFeatures x numberNodesIn
B = x.shape[0]
F = x.shape[1]
Nin = x.shape[2]
# If we have less filter coefficients than the required ones, we need
# to use the copying scheme
if self.M == self.N:
self.h = self.weight
else:
self.h = torch.index_select(self.weight, 4, self.copyNodes)
# And now we add the zero padding
if Nin < self.N:
zeroPad = torch.zeros(B, F, self.N-Nin).type(x.dtype).to(x.device)
x = torch.cat((x, zeroPad), dim = 2)
# Compute the filter output
u = NVGF(self.h, self.S, x, self.bias)
# So far, u is of shape batchSize x dimOutFeatures x numberNodes
# And we want to return a tensor of shape
# batchSize x dimOutFeatures x numberNodesIn
# since the nodes between numberNodesIn and numberNodes are not required
if Nin < self.N:
u = torch.index_select(u, 2, torch.arange(Nin).to(u.device))
return u
def extra_repr(self):
reprString = "in_features=%d, out_features=%d, " % (
self.G, self.F) + "shift_taps=%d, node_taps=%d, " % (
self.K, self.M) + "edge_features=%d, " % (self.E) +\
"bias=%s, " % (self.bias is not None)
if self.S is not None:
reprString += "GSO stored"
else:
reprString += "no GSO stored"
return reprString
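# Illustrative usage sketch for NodeVariantGF (not part of the original
# library): with M = N every node keeps its own tap, so no tap copying is
# needed. Sizes and the _exampleNodeVariantGF name are assumptions.
def _exampleNodeVariantGF():
    N, G, F, K, B = 10, 3, 5, 4, 20
    M = N # one independent node tap per node
    S = torch.rand(1, N, N) # E = 1 edge feature
    nvgf = NodeVariantGF(G, F, K, M, E=1, bias=True)
    nvgf.addGSO(S)
    x = torch.rand(B, G, N)
    y = nvgf(x) # batch_size x out_features x number_nodes
    assert y.shape == (B, F, N)
    return y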
class EdgeVariantGF(nn.Module):
"""
EdgeVariantGF Creates a (linear) layer that applies an edge-variant graph
    filter using the masking approach. If fewer nodes than the total number
    of nodes are selected, then the remaining nodes adopt an LSI filter
    (i.e. it becomes a hybrid edge-variant graph filter)
Initialization:
EdgeVariantGF(in_features, out_features, shift_taps,
selected_nodes, number_nodes,
edge_features=1, bias=True)
Inputs:
in_features (int): number of input features (each feature is a graph
signal)
out_features (int): number of output features (each feature is a
graph signal)
shift_taps (int): number of shifts to consider
selected_nodes (int): number of selected nodes to implement the EV
part of the filter
number_nodes (int): number of nodes
edge_features (int): number of features over each edge
bias (bool): add bias vector (one bias per feature) after graph
filtering
Output:
torch.nn.Module for a graph filtering layer using hybrid
edge-variant graph filters.
Observation: Filter taps have shape
out_features x edge_features x shift_taps x in_features
x number_nodes x number_nodes
These weights are masked by the corresponding sparsity pattern of
the graph and the desired number of selected nodes, so only weights
        in the nonzero edges of these nodes will be trained; the rest of the
        parameters are masked out and never affect the output. Therefore, the
        number of parameters will not reflect the actual number of parameters
        being trained.
Add graph shift operator:
EdgeVariantGF.addGSO(GSO) Before applying the filter, we need to define
        the GSO that we are going to use. This allows changing the GSO while
using the same filtering coefficients (as long as the number of edge
features is the same; but the number of nodes can change).
Inputs:
GSO (torch.tensor): graph shift operator; shape:
edge_features x number_nodes x number_nodes
Forward call:
y = EdgeVariantGF(x)
Inputs:
x (torch.tensor): input data; shape:
batch_size x in_features x number_nodes
Outputs:
y (torch.tensor): output; shape:
batch_size x out_features x number_nodes
"""
def __init__(self, G, F, K, M, N, E=1, bias = True):
# Initialize parent
super().__init__()
# Save parameters:
self.G = G
self.F = F
self.K = K
self.E = E
self.M = M # Number of selected nodes
self.N = N # Total number of nodes
self.S = None
# Create parameters for the Edge-Variant part:
self.weightEV = nn.parameter.Parameter(torch.Tensor(F, E, K, G, N, N))
# If we want a hybrid, create parameters
if self.M < self.N:
self.weightLSI = nn.parameter.Parameter(torch.Tensor(F, E, K, G))
else:
self.register_parameter('weightLSI', None)
if bias:
self.bias = nn.parameter.Parameter(torch.Tensor(F, 1))
else:
self.register_parameter('bias', None)
# Initialize parameters
self.reset_parameters()
def reset_parameters(self):
# Taken from _ConvNd initialization of parameters:
stdv = 1. / math.sqrt(self.G * self.K * self.N)
self.weightEV.data.uniform_(-stdv, stdv)
if self.weightLSI is not None:
self.weightLSI.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def addGSO(self, S):
# Every S has 3 dimensions.
assert len(S.shape) == 3
# S is of shape E x N x N
assert S.shape[0] == self.E
self.N = S.shape[1]
assert S.shape[2] == self.N
self.S = S # Save the GSO
# Get the identity matrix across all edge features
multipleIdentity = torch.eye(self.N).reshape([1, self.N, self.N])\
.repeat(self.E, 1, 1).to(S.device)
# Compute the nonzero elements of S+I_{N}
sparsityPattern = ((torch.abs(S) + multipleIdentity) > zeroTolerance)
# Change from byte tensors to float tensors (or the same type of data as
# the GSO)
sparsityPattern = sparsityPattern.type(S.dtype)
# But now we need to kill everything that is between elements M and N
# (only if M < N)
if self.M < self.N:
# Create the ones in the row
hybridMaskOnesRows = torch.ones([self.M, self.N])
            # Create the ones in the columns
hybridMaskOnesCols = torch.ones([self.N - self.M, self.M])
# Create the zeros
hybridMaskZeros = torch.zeros([self.N - self.M, self.N - self.M])
# Concatenate the columns
hybridMask = torch.cat((hybridMaskOnesCols,hybridMaskZeros), dim=1)
# Concatenate the rows
hybridMask = torch.cat((hybridMaskOnesRows,hybridMask), dim=0)
else:
hybridMask = torch.ones([self.N, self.N])
# Now that we have the hybrid mask, we need to mask the sparsityPattern
# we got so far
hybridMask = hybridMask.reshape([1, self.N, self.N]).to(S.device)
# 1 x N x N
sparsityPattern = sparsityPattern * hybridMask
self.sparsityPattern = sparsityPattern.to(S.device)
# E x N x N
# This gives the sparsity pattern for each edge feature
# Now, let's create it of the right shape, so we do not have to go
# around wasting time with reshapes when called in the forward
# The weights have shape F x E x K x G x N x N
# The sparsity pattern has shape E x N x N. And we want to make it
# 1 x E x K x 1 x N x N. The K dimension is to guarantee that for k=0
# we have the identity
multipleIdentity = (multipleIdentity * hybridMask)\
.reshape([1, self.E, 1, 1, self.N, self.N])
# This gives a 1 x E x 1 x 1 x N x N identity matrix
sparsityPattern = sparsityPattern\
.reshape([1, self.E, 1, 1, self.N, self.N])
# This gives a 1 x E x 1 x 1 x N x N sparsity pattern matrix
sparsityPattern = sparsityPattern.repeat(1, 1, self.K-1, 1, 1, 1)
# This repeats the sparsity pattern K-1 times giving a matrix of shape
# 1 x E x (K-1) x 1 x N x N
sparsityPattern = torch.cat((multipleIdentity,sparsityPattern), dim = 2)
        # This should give me a 1 x E x K x 1 x N x N matrix with the identity
# in the first element
self.sparsityPatternFull = sparsityPattern.type(S.dtype).to(S.device)
def forward(self, x):
# x is of shape: batchSize x dimInFeatures x numberNodesIn
B = x.shape[0]
F = x.shape[1]
Nin = x.shape[2]
# Mask the parameters
self.Phi = self.weightEV * self.sparsityPatternFull
# And now we add the zero padding
if Nin < self.N:
zeroPad = torch.zeros(B, F, self.N-Nin).type(x.dtype).to(x.device)
x = torch.cat((x, zeroPad), dim = 2)
# Compute the filter output for the EV part
uEV = EVGF(self.Phi, x, self.bias)
# Check if we need an LSI part
if self.M < self.N:
# Compute the filter output for the LSI part
uLSI = LSIGF(self.weightLSI, self.S, x, self.bias)
else:
# If we don't, just add zero
uLSI = torch.tensor(0., dtype = uEV.dtype).to(uEV.device)
# Add both
u = uEV + uLSI
# So far, u is of shape batchSize x dimOutFeatures x numberNodes
# And we want to return a tensor of shape
# batchSize x dimOutFeatures x numberNodesIn
# since the nodes between numberNodesIn and numberNodes are not required
if Nin < self.N:
u = torch.index_select(u, 2, torch.arange(Nin).to(u.device))
return u
def extra_repr(self):
reprString = "in_features=%d, out_features=%d, " % (
self.G, self.F) + "shift_taps=%d, " % (
self.K) + \
"selected_nodes=%d, " % (self.M) +\
"number_nodes=%d, " % (self.N) +\
"edge_features=%d, " % (self.E) +\
"bias=%s, " % (self.bias is not None)
if self.S is not None:
reprString += "GSO stored"
else:
reprString += "no GSO stored"
return reprString
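# Illustrative usage sketch for EdgeVariantGF (not part of the original
# library): selecting all N nodes gives a purely edge-variant filter with no
# LSI part. Sizes and the _exampleEdgeVariantGF name are assumptions.
def _exampleEdgeVariantGF():
    N, G, F, K, B = 10, 3, 5, 4, 20
    M = N # select every node, i.e. a purely edge-variant filter
    S = (torch.rand(1, N, N) > 0.5).float() # E = 1, random binary GSO
    evgf = EdgeVariantGF(G, F, K, M, N, E=1, bias=True)
    evgf.addGSO(S) # builds the sparsity mask from S
    x = torch.rand(B, G, N)
    y = evgf(x) # batch_size x out_features x number_nodes
    assert y.shape == (B, F, N)
    return y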
class GraphAttentional(nn.Module):
"""
GraphAttentional Creates a graph attentional layer
Initialization:
GraphAttentional(in_features, out_features, attention_heads,
edge_features=1, nonlinearity=nn.functional.relu,
concatenate=True)
Inputs:
in_features (int): number of input features on top of each node
out_features (int): number of output features on top of each node
attention_heads (int): number of attention_heads
edge_features (int): number of features on top of each edge
(default: 1)
nonlinearity (nn.functional): nonlinearity applied after features
have been updated through attention (default:nn.functional.relu)
concatenate (bool): If True, the output of the attention_heads
attention heads are concatenated to form the output features, if
False, they are averaged (default: True)
Output:
torch.nn.Module for a graph attentional layer.
Add graph shift operator:
GraphAttentional.addGSO(GSO) Before applying the filter, we need to
        define the GSO that we are going to use. This allows changing the GSO
while using the same filtering coefficients (as long as the number of
edge features is the same; but the number of nodes can change).
Inputs:
GSO (torch.tensor): graph shift operator; shape:
edge_features x number_nodes x number_nodes
Forward call:
y = GraphAttentional(x)
Inputs:
x (torch.tensor): input data; shape:
batch_size x in_features x number_nodes
Outputs:
y (torch.tensor): output; shape:
batch_size x out_features x number_nodes
"""
def __init__(self, G, F, K, E = 1,
nonlinearity = nn.functional.relu, concatenate = True):
# K: Number of filter taps
# GSOs will be added later.
# This combines both weight scalars and weight vectors.
# Initialize parent
super().__init__()
# Save parameters:
self.G = G
self.F = F
self.K = K
self.E = E
self.S = None # No GSO assigned yet
self.nonlinearity = nonlinearity
self.concatenate = concatenate
# Create parameters:
self.mixer = nn.parameter.Parameter(torch.Tensor(K, E, 2*F))
self.weight = nn.parameter.Parameter(torch.Tensor(K, E, F, G))
# Initialize parameters
self.reset_parameters()
def reset_parameters(self):
# Taken from _ConvNd initialization of parameters:
stdv = 1. / math.sqrt(self.G * self.K)
self.weight.data.uniform_(-stdv, stdv)
self.mixer.data.uniform_(-stdv, stdv)
def addGSO(self, S):
# Every S has 3 dimensions.
assert len(S.shape) == 3
# S is of shape E x N x N
assert S.shape[0] == self.E
self.N = S.shape[1]
assert S.shape[2] == self.N
self.S = S
def forward(self, x):
# x is of shape: batchSize x dimInFeatures x numberNodesIn
B = x.shape[0]
F = x.shape[1]
Nin = x.shape[2]
# And now we add the zero padding
if Nin < self.N:
x = torch.cat((x,
torch.zeros(B, F, self.N-Nin)\
.type(x.dtype).to(x.device)
), dim = 2)
# And get the graph attention output
y = graphAttention(x, self.mixer, self.weight, self.S)
# This output is of size B x K x F x N. Now, we can either concatenate
# them (inner layers) or average them (outer layer)
if self.concatenate:
# When we concatenate we first apply the nonlinearity
y = self.nonlinearity(y)
# Concatenate: Make it B x KF x N such that first iterates over f
# and then over k: (k=0,f=0), (k=0,f=1), ..., (k=0,f=F-1), (k=1,f=0),
# (k=1,f=1), ..., etc.
y = y.permute(0, 3, 1, 2)\
.reshape([B, self.N, self.K*self.F])\
.permute(0, 2, 1)
else:
# When we don't, we first average
y = torch.mean(y, dim = 1) # B x F x N
# And then we apply the nonlinearity
y = self.nonlinearity(y)
if Nin < self.N:
y = torch.index_select(y, 2, torch.arange(Nin).to(y.device))
return y
def extra_repr(self):
reprString = "in_features=%d, out_features=%d, " % (
self.G, self.F) + "attention_heads=%d, " % (
self.K) + "edge_features=%d, " % (self.E)
if self.S is not None:
reprString += "GSO stored: number_nodes=%d" % (self.N)
else:
reprString += "no GSO stored"
return reprString
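# Illustrative usage sketch for GraphAttentional (not part of the original
# library): with concatenate=True the outputs of the P heads are stacked
# along the feature dimension. Sizes and the name are assumptions.
def _exampleGraphAttentional():
    N, G, F, P, B = 10, 3, 4, 2, 20
    S = (torch.rand(1, N, N) > 0.5).float() # E = 1, random binary GSO
    gat = GraphAttentional(G, F, P, E=1, concatenate=True)
    gat.addGSO(S)
    x = torch.rand(B, G, N)
    y = gat(x) # batch_size x (heads * out_features) x number_nodes
    assert y.shape == (B, P * F, N)
    return y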
def matrixPowersBatch(S, K):
"""
    matrixPowersBatch(S, K) Computes the matrix powers S_b^k for k = 0, ..., K-1
    for each matrix S_b in the batch, b = 1, ..., B.
    Inputs:
        S (tensor): Matrices to compute powers. It can be either a single matrix
per batch element: shape batch_size x number_nodes x number_nodes
or contain edge features: shape
batch_size x edge_features x number_nodes x number_nodes
K (int): maximum power to be computed (up to K-1)
Outputs:
        SK: either a collection of K matrices B x K x N x N (if the input was a
single matrix) or a collection B x E x K x N x N (if the input was a
collection of E matrices).
"""
# S can be either a single GSO (N x N) or a collection of GSOs (E x N x N)
if len(S.shape) == 3:
B = S.shape[0]
N = S.shape[1]
assert S.shape[2] == N
E = 1
S = S.unsqueeze(1)
scalarWeights = True
elif len(S.shape) == 4:
B = S.shape[0]
E = S.shape[1]
N = S.shape[2]
assert S.shape[3] == N
scalarWeights = False
# Now, let's build the powers of S:
thisSK = torch.eye(N).repeat([B, E, 1, 1]).to(S.device)
SK = thisSK.unsqueeze(2)
for k in range(1, K):
thisSK = torch.matmul(thisSK, S)
SK = torch.cat((SK, thisSK.unsqueeze(2)), dim=2)
# Take out the first dimension if it was a single GSO
if scalarWeights:
SK = SK.squeeze(1)
return SK
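# Illustrative check for matrixPowersBatch (not part of the original library):
# sizes and the _exampleMatrixPowersBatch name are assumptions.
def _exampleMatrixPowersBatch():
    B, N, K = 4, 10, 3
    S = torch.rand(B, N, N) # one GSO per batch element, no edge features
    SK = matrixPowersBatch(S, K)
    assert SK.shape == (B, K, N, N)
    # SK[:, 0] is the identity, SK[:, 1] is S, and SK[:, 2] is S squared
    assert torch.allclose(SK[:, 2], torch.matmul(S, S), atol=1e-5)
    return SK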
def batchLSIGF(h, SK, x, bias=None):
"""
batchLSIGF(filter_taps, GSO_K, input, bias=None) Computes the output of a
linear shift-invariant graph filter on input and then adds bias.
In this case, we consider that there is a separate GSO to be used for each
of the signals in the batch. In other words, SK[b] is applied when filtering
x[b] as opposed to applying the same SK to all the graph signals in the
batch.
Inputs:
filter_taps: vector of filter taps; size:
output_features x edge_features x filter_taps x input_features
GSO_K: collection of matrices; size:
batch_size x edge_features x filter_taps x number_nodes x number_nodes
input: input signal; size:
batch_size x input_features x number_nodes
bias: size: output_features x number_nodes
if the same bias is to be applied to all nodes, set number_nodes = 1
so that b_{f} vector becomes b_{f} \mathbf{1}_{N}
Outputs:
output: filtered signals; size:
batch_size x output_features x number_nodes
"""
# Get the parameter numbers:
F = h.shape[0]
E = h.shape[1]
K = h.shape[2]
G = h.shape[3]
B = SK.shape[0]
assert SK.shape[1] == E
assert SK.shape[2] == K
N = SK.shape[3]
assert SK.shape[4] == N
assert x.shape[0] == B
assert x.shape[1] == G
assert x.shape[2] == N
# Or, in the notation I've been using:
# h in F x E x K x G
# SK in B x E x K x N x N
# x in B x G x N
# b in F x N
# y in B x F x N
SK = SK.permute(1, 2, 0, 3, 4)
# Now, SK is of shape E x K x B x N x N so that we can multiply by x of
# size B x G x N to get
z = torch.matmul(x, SK)
# which is of size E x K x B x G x N.
# Now, we have already carried out the multiplication across the dimension
# of the nodes. Now we need to focus on the K, F, G.
# Let's start by putting B and N in the front
z = z.permute(2, 4, 0, 1, 3).reshape([B, N, E * K * G])
# so that we get z in B x N x EKG.
# Now adjust the filter taps so they are of the form EKG x F
h = h.reshape([F, G * E * K]).permute(1, 0)
# Multiply
y = torch.matmul(z, h)
# to get a result of size B x N x F. And permute
y = y.permute(0, 2, 1)
# to get it back in the right order: B x F x N.
# Now, in this case, each element x[b,:,:] has adequately been filtered by
# the GSO S[b,:,:,:]
if bias is not None:
y = y + bias
return y
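# Illustrative sketch combining matrixPowersBatch and batchLSIGF (not part of
# the original library): each signal in the batch is filtered with its own
# GSO. Sizes and the _exampleBatchLSIGF name are assumptions.
def _exampleBatchLSIGF():
    B, E, K, G, F, N = 4, 1, 3, 2, 5, 10
    h = torch.rand(F, E, K, G) # filter taps, shared across the batch
    S = torch.rand(B, E, N, N) # a different GSO for every batch element
    SK = matrixPowersBatch(S, K) # B x E x K x N x N
    x = torch.rand(B, G, N)
    y = batchLSIGF(h, SK, x) # batch_size x output_features x number_nodes
    assert y.shape == (B, F, N)
    return y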
class GraphFilterBatchGSO(GraphFilter):
"""
GraphFilterBatchGSO Creates a (linear) layer that applies a graph filter
with a different GSO for each signal in the batch.
This function is typically useful when not only the graph signal is changed
during training, but also the GSO. That is, each data point in the batch is
of the form (x_b,S_b) for b = 1,...,B instead of just x_b. The filter
coefficients are still the same being applied to all graph filters, but both
the GSO and the graph signal are different for each datapoint in the batch.
Initialization:
GraphFilterBatchGSO(in_features, out_features, filter_taps,
edge_features=1, bias=True)
Inputs:
in_features (int): number of input features (each feature is a graph
signal)
out_features (int): number of output features (each feature is a
graph signal)
filter_taps (int): number of filter taps
edge_features (int): number of features over each edge
bias (bool): add bias vector (one bias per feature) after graph
filtering
Output:
torch.nn.Module for a graph filtering layer (also known as graph
convolutional layer).
Observation: Filter taps have shape
out_features x edge_features x filter_taps x in_features
Add graph shift operator:
GraphFilterBatchGSO.addGSO(GSO) Before applying the filter, we need to
define the GSOs that we are going to use for each element of the batch.
Each GSO has to have the same number of edges, but the number of nodes
can change.
Inputs:
GSO (tensor): collection of graph shift operators; size can be
batch_size x number_nodes x number_nodes, or
batch_size x edge_features x number_nodes x number_nodes
Forward call:
y = GraphFilterBatchGSO(x)
Inputs:
x (tensor): input data; size: batch_size x in_features x number_nodes
Outputs:
y (tensor): output; size: batch_size x out_features x number_nodes
"""
def __init__(self, G, F, K, E=1, bias=True):
# K: Number of filter taps
# GSOs will be added later.
# This combines both weight scalars and weight vectors.
# Bias will always be shared and scalar.
# Initialize parent
super().__init__(G, F, K, E, bias)
def addGSO(self, S):
# So, we have to take into account the situation where S is either
# B x N x N or B x E x N x N. No matter what, we're always handling,
# internally the dimension E. So if the input is B x N x N, we have to
# unsqueeze it so it becomes B x 1 x N x N.
if len(S.shape) == 3 and S.shape[1] == S.shape[2]:
self.S = S.unsqueeze(1)
elif len(S.shape) == 4 and S.shape[1] == self.E \
and S.shape[2] == S.shape[3]:
self.S = S
        else:
            # Neither B x N x N nor B x E x N x N: report the invalid shape.
            raise ValueError("GSO must have shape B x N x N or B x E x N x N")
self.N = self.S.shape[2]
self.B = self.S.shape[0]
self.SK = matrixPowersBatch(self.S, self.K)
def forward(self, x):
# TODO: If S (and consequently SK) hasn't been defined, print an error.
return batchLSIGF(self.weight, self.SK, x, self.bias)
def extra_repr(self):
reprString = "in_features=%d, out_features=%d, " % (
self.G, self.F) + "filter_taps=%d, " % (
self.K) + "edge_features=%d, " % (self.E) + \
"bias=%s, " % (self.bias is not None)
if self.S is not None:
reprString += "GSO stored: number_nodes=%d, batch_size=%d" % (
self.N, self.B)
else:
reprString += "no GSO stored"
return reprString
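# Sketch of the intended usage of GraphFilterBatchGSO (layer sizes are made up):
#   gfl = GraphFilterBatchGSO(2, 4, 3)      # 2 in / 4 out features, 3 filter taps
#   gfl.addGSO(torch.rand(8, 5, 5))         # one 5x5 GSO per batch element
#   y = gfl(torch.rand(8, 2, 5))            # y.shape == (8, 4, 5)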
def BatchLSIGF(h, S, x, b=None):
"""
LSIGF(filter_taps, GSO, input, bias=None) Computes the output of a linear
shift-invariant graph filter on input and then adds bias.
Denote as G the number of input features, F the number of output features,
E the number of edge features, K the number of filter taps, N the number of
nodes, S_{e} in R^{N x N} the GSO for edge feature e, x in R^{G x N} the
input data where x_{g} in R^{N} is the graph signal representing feature
g, and b in R^{F x N} the bias vector, with b_{f} in R^{N} representing the
bias for feature f.
Then, the LSI-GF is computed as
y_{f} = \sum_{e=1}^{E}
\sum_{k=0}^{K-1}
\sum_{g=1}^{G}
[h_{f,g,e}]_{k} S_{e}^{k} x_{g}
+ b_{f}
for f = 1, ..., F.
Inputs:
filter_taps (torch.tensor): array of filter taps; shape:
output_features x edge_features x filter_taps x input_features
GSO (torch.tensor): graph shift operator; shape:
            batch_size x edge_features x number_nodes x number_nodes
input (torch.tensor): input signal; shape:
batch_size x input_features x number_nodes
bias (torch.tensor): shape: output_features x number_nodes
if the same bias is to be applied to all nodes, set number_nodes = 1
so that b_{f} vector becomes b_{f} \mathbf{1}_{N}
Outputs:
output: filtered signals; shape:
batch_size x output_features x number_nodes
"""
# The basic idea of what follows is to start reshaping the input and the
# GSO so the filter coefficients go just as a very plain and simple
# linear operation, so that all the derivatives and stuff on them can be
# easily computed.
    # h is output_features x edge_features x filter_taps x input_features
    # S is batch_size x edge_features x number_nodes x number_nodes
# x is batch_size x input_features x number_nodes
# b is output_features x number_nodes
# Output:
# y is batch_size x output_features x number_nodes
# Get the parameter numbers:
F = h.shape[0]
E = h.shape[1]
K = h.shape[2]
G = h.shape[3]
assert S.shape[1] == E
N = S.shape[2]
assert S.shape[3] == N
B = x.shape[0]
assert x.shape[1] == G
assert x.shape[2] == N
# Or, in the notation we've been using:
# h in F x E x K x G
# S in B x E x N x N
# x in B x G x N
# b in F x N
# y in B x F x N
# Now, we have x in B x G x N and S in B x E x N x N, and we want to come up
# with matrix multiplication that yields z = x * S with shape
# B x E x K x G x N.
# For this, we first add the corresponding dimensions
x = x.reshape([B, 1, G, N])
# print(S)
S = S.reshape([B, E, N, N])
z = x.reshape([B, 1, 1, G, N]).repeat(1, E, 1, 1, 1) # This is for k = 0
# We need to repeat along the E dimension, because for k=0, S_{e} = I for
# all e, and therefore, the same signal values have to be used along all
# edge feature dimensions.
for k in range(1,K):
x = torch.matmul(x, S.float()) # B x E x G x N
xS = x.reshape([B, E, 1, G, N]) # B x E x 1 x G x N
z = torch.cat((z, xS), dim = 2) # B x E x k x G x N
# This output z is of size B x E x K x G x N
# Now we have the x*S_{e}^{k} product, and we need to multiply with the
# filter taps.
# We multiply z on the left, and h on the right, the output is to be
# B x N x F (the multiplication is not along the N dimension), so we reshape
# z to be B x N x E x K x G and reshape it to B x N x EKG (remember we
# always reshape the last dimensions), and then make h be E x K x G x F and
# reshape it to EKG x F, and then multiply
y = torch.matmul(z.permute(0, 4, 1, 2, 3).reshape([B, N, E*K*G]),
h.reshape([F, E*K*G]).permute(1, 0)).permute(0, 2, 1)
    # And permute again to bring it from B x N x F to B x F x N.
# Finally, add the bias
if b is not None:
y = y + b
return y
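# Note the difference with batchLSIGF above: BatchLSIGF receives the GSOs S
# themselves (B x E x N x N) and builds the powers S^k on the fly, while
# batchLSIGF expects the precomputed powers SK. A hypothetical call:
#   y = BatchLSIGF(torch.rand(4, 1, 3, 2),  # filter taps, F x E x K x G
#                  torch.rand(8, 1, 5, 5),  # per-sample GSOs, B x E x N x N
#                  torch.rand(8, 2, 5))     # signals, B x G x N -> y is (8, 4, 5)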
class GraphFilterBatch(nn.Module):
"""
GraphFilter Creates a (linear) layer that applies a graph filter
Initialization:
GraphFilter(in_features, out_features, filter_taps,
edge_features=1, bias=True)
Inputs:
in_features (int): number of input features (each feature is a graph
signal)
out_features (int): number of output features (each feature is a
graph signal)
filter_taps (int): number of filter taps
edge_features (int): number of features over each edge
bias (bool): add bias vector (one bias per feature) after graph
filtering
Output:
torch.nn.Module for a graph filtering layer (also known as graph
convolutional layer).
Observation: Filter taps have shape
out_features x edge_features x filter_taps x in_features
Add graph shift operator:
GraphFilter.addGSO(GSO) Before applying the filter, we need to define
the GSO that we are going to use. This allows to change the GSO while
using the same filtering coefficients (as long as the number of edge
features is the same; but the number of nodes can change).
Inputs:
GSO (torch.tensor): graph shift operator; shape:
Batch edge_features x number_nodes x number_nodes
Forward call:
y = GraphFilter(x)
Inputs:
x (torch.tensor): input data; shape:
batch_size x in_features x number_nodes
Outputs:
y (torch.tensor): output; shape:
batch_size x out_features x number_nodes
"""
def __init__(self, G, F, K, E = 1, bias = True):
# K: Number of filter taps
# GSOs will be added later.
# This combines both weight scalars and weight vectors.
# Bias will always be shared and scalar.
# Initialize parent
super().__init__()
# Save parameters:
self.G = G
self.F = F
self.K = K
self.E = E
self.S = None # No GSO assigned yet
# Create parameters:
self.weight = nn.parameter.Parameter(torch.Tensor(F, E, K, G))
if bias:
self.bias = nn.parameter.Parameter(torch.Tensor(F, 1))
else:
self.register_parameter('bias', None)
# Initialize parameters
self.reset_parameters()
def reset_parameters(self):
# Taken from _ConvNd initialization of parameters:
stdv = 1. / math.sqrt(self.G * self.K)
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def addGSO(self, S):
# Every S has 4 dimensions.
assert len(S.shape) == 4
# S is of shape B x E x N x N
assert S.shape[1] == self.E
self.N = S.shape[2]
assert S.shape[3] == self.N
self.S = S
def forward(self, x):
# x is of shape: batchSize x dimInFeatures x numberNodesIn
B = x.shape[0]
F = x.shape[1]
Nin = x.shape[2]
# And now we add the zero padding
if Nin < self.N:
x = torch.cat((x,
torch.zeros(B, F, self.N-Nin)\
.type(x.dtype).to(x.device)
), dim = 2)
# Compute the filter output
u = BatchLSIGF(self.weight, self.S, x, self.bias)
# So far, u is of shape batchSize x dimOutFeatures x numberNodes
# And we want to return a tensor of shape
# batchSize x dimOutFeatures x numberNodesIn
# since the nodes between numberNodesIn and numberNodes are not required
if Nin < self.N:
u = torch.index_select(u, 2, torch.arange(Nin).to(u.device))
return u
def extra_repr(self):
reprString = "in_features=%d, out_features=%d, " % (
self.G, self.F) + "filter_taps=%d, " % (
self.K) + "edge_features=%d, " % (self.E) +\
"bias=%s, " % (self.bias is not None)
if self.S is not None:
reprString += "GSO stored"
else:
reprString += "no GSO stored"
return reprString
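# Sketch of the intended usage of GraphFilterBatch (illustrative sizes):
#   gfl = GraphFilterBatch(2, 4, 3)         # 2 in / 4 out features, 3 filter taps
#   gfl.addGSO(torch.rand(8, 1, 5, 5))      # B x E x N x N, one GSO per sample
#   y = gfl(torch.rand(8, 2, 5))            # y.shape == (8, 4, 5)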
class GraphFilterRNNBatch(nn.Module):
"""
GraphFilter Creates a (linear) layer that applies a graph filter
Initialization:
GraphFilter(in_features, out_features, filter_taps,
edge_features=1, bias=True)
Inputs:
in_features (int): number of input features (each feature is a graph
signal)
out_features (int): number of output features (each feature is a
graph signal)
filter_taps (int): number of filter taps
edge_features (int): number of features over each edge
bias (bool): add bias vector (one bias per feature) after graph
filtering
Output:
torch.nn.Module for a graph filtering layer (also known as graph
convolutional layer).
Observation: Filter taps have shape
out_features x edge_features x filter_taps x in_features
Add graph shift operator:
GraphFilter.addGSO(GSO) Before applying the filter, we need to define
the GSO that we are going to use. This allows to change the GSO while
using the same filtering coefficients (as long as the number of edge
features is the same; but the number of nodes can change).
Inputs:
GSO (torch.tensor): graph shift operator; shape:
Batch edge_features x number_nodes x number_nodes
Forward call:
y = GraphFilter(x)
Inputs:
x (torch.tensor): input data; shape:
batch_size x in_features x number_nodes
Outputs:
y (torch.tensor): output; shape:
batch_size x out_features x number_nodes
"""
def __init__(self, G, H, F, K, E=1, bias=True):
# K: Number of filter taps
# GSOs will be added later.
# This combines both weight scalars and weight vectors.
# Bias will always be shared and scalar.
# Initialize parent
super().__init__()
# Save parameters:
self.G = G
self.F = F
self.H = H # hidden_features
self.K = K
self.E = E
self.S = None # No GSO assigned yet
# Create parameters:
self.weight_A = nn.parameter.Parameter(torch.Tensor(H, E, K, G))
self.weight_B = nn.parameter.Parameter(torch.Tensor(H, E, K, H))
self.weight_D = nn.parameter.Parameter(torch.Tensor(F, E, K, H))
if bias:
self.bias_A = nn.parameter.Parameter(torch.Tensor(H, 1))
self.bias_B = nn.parameter.Parameter(torch.Tensor(H, 1))
self.bias_D = nn.parameter.Parameter(torch.Tensor(F, 1))
else:
self.register_parameter('bias', None)
# Initialize parameters
self.reset_parameters()
def reset_parameters(self):
# Taken from _ConvNd initialization of parameters:
stdv_a = 1. / math.sqrt(self.G * self.K)
self.weight_A.data.uniform_(-stdv_a, stdv_a)
if self.bias_A is not None:
self.bias_A.data.uniform_(-stdv_a, stdv_a)
stdv_b = 1. / math.sqrt(self.H * self.K)
self.weight_B.data.uniform_(-stdv_b, stdv_b)
if self.bias_B is not None:
self.bias_B.data.uniform_(-stdv_b, stdv_b)
stdv_d = 1. / math.sqrt(self.H * self.K)
self.weight_D.data.uniform_(-stdv_d, stdv_d)
if self.bias_D is not None:
self.bias_D.data.uniform_(-stdv_d, stdv_d)
def addGSO(self, S):
# Every S has 4 dimensions.
assert len(S.shape) == 4
# S is of shape B x E x N x N
assert S.shape[1] == self.E
self.N = S.shape[2]
assert S.shape[3] == self.N
self.S = S
# def initialize_hidden(self):
# # the weights are of the form (nb_layers, batch_size, nb_lstm_units)
# hidden = torch.zeros(self.config.batch_size, self.F, self.numAgents)
#
# self.hiddenstateGPU = hidden.to(self.config.device)
def updateHiddenState(self, hiddenState):
self.hiddenState = hiddenState
def detachHiddenState(self):
# tensor.detach() creates a tensor that shares storage with tensor that does not require grad.
# You should use detach() when attempting to remove a tensor from a computation graph
#https://discuss.pytorch.org/t/clone-and-detach-in-v0-4-0/16861/4
self.hiddenState.detach_()
self.hiddenStateNext.detach_()
pass
def forward(self, x):
# x is of shape: batchSize x dimInFeatures x numberNodesIn
B = x.shape[0]
F = x.shape[1]
Nin = x.shape[2]
# And now we add the zero padding
if Nin < self.N:
x = torch.cat((x,
torch.zeros(B, F, self.N-Nin)\
.type(x.dtype).to(x.device)
), dim = 2)
# Compute the filter output
u_a = BatchLSIGF(self.weight_A, self.S, x, self.bias_A)
u_b = BatchLSIGF(self.weight_B, self.S, self.hiddenState, self.bias_B)
sigma = nn.ReLU(inplace=True)
# sigma = nn.Tanh()
self.hiddenStateNext = sigma(u_a + u_b)
u = BatchLSIGF(self.weight_D, self.S, self.hiddenStateNext, self.bias_D)
self.updateHiddenState(self.hiddenStateNext)
# So far, u is of shape batchSize x dimOutFeatures x numberNodes
# And we want to return a tensor of shape
# batchSize x dimOutFeatures x numberNodesIn
# since the nodes between numberNodesIn and numberNodes are not required
if Nin < self.N:
u = torch.index_select(u, 2, torch.arange(Nin).to(u.device))
return u
def extra_repr(self):
reprString = "in_features=%d, out_features=%d, hidden_features=%d, " % (
self.G, self.F, self.H) + "filter_taps=%d, " % (
self.K) + "edge_features=%d, " % (self.E) +\
"bias=%s, " % (self.bias_D is not None)
if self.S is not None:
reprString += "GSO stored"
else:
reprString += "no GSO stored"
return reprString
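# Sketch of the intended usage of GraphFilterRNNBatch (illustrative sizes). The
# hidden state is read in forward() but never created in __init__, so it has to
# be seeded explicitly before the first call:
#   gfl = GraphFilterRNNBatch(2, 16, 4, 3)        # G=2, H=16 hidden, F=4, K=3 taps
#   gfl.addGSO(torch.rand(8, 1, 5, 5))
#   gfl.updateHiddenState(torch.zeros(8, 16, 5))  # B x H x N
#   y = gfl(torch.rand(8, 2, 5))                  # y.shape == (8, 4, 5)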
def torchpermul(h, x, b=None):
# h is output_features x edge_weights x filter_taps x input_features
# x is batch_size x input_features x number_nodes
# b is output_features x number_nodes
# Output:
# y is batch_size x output_features x number_nodes
# Get the parameter numbers:
# in the notation we've been using:
# h in G x H
# x in B x H x N
# b in G x N
# y in B x G x N
# Now, we have x in B x H x N and h in G x H
# B x N x H with H x G -> B x N x G -> B x G x N
    y = torch.matmul(x.permute(0, 2, 1), h.permute(1, 0)).permute(0, 2, 1) # B x G x N
# Finally, add the bias
if b is not None:
y = y + b
return y
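# torchpermul applies the same feature transform h at every node, i.e. a pointwise,
# graph-agnostic linear map over the node dimension. Hypothetical shapes:
#   h = torch.rand(4, 16)                   # G x H
#   x = torch.rand(8, 16, 5)                # B x H x N
#   y = torchpermul(h, x)                   # y.shape == (8, 4, 5)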
class GraphFilterMoRNNBatch(nn.Module):
"""
GraphFilter Creates a (linear) layer that applies a graph filter
Initialization:
GraphFilter(in_features, out_features, filter_taps,
edge_features=1, bias=True)
Inputs:
in_features (int): number of input features (each feature is a graph
signal)
out_features (int): number of output features (each feature is a
graph signal)
filter_taps (int): number of filter taps
edge_features (int): number of features over each edge
bias (bool): add bias vector (one bias per feature) after graph
filtering
Output:
torch.nn.Module for a graph filtering layer (also known as graph
convolutional layer).
Observation: Filter taps have shape
out_features x edge_features x filter_taps x in_features
Add graph shift operator:
GraphFilter.addGSO(GSO) Before applying the filter, we need to define
the GSO that we are going to use. This allows to change the GSO while
using the same filtering coefficients (as long as the number of edge
features is the same; but the number of nodes can change).
Inputs:
GSO (torch.tensor): graph shift operator; shape:
Batch edge_features x number_nodes x number_nodes
Forward call:
y = GraphFilter(x)
Inputs:
x (torch.tensor): input data; shape:
batch_size x in_features x number_nodes
Outputs:
y (torch.tensor): output; shape:
batch_size x out_features x number_nodes
"""
def __init__(self, G, H, F, K, E=1, bias=True):
# K: Number of filter taps
# GSOs will be added later.
# This combines both weight scalars and weight vectors.
# Bias will always be shared and scalar.
# Initialize parent
super().__init__()
# Save parameters:
self.G = G
self.F = F
self.H = H # hidden_features
self.K = K
self.E = E
self.S = None # No GSO assigned yet
# Create parameters:
self.weight_A = nn.parameter.Parameter(torch.Tensor(H, E, K, G))
self.weight_B = nn.parameter.Parameter(torch.Tensor(H, H))
self.weight_D = nn.parameter.Parameter(torch.Tensor(F, H))
if bias:
self.bias_A = nn.parameter.Parameter(torch.Tensor(H, 1))
self.bias_B = nn.parameter.Parameter(torch.Tensor(H, 1))
self.bias_D = nn.parameter.Parameter(torch.Tensor(F, 1))
else:
self.register_parameter('bias', None)
# Initialize parameters
self.reset_parameters()
def reset_parameters(self):
# todo: check initialize weight
# Taken from _ConvNd initialization of parameters:
stdv_a = 1. / math.sqrt(self.G * self.K)
self.weight_A.data.uniform_(-stdv_a, stdv_a)
if self.bias_A is not None:
self.bias_A.data.uniform_(-stdv_a, stdv_a)
stdv_b = 1. / math.sqrt(self.H)
self.weight_B.data.uniform_(-stdv_b, stdv_b)
if self.bias_B is not None:
self.bias_B.data.uniform_(-stdv_b, stdv_b)
        stdv_d = 1. / math.sqrt(self.H)
self.weight_D.data.uniform_(-stdv_d, stdv_d)
if self.bias_D is not None:
self.bias_D.data.uniform_(-stdv_d, stdv_d)
def addGSO(self, S):
# Every S has 4 dimensions.
assert len(S.shape) == 4
# S is of shape B x E x N x N
assert S.shape[1] == self.E
self.N = S.shape[2]
assert S.shape[3] == self.N
self.S = S
def updateHiddenState(self, hiddenState):
self.hiddenState = hiddenState
def forward(self, x):
# x is of shape: batchSize x dimInFeatures x numberNodesIn
B = x.shape[0]
F = x.shape[1]
Nin = x.shape[2]
# And now we add the zero padding
if Nin < self.N:
x = torch.cat((x,
torch.zeros(B, F, self.N-Nin)\
.type(x.dtype).to(x.device)
), dim = 2)
# Compute the filter output
u_a = BatchLSIGF(self.weight_A, self.S, x, self.bias_A) # B x H x n
# u_b = torch.mul(self.hiddenState.permute(0,2,1), self.weight_B.permute(1, 0)).permute(0,2,1) + self.bias_B # B x H x n
u_b = torchpermul(self.weight_B,self.hiddenState,self.bias_B)
sigma = nn.ReLU(inplace=True)
# sigma = nn.Tanh()
self.hiddenStateNext = sigma(u_a + u_b)
# v1
# u = torch.mul(self.weight_D, self.hiddenState) + self.bias_D
# v2
# u = torch.mul(u_a.permute(0,2,1), self.weight_D.permute(1, 0)).permute(0,2,1) + self.bias_D
u = torchpermul(self.weight_D, self.hiddenStateNext, self.bias_D)
self.updateHiddenState(self.hiddenStateNext)
# So far, u is of shape batchSize x dimOutFeatures x numberNodes
# And we want to return a tensor of shape
# batchSize x dimOutFeatures x numberNodesIn
# since the nodes between numberNodesIn and numberNodes are not required
if Nin < self.N:
u = torch.index_select(u, 2, torch.arange(Nin).to(u.device))
return u
def extra_repr(self):
reprString = "in_features=%d, out_features=%d, hidden_features=%d, " % (
self.G, self.F, self.H) + "filter_taps=%d, " % (
self.K) + "edge_features=%d, " % (self.E) +\
"bias=%s, " % (self.bias_D is not None)
if self.S is not None:
reprString += "GSO stored"
else:
reprString += "no GSO stored"
return reprString
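# In contrast with GraphFilterRNNBatch, only the input-to-hidden map of
# GraphFilterMoRNNBatch (weight_A) is a graph filter; the hidden-to-hidden
# (weight_B) and hidden-to-output (weight_D) maps are node-wise linear transforms
# applied through torchpermul. As above, updateHiddenState(...) has to be called
# before the first forward pass.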
class GraphFilterL2ShareBatch(nn.Module):
"""
GraphFilter Creates a (linear) layer that applies a graph filter
Initialization:
GraphFilter(in_features, out_features, filter_taps,
edge_features=1, bias=True)
Inputs:
in_features (int): number of input features (each feature is a graph
signal)
out_features (int): number of output features (each feature is a
graph signal)
filter_taps (int): number of filter taps
edge_features (int): number of features over each edge
bias (bool): add bias vector (one bias per feature) after graph
filtering
Output:
torch.nn.Module for a graph filtering layer (also known as graph
convolutional layer).
Observation: Filter taps have shape
out_features x edge_features x filter_taps x in_features
Add graph shift operator:
GraphFilter.addGSO(GSO) Before applying the filter, we need to define
the GSO that we are going to use. This allows to change the GSO while
using the same filtering coefficients (as long as the number of edge
features is the same; but the number of nodes can change).
Inputs:
GSO (torch.tensor): graph shift operator; shape:
Batch edge_features x number_nodes x number_nodes
Forward call:
y = GraphFilter(x)
Inputs:
x (torch.tensor): input data; shape:
batch_size x in_features x number_nodes
Outputs:
y (torch.tensor): output; shape:
batch_size x out_features x number_nodes
"""
def __init__(self, G, H, F, K, E=1, bias=True):
# K: Number of filter taps
# GSOs will be added later.
# This combines both weight scalars and weight vectors.
# Bias will always be shared and scalar.
# Initialize parent
super().__init__()
# Save parameters:
self.G = G
self.F = F
self.H = H # hidden_features
self.K = K
self.E = E
self.S = None # No GSO assigned yet
# Create parameters:
self.weight_A = nn.parameter.Parameter(torch.Tensor(H, E, K, G))
self.weight_B = nn.parameter.Parameter(torch.Tensor(H, H))
self.weight_D = nn.parameter.Parameter(torch.Tensor(F, H))
if bias:
self.bias_A = nn.parameter.Parameter(torch.Tensor(H, 1))
self.bias_B = nn.parameter.Parameter(torch.Tensor(H, 1))
self.bias_D = nn.parameter.Parameter(torch.Tensor(F, 1))
else:
self.register_parameter('bias', None)
# Initialize parameters
self.reset_parameters()
def reset_parameters(self):
# todo: check initialize weight
# Taken from _ConvNd initialization of parameters:
stdv_a = 1. / math.sqrt(self.G * self.K)
self.weight_A.data.uniform_(-stdv_a, stdv_a)
if self.bias_A is not None:
self.bias_A.data.uniform_(-stdv_a, stdv_a)
stdv_b = 1. / math.sqrt(self.H)
self.weight_B.data.uniform_(-stdv_b, stdv_b)
if self.bias_B is not None:
self.bias_B.data.uniform_(-stdv_b, stdv_b)
        stdv_d = 1. / math.sqrt(self.H)
self.weight_D.data.uniform_(-stdv_d, stdv_d)
if self.bias_D is not None:
self.bias_D.data.uniform_(-stdv_d, stdv_d)
def addGSO(self, S):
# Every S has 4 dimensions.
assert len(S.shape) == 4
# S is of shape B x E x N x N
assert S.shape[1] == self.E
self.N = S.shape[2]
assert S.shape[3] == self.N
self.S = S
def updateHiddenState(self, hiddenState):
self.hiddenState = hiddenState
def forward(self, x):
# x is of shape: batchSize x dimInFeatures x numberNodesIn
B = x.shape[0]
F = x.shape[1]
Nin = x.shape[2]
# And now we add the zero padding
if Nin < self.N:
x = torch.cat((x,
torch.zeros(B, F, self.N-Nin)\
.type(x.dtype).to(x.device)
), dim = 2)
# Compute the filter output
u_a = BatchLSIGF(self.weight_A, self.S, x, self.bias_A) # B x H x n
u_b = torchpermul(self.weight_B, self.hiddenState, self.bias_B)
sigma = nn.ReLU(inplace=True)
# sigma = nn.Tanh()
self.hiddenStateNext = sigma(u_a + u_b)
# u = torch.mul(u_a.permute(0,2,1), self.weight_D.permute(1, 0)).permute(0,2,1) + self.bias_D
u = torchpermul(self.weight_D, self.hiddenStateNext, self.bias_D)
self.updateHiddenState(self.hiddenStateNext)
# So far, u is of shape batchSize x dimOutFeatures x numberNodes
# And we want to return a tensor of shape
# batchSize x dimOutFeatures x numberNodesIn
# since the nodes between numberNodesIn and numberNodes are not required
if Nin < self.N:
u = torch.index_select(u, 2, torch.arange(Nin).to(u.device))
return u
def extra_repr(self):
reprString = "in_features=%d, out_features=%d, hidden_features=%d, " % (
self.G, self.F, self.H) + "filter_taps=%d, " % (
self.K) + "edge_features=%d, " % (self.E) +\
"bias=%s, " % (self.bias_D is not None)
if self.S is not None:
reprString += "GSO stored"
else:
reprString += "no GSO stored"
return reprString
|
[
"torch.eye",
"torch.empty",
"torch.cat",
"torch.arange",
"torch.nn.functional.leaky_relu",
"torch.ones",
"torch.median",
"torch.gather",
"numpy.linalg.eig",
"torch.Tensor",
"utils.graphUtils.graphTools.splineBasis",
"torch.zeros",
"torch.matmul",
"utils.graphUtils.graphTools.computeNeighborhood",
"torch.mean",
"math.sqrt",
"torch.max",
"torch.sum",
"torch.nn.ReLU",
"numpy.zeros",
"torch.nn.functional.softmax",
"torch.index_select",
"numpy.array",
"torch.abs",
"torch.tensor"
] |
[((10592, 10617), 'torch.matmul', 'torch.matmul', (['Vdiagh', 'VHx'], {}), '(Vdiagh, VHx)\n', (10604, 10617), False, 'import torch\n'), ((10781, 10800), 'torch.sum', 'torch.sum', (['y'], {'dim': '(3)'}), '(y, dim=3)\n', (10790, 10800), False, 'import torch\n'), ((10844, 10863), 'torch.sum', 'torch.sum', (['y'], {'dim': '(2)'}), '(y, dim=2)\n', (10853, 10863), False, 'import torch\n'), ((14704, 14724), 'torch.sum', 'torch.sum', (['zh'], {'dim': '(4)'}), '(zh, dim=4)\n', (14713, 14724), False, 'import torch\n'), ((14748, 14767), 'torch.sum', 'torch.sum', (['y'], {'dim': '(3)'}), '(y, dim=3)\n', (14757, 14767), False, 'import torch\n'), ((14791, 14810), 'torch.sum', 'torch.sum', (['y'], {'dim': '(2)'}), '(y, dim=2)\n', (14800, 14810), False, 'import torch\n'), ((18115, 18134), 'torch.matmul', 'torch.matmul', (['Sk', 'x'], {}), '(Sk, x)\n', (18127, 18134), False, 'import torch\n'), ((18995, 19014), 'torch.sum', 'torch.sum', (['z'], {'dim': '(4)'}), '(z, dim=4)\n', (19004, 19014), False, 'import torch\n'), ((19042, 19061), 'torch.sum', 'torch.sum', (['z'], {'dim': '(3)'}), '(z, dim=3)\n', (19051, 19061), False, 'import torch\n'), ((19089, 19108), 'torch.sum', 'torch.sum', (['z'], {'dim': '(2)'}), '(z, dim=2)\n', (19098, 19108), False, 'import torch\n'), ((22006, 22024), 'torch.matmul', 'torch.matmul', (['W', 'x'], {}), '(W, x)\n', (22018, 22024), False, 'import torch\n'), ((22964, 23024), 'torch.nn.functional.leaky_relu', 'nn.functional.leaky_relu', (['aWx'], {'negative_slope': 'negative_slope'}), '(aWx, negative_slope=negative_slope)\n', (22988, 23024), True, 'import torch.nn as nn\n'), ((23790, 23850), 'torch.nn.functional.softmax', 'nn.functional.softmax', (['(eij * maskEdges - infinityMask)'], {'dim': '(4)'}), '(eij * maskEdges - infinityMask, dim=4)\n', (23811, 23850), True, 'import torch.nn as nn\n'), ((26780, 26798), 'torch.matmul', 'torch.matmul', (['W', 'x'], {}), '(W, x)\n', (26792, 26798), False, 'import torch\n'), ((27050, 27069), 'torch.sum', 'torch.sum', (['y'], {'dim': '(2)'}), '(y, dim=2)\n', (27059, 27069), False, 'import torch\n'), ((89696, 89715), 'torch.matmul', 'torch.matmul', (['x', 'SK'], {}), '(x, SK)\n', (89708, 89715), False, 'import torch\n'), ((90179, 90197), 'torch.matmul', 'torch.matmul', (['z', 'h'], {}), '(z, h)\n', (90191, 90197), False, 'import torch\n'), ((4612, 4630), 'torch.matmul', 'torch.matmul', (['x', 'S'], {}), '(x, S)\n', (4624, 4630), False, 'import torch\n'), ((4719, 4744), 'torch.cat', 'torch.cat', (['(z, xS)'], {'dim': '(2)'}), '((z, xS), dim=2)\n', (4728, 4744), False, 'import torch\n'), ((14016, 14036), 'torch.matmul', 'torch.matmul', (['xr', 'Sr'], {}), '(xr, Sr)\n', (14028, 14036), False, 'import torch\n'), ((14126, 14151), 'torch.cat', 'torch.cat', (['(z, xS)'], {'dim': '(2)'}), '((z, xS), dim=2)\n', (14135, 14151), False, 'import torch\n'), ((18697, 18716), 'torch.matmul', 'torch.matmul', (['Sk', 'x'], {}), '(Sk, x)\n', (18709, 18716), False, 'import torch\n'), ((18918, 18943), 'torch.cat', 'torch.cat', (['(z, Sx)'], {'dim': '(2)'}), '((z, Sx), dim=2)\n', (18927, 18943), False, 'import torch\n'), ((23414, 23431), 'torch.abs', 'torch.abs', (['S.data'], {}), '(S.data)\n', (23423, 23431), False, 'import torch\n'), ((43556, 43653), 'utils.graphUtils.graphTools.computeNeighborhood', 'graphTools.computeNeighborhood', (['S', 'self.nHops', 'self.nOutputNodes', 'self.nInputNodes', '"""matrix"""'], {}), "(S, self.nHops, self.nOutputNodes, self.\n nInputNodes, 'matrix')\n", (43586, 43653), True, 'import utils.graphUtils.graphTools as graphTools\n'), 
((47150, 47184), 'torch.gather', 'torch.gather', (['x', '(2)', 'gatherNeighbor'], {}), '(x, 2, gatherNeighbor)\n', (47162, 47184), False, 'import torch\n'), ((47406, 47434), 'torch.max', 'torch.max', (['xNeighbors'], {'dim': '(3)'}), '(xNeighbors, dim=3)\n', (47415, 47434), False, 'import torch\n'), ((60892, 60926), 'numpy.zeros', 'np.zeros', (['[self.E, self.N, self.N]'], {}), '([self.E, self.N, self.N])\n', (60900, 60926), True, 'import numpy as np\n'), ((60940, 60974), 'numpy.zeros', 'np.zeros', (['[self.E, self.N, self.N]'], {}), '([self.E, self.N, self.N])\n', (60948, 60974), True, 'import numpy as np\n'), ((60992, 61018), 'numpy.zeros', 'np.zeros', (['[self.E, self.N]'], {}), '([self.E, self.N])\n', (61000, 61018), True, 'import numpy as np\n'), ((61100, 61134), 'numpy.zeros', 'np.zeros', (['[self.E, self.N, self.M]'], {}), '([self.E, self.N, self.M])\n', (61108, 61134), True, 'import numpy as np\n'), ((79414, 79467), 'torch.cat', 'torch.cat', (['(multipleIdentity, sparsityPattern)'], {'dim': '(2)'}), '((multipleIdentity, sparsityPattern), dim=2)\n', (79423, 79467), False, 'import torch\n'), ((87762, 87785), 'torch.matmul', 'torch.matmul', (['thisSK', 'S'], {}), '(thisSK, S)\n', (87774, 87785), False, 'import torch\n'), ((97590, 97615), 'torch.cat', 'torch.cat', (['(z, xS)'], {'dim': '(2)'}), '((z, xS), dim=2)\n', (97599, 97615), False, 'import torch\n'), ((107810, 107831), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (107817, 107831), True, 'import torch.nn as nn\n'), ((114139, 114160), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (114146, 114160), True, 'import torch.nn as nn\n'), ((119852, 119873), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (119859, 119873), True, 'import torch.nn as nn\n'), ((9427, 9446), 'torch.matmul', 'torch.matmul', (['VH', 'x'], {}), '(VH, x)\n', (9439, 9446), False, 'import torch\n'), ((28623, 28650), 'torch.Tensor', 'torch.Tensor', (['(1)', '(self.K + 1)'], {}), '(1, self.K + 1)\n', (28635, 28650), False, 'import torch\n'), ((33141, 33169), 'torch.max', 'torch.max', (['xNeighbors'], {'dim': '(3)'}), '(xNeighbors, dim=3)\n', (33150, 33169), False, 'import torch\n'), ((33245, 33266), 'torch.cat', 'torch.cat', (['(xK, v)', '(3)'], {}), '((xK, v), 3)\n', (33254, 33266), False, 'import torch\n'), ((33575, 33592), 'math.sqrt', 'math.sqrt', (['self.K'], {}), '(self.K)\n', (33584, 33592), False, 'import math\n'), ((35550, 35577), 'torch.Tensor', 'torch.Tensor', (['(1)', '(self.K + 1)'], {}), '(1, self.K + 1)\n', (35562, 35577), False, 'import torch\n'), ((36989, 37003), 'torch.empty', 'torch.empty', (['(0)'], {}), '(0)\n', (37000, 37003), False, 'import torch\n'), ((38420, 38450), 'torch.cat', 'torch.cat', (['[xK, kHopMedian]', '(3)'], {}), '([xK, kHopMedian], 3)\n', (38429, 38450), False, 'import torch\n'), ((38775, 38792), 'math.sqrt', 'math.sqrt', (['self.K'], {}), '(self.K)\n', (38784, 38792), False, 'import math\n'), ((50009, 50033), 'torch.Tensor', 'torch.Tensor', (['F', 'E', 'K', 'G'], {}), '(F, E, K, G)\n', (50021, 50033), False, 'import torch\n'), ((50359, 50385), 'math.sqrt', 'math.sqrt', (['(self.G * self.K)'], {}), '(self.G * self.K)\n', (50368, 50385), False, 'import math\n'), ((54563, 54587), 'torch.Tensor', 'torch.Tensor', (['H', 'E', 'K', 'G'], {}), '(H, E, K, G)\n', (54575, 54587), False, 'import torch\n'), ((54636, 54660), 'torch.Tensor', 'torch.Tensor', (['H', 'E', 'K', 'H'], {}), '(H, E, K, H)\n', (54648, 54660), False, 'import torch\n'), ((54709, 
54733), 'torch.Tensor', 'torch.Tensor', (['F', 'E', 'K', 'H'], {}), '(F, E, K, H)\n', (54721, 54733), False, 'import torch\n'), ((55201, 55227), 'math.sqrt', 'math.sqrt', (['(self.G * self.K)'], {}), '(self.G * self.K)\n', (55210, 55227), False, 'import math\n'), ((55395, 55421), 'math.sqrt', 'math.sqrt', (['(self.H * self.K)'], {}), '(self.H * self.K)\n', (55404, 55421), False, 'import math\n'), ((55589, 55615), 'math.sqrt', 'math.sqrt', (['(self.H * self.K)'], {}), '(self.H * self.K)\n', (55598, 55615), False, 'import math\n'), ((59715, 59739), 'torch.Tensor', 'torch.Tensor', (['F', 'E', 'G', 'M'], {}), '(F, E, G, M)\n', (59727, 59739), False, 'import torch\n'), ((60065, 60091), 'math.sqrt', 'math.sqrt', (['(self.G * self.M)'], {}), '(self.G * self.M)\n', (60074, 60091), False, 'import math\n'), ((61248, 61275), 'numpy.linalg.eig', 'np.linalg.eig', (['Snp[e, :, :]'], {}), '(Snp[e, :, :])\n', (61261, 61275), True, 'import numpy as np\n'), ((61438, 61482), 'utils.graphUtils.graphTools.splineBasis', 'graphTools.splineBasis', (['self.M', 'Lambda[e, :]'], {}), '(self.M, Lambda[e, :])\n', (61460, 61482), True, 'import utils.graphUtils.graphTools as graphTools\n'), ((63124, 63154), 'torch.cat', 'torch.cat', (['(x, zeroPad)'], {'dim': '(2)'}), '((x, zeroPad), dim=2)\n', (63133, 63154), False, 'import torch\n'), ((66475, 66502), 'torch.Tensor', 'torch.Tensor', (['F', 'E', 'K', 'G', 'M'], {}), '(F, E, K, G, M)\n', (66487, 66502), False, 'import torch\n'), ((66828, 66863), 'math.sqrt', 'math.sqrt', (['(self.G * self.K * self.M)'], {}), '(self.G * self.K * self.M)\n', (66837, 66863), False, 'import math\n'), ((68212, 68261), 'utils.graphUtils.graphTools.computeNeighborhood', 'graphTools.computeNeighborhood', (['npS', '(1)'], {'nb': 'self.M'}), '(npS, 1, nb=self.M)\n', (68242, 68261), True, 'import utils.graphUtils.graphTools as graphTools\n'), ((71490, 71540), 'torch.index_select', 'torch.index_select', (['self.weight', '(4)', 'self.copyNodes'], {}), '(self.weight, 4, self.copyNodes)\n', (71508, 71540), False, 'import torch\n'), ((71703, 71733), 'torch.cat', 'torch.cat', (['(x, zeroPad)'], {'dim': '(2)'}), '((x, zeroPad), dim=2)\n', (71712, 71733), False, 'import torch\n'), ((75677, 75707), 'torch.Tensor', 'torch.Tensor', (['F', 'E', 'K', 'G', 'N', 'N'], {}), '(F, E, K, G, N, N)\n', (75689, 75707), False, 'import torch\n'), ((76257, 76292), 'math.sqrt', 'math.sqrt', (['(self.G * self.K * self.N)'], {}), '(self.G * self.K * self.N)\n', (76266, 76292), False, 'import math\n'), ((77468, 77496), 'torch.ones', 'torch.ones', (['[self.M, self.N]'], {}), '([self.M, self.N])\n', (77478, 77496), False, 'import torch\n'), ((77575, 77612), 'torch.ones', 'torch.ones', (['[self.N - self.M, self.M]'], {}), '([self.N - self.M, self.M])\n', (77585, 77612), False, 'import torch\n'), ((77674, 77721), 'torch.zeros', 'torch.zeros', (['[self.N - self.M, self.N - self.M]'], {}), '([self.N - self.M, self.N - self.M])\n', (77685, 77721), False, 'import torch\n'), ((77785, 77840), 'torch.cat', 'torch.cat', (['(hybridMaskOnesCols, hybridMaskZeros)'], {'dim': '(1)'}), '((hybridMaskOnesCols, hybridMaskZeros), dim=1)\n', (77794, 77840), False, 'import torch\n'), ((77900, 77950), 'torch.cat', 'torch.cat', (['(hybridMaskOnesRows, hybridMask)'], {'dim': '(0)'}), '((hybridMaskOnesRows, hybridMask), dim=0)\n', (77909, 77950), False, 'import torch\n'), ((77989, 78017), 'torch.ones', 'torch.ones', (['[self.N, self.N]'], {}), '([self.N, self.N])\n', (77999, 78017), False, 'import torch\n'), ((80074, 80104), 'torch.cat', 'torch.cat', 
(['(x, zeroPad)'], {'dim': '(2)'}), '((x, zeroPad), dim=2)\n', (80083, 80104), False, 'import torch\n'), ((83968, 83993), 'torch.Tensor', 'torch.Tensor', (['K', 'E', '(2 * F)'], {}), '(K, E, 2 * F)\n', (83980, 83993), False, 'import torch\n'), ((84038, 84062), 'torch.Tensor', 'torch.Tensor', (['K', 'E', 'F', 'G'], {}), '(K, E, F, G)\n', (84050, 84062), False, 'import torch\n'), ((84240, 84266), 'math.sqrt', 'math.sqrt', (['(self.G * self.K)'], {}), '(self.G * self.K)\n', (84249, 84266), False, 'import math\n'), ((85814, 85834), 'torch.mean', 'torch.mean', (['y'], {'dim': '(1)'}), '(y, dim=1)\n', (85824, 85834), False, 'import torch\n'), ((100654, 100678), 'torch.Tensor', 'torch.Tensor', (['F', 'E', 'K', 'G'], {}), '(F, E, K, G)\n', (100666, 100678), False, 'import torch\n'), ((101004, 101030), 'math.sqrt', 'math.sqrt', (['(self.G * self.K)'], {}), '(self.G * self.K)\n', (101013, 101030), False, 'import math\n'), ((105003, 105027), 'torch.Tensor', 'torch.Tensor', (['H', 'E', 'K', 'G'], {}), '(H, E, K, G)\n', (105015, 105027), False, 'import torch\n'), ((105076, 105100), 'torch.Tensor', 'torch.Tensor', (['H', 'E', 'K', 'H'], {}), '(H, E, K, H)\n', (105088, 105100), False, 'import torch\n'), ((105149, 105173), 'torch.Tensor', 'torch.Tensor', (['F', 'E', 'K', 'H'], {}), '(F, E, K, H)\n', (105161, 105173), False, 'import torch\n'), ((105641, 105667), 'math.sqrt', 'math.sqrt', (['(self.G * self.K)'], {}), '(self.G * self.K)\n', (105650, 105667), False, 'import math\n'), ((105835, 105861), 'math.sqrt', 'math.sqrt', (['(self.H * self.K)'], {}), '(self.H * self.K)\n', (105844, 105861), False, 'import math\n'), ((106029, 106055), 'math.sqrt', 'math.sqrt', (['(self.H * self.K)'], {}), '(self.H * self.K)\n', (106038, 106055), False, 'import math\n'), ((111844, 111868), 'torch.Tensor', 'torch.Tensor', (['H', 'E', 'K', 'G'], {}), '(H, E, K, G)\n', (111856, 111868), False, 'import torch\n'), ((111917, 111935), 'torch.Tensor', 'torch.Tensor', (['H', 'H'], {}), '(H, H)\n', (111929, 111935), False, 'import torch\n'), ((111984, 112002), 'torch.Tensor', 'torch.Tensor', (['F', 'H'], {}), '(F, H)\n', (111996, 112002), False, 'import torch\n'), ((112510, 112536), 'math.sqrt', 'math.sqrt', (['(self.G * self.K)'], {}), '(self.G * self.K)\n', (112519, 112536), False, 'import math\n'), ((112704, 112721), 'math.sqrt', 'math.sqrt', (['self.H'], {}), '(self.H)\n', (112713, 112721), False, 'import math\n'), ((112889, 112906), 'math.sqrt', 'math.sqrt', (['self.H'], {}), '(self.H)\n', (112898, 112906), False, 'import math\n'), ((117675, 117699), 'torch.Tensor', 'torch.Tensor', (['H', 'E', 'K', 'G'], {}), '(H, E, K, G)\n', (117687, 117699), False, 'import torch\n'), ((117748, 117766), 'torch.Tensor', 'torch.Tensor', (['H', 'H'], {}), '(H, H)\n', (117760, 117766), False, 'import torch\n'), ((117815, 117833), 'torch.Tensor', 'torch.Tensor', (['F', 'H'], {}), '(F, H)\n', (117827, 117833), False, 'import torch\n'), ((118341, 118367), 'math.sqrt', 'math.sqrt', (['(self.G * self.K)'], {}), '(self.G * self.K)\n', (118350, 118367), False, 'import math\n'), ((118535, 118552), 'math.sqrt', 'math.sqrt', (['self.H'], {}), '(self.H)\n', (118544, 118552), False, 'import math\n'), ((118720, 118737), 'math.sqrt', 'math.sqrt', (['self.H'], {}), '(self.H)\n', (118729, 118737), False, 'import math\n'), ((22226, 22241), 'torch.arange', 'torch.arange', (['F'], {}), '(F)\n', (22238, 22241), False, 'import torch\n'), ((22302, 22324), 'torch.arange', 'torch.arange', (['F', '(2 * F)'], {}), '(F, 2 * F)\n', (22314, 22324), False, 'import torch\n'), 
((29281, 29297), 'numpy.array', 'np.array', (['self.S'], {}), '(self.S)\n', (29289, 29297), True, 'import numpy as np\n'), ((29400, 29430), 'torch.tensor', 'torch.tensor', (['thisNeighborhood'], {}), '(thisNeighborhood)\n', (29412, 29430), False, 'import torch\n'), ((36174, 36190), 'numpy.array', 'np.array', (['self.S'], {}), '(self.S)\n', (36182, 36190), True, 'import numpy as np\n'), ((37981, 38030), 'torch.median', 'torch.median', (['xNodeNeighbors'], {'dim': '(2)', 'keepdim': '(True)'}), '(xNodeNeighbors, dim=2, keepdim=True)\n', (37993, 38030), False, 'import torch\n'), ((38165, 38203), 'torch.cat', 'torch.cat', (['[kHopMedian, nodeMedian]', '(2)'], {}), '([kHopMedian, nodeMedian], 2)\n', (38174, 38203), False, 'import torch\n'), ((43832, 43858), 'torch.tensor', 'torch.tensor', (['neighborhood'], {}), '(neighborhood)\n', (43844, 43858), False, 'import torch\n'), ((50099, 50117), 'torch.Tensor', 'torch.Tensor', (['F', '(1)'], {}), '(F, 1)\n', (50111, 50117), False, 'import torch\n'), ((54801, 54819), 'torch.Tensor', 'torch.Tensor', (['H', '(1)'], {}), '(H, 1)\n', (54813, 54819), False, 'import torch\n'), ((54870, 54888), 'torch.Tensor', 'torch.Tensor', (['H', '(1)'], {}), '(H, 1)\n', (54882, 54888), False, 'import torch\n'), ((54939, 54957), 'torch.Tensor', 'torch.Tensor', (['F', '(1)'], {}), '(F, 1)\n', (54951, 54957), False, 'import torch\n'), ((59805, 59823), 'torch.Tensor', 'torch.Tensor', (['F', '(1)'], {}), '(F, 1)\n', (59817, 59823), False, 'import torch\n'), ((66568, 66586), 'torch.Tensor', 'torch.Tensor', (['F', '(1)'], {}), '(F, 1)\n', (66580, 66586), False, 'import torch\n'), ((68913, 68962), 'utils.graphUtils.graphTools.computeNeighborhood', 'graphTools.computeNeighborhood', (['npS', 'K'], {'nb': 'self.M'}), '(npS, K, nb=self.M)\n', (68943, 68962), True, 'import utils.graphUtils.graphTools as graphTools\n'), ((75838, 75862), 'torch.Tensor', 'torch.Tensor', (['F', 'E', 'K', 'G'], {}), '(F, E, K, G)\n', (75850, 75862), False, 'import torch\n'), ((75997, 76015), 'torch.Tensor', 'torch.Tensor', (['F', '(1)'], {}), '(F, 1)\n', (76009, 76015), False, 'import torch\n'), ((77056, 77068), 'torch.abs', 'torch.abs', (['S'], {}), '(S)\n', (77065, 77068), False, 'import torch\n'), ((100744, 100762), 'torch.Tensor', 'torch.Tensor', (['F', '(1)'], {}), '(F, 1)\n', (100756, 100762), False, 'import torch\n'), ((105241, 105259), 'torch.Tensor', 'torch.Tensor', (['H', '(1)'], {}), '(H, 1)\n', (105253, 105259), False, 'import torch\n'), ((105310, 105328), 'torch.Tensor', 'torch.Tensor', (['H', '(1)'], {}), '(H, 1)\n', (105322, 105328), False, 'import torch\n'), ((105379, 105397), 'torch.Tensor', 'torch.Tensor', (['F', '(1)'], {}), '(F, 1)\n', (105391, 105397), False, 'import torch\n'), ((112070, 112088), 'torch.Tensor', 'torch.Tensor', (['H', '(1)'], {}), '(H, 1)\n', (112082, 112088), False, 'import torch\n'), ((112139, 112157), 'torch.Tensor', 'torch.Tensor', (['H', '(1)'], {}), '(H, 1)\n', (112151, 112157), False, 'import torch\n'), ((112208, 112226), 'torch.Tensor', 'torch.Tensor', (['F', '(1)'], {}), '(F, 1)\n', (112220, 112226), False, 'import torch\n'), ((117901, 117919), 'torch.Tensor', 'torch.Tensor', (['H', '(1)'], {}), '(H, 1)\n', (117913, 117919), False, 'import torch\n'), ((117970, 117988), 'torch.Tensor', 'torch.Tensor', (['H', '(1)'], {}), '(H, 1)\n', (117982, 117988), False, 'import torch\n'), ((118039, 118057), 'torch.Tensor', 'torch.Tensor', (['F', '(1)'], {}), '(F, 1)\n', (118051, 118057), False, 'import torch\n'), ((37492, 37521), 'numpy.array', 'np.array', 
(['kHopNeighborhood[n]'], {}), '(kHopNeighborhood[n])\n', (37500, 37521), True, 'import numpy as np\n'), ((70224, 70247), 'torch.tensor', 'torch.tensor', (['copyNodes'], {}), '(copyNodes)\n', (70236, 70247), False, 'import torch\n'), ((80463, 80497), 'torch.tensor', 'torch.tensor', (['(0.0)'], {'dtype': 'uEV.dtype'}), '(0.0, dtype=uEV.dtype)\n', (80475, 80497), False, 'import torch\n'), ((87643, 87655), 'torch.eye', 'torch.eye', (['N'], {}), '(N)\n', (87652, 87655), False, 'import torch\n'), ((17899, 17914), 'torch.tensor', 'torch.tensor', (['(0)'], {}), '(0)\n', (17911, 17914), False, 'import torch\n'), ((51597, 51614), 'torch.arange', 'torch.arange', (['Nin'], {}), '(Nin)\n', (51609, 51614), False, 'import torch\n'), ((56985, 57002), 'torch.arange', 'torch.arange', (['Nin'], {}), '(Nin)\n', (56997, 57002), False, 'import torch\n'), ((61610, 61625), 'torch.tensor', 'torch.tensor', (['V'], {}), '(V)\n', (61622, 61625), False, 'import torch\n'), ((61683, 61699), 'torch.tensor', 'torch.tensor', (['VH'], {}), '(VH)\n', (61695, 61699), False, 'import torch\n'), ((61767, 61793), 'torch.tensor', 'torch.tensor', (['splineKernel'], {}), '(splineKernel)\n', (61779, 61793), False, 'import torch\n'), ((63578, 63595), 'torch.arange', 'torch.arange', (['Nin'], {}), '(Nin)\n', (63590, 63595), False, 'import torch\n'), ((70385, 70405), 'torch.arange', 'torch.arange', (['self.M'], {}), '(self.M)\n', (70397, 70405), False, 'import torch\n'), ((70711, 70731), 'torch.arange', 'torch.arange', (['self.N'], {}), '(self.N)\n', (70723, 70731), False, 'import torch\n'), ((72142, 72159), 'torch.arange', 'torch.arange', (['Nin'], {}), '(Nin)\n', (72154, 72159), False, 'import torch\n'), ((80879, 80896), 'torch.arange', 'torch.arange', (['Nin'], {}), '(Nin)\n', (80891, 80896), False, 'import torch\n'), ((86002, 86019), 'torch.arange', 'torch.arange', (['Nin'], {}), '(Nin)\n', (86014, 86019), False, 'import torch\n'), ((102251, 102268), 'torch.arange', 'torch.arange', (['Nin'], {}), '(Nin)\n', (102263, 102268), False, 'import torch\n'), ((108367, 108384), 'torch.arange', 'torch.arange', (['Nin'], {}), '(Nin)\n', (108379, 108384), False, 'import torch\n'), ((114889, 114906), 'torch.arange', 'torch.arange', (['Nin'], {}), '(Nin)\n', (114901, 114906), False, 'import torch\n'), ((120505, 120522), 'torch.arange', 'torch.arange', (['Nin'], {}), '(Nin)\n', (120517, 120522), False, 'import torch\n'), ((18453, 18468), 'torch.tensor', 'torch.tensor', (['k'], {}), '(k)\n', (18465, 18468), False, 'import torch\n'), ((63051, 63082), 'torch.zeros', 'torch.zeros', (['B', 'F', '(self.N - Nin)'], {}), '(B, F, self.N - Nin)\n', (63062, 63082), False, 'import torch\n'), ((71630, 71661), 'torch.zeros', 'torch.zeros', (['B', 'F', '(self.N - Nin)'], {}), '(B, F, self.N - Nin)\n', (71641, 71661), False, 'import torch\n'), ((80001, 80032), 'torch.zeros', 'torch.zeros', (['B', 'F', '(self.N - Nin)'], {}), '(B, F, self.N - Nin)\n', (80012, 80032), False, 'import torch\n'), ((21566, 21578), 'torch.eye', 'torch.eye', (['N'], {}), '(N)\n', (21575, 21578), False, 'import torch\n'), ((76867, 76884), 'torch.eye', 'torch.eye', (['self.N'], {}), '(self.N)\n', (76876, 76884), False, 'import torch\n'), ((51053, 51084), 'torch.zeros', 'torch.zeros', (['B', 'F', '(self.N - Nin)'], {}), '(B, F, self.N - Nin)\n', (51064, 51084), False, 'import torch\n'), ((56300, 56331), 'torch.zeros', 'torch.zeros', (['B', 'F', '(self.N - Nin)'], {}), '(B, F, self.N - Nin)\n', (56311, 56331), False, 'import torch\n'), ((84897, 84928), 'torch.zeros', 'torch.zeros', (['B', 
'F', '(self.N - Nin)'], {}), '(B, F, self.N - Nin)\n', (84908, 84928), False, 'import torch\n'), ((101702, 101733), 'torch.zeros', 'torch.zeros', (['B', 'F', '(self.N - Nin)'], {}), '(B, F, self.N - Nin)\n', (101713, 101733), False, 'import torch\n'), ((107482, 107513), 'torch.zeros', 'torch.zeros', (['B', 'F', '(self.N - Nin)'], {}), '(B, F, self.N - Nin)\n', (107493, 107513), False, 'import torch\n'), ((113680, 113711), 'torch.zeros', 'torch.zeros', (['B', 'F', '(self.N - Nin)'], {}), '(B, F, self.N - Nin)\n', (113691, 113711), False, 'import torch\n'), ((119511, 119542), 'torch.zeros', 'torch.zeros', (['B', 'F', '(self.N - Nin)'], {}), '(B, F, self.N - Nin)\n', (119522, 119542), False, 'import torch\n')]
|
import array
import math
import chuong_1
import chuong_2
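# Transformer selection for the three substations, based on the peak apparent
# power taken from chuong_1/chuong_2. T1 and T2 are sized with two parallel
# 20000/110 units (S_dm_B >= S_max / 1.4) and T3 with a single 31500/110 unit;
# each function then computes the equivalent parameters referred to the 110 kV
# side, using the constants hard-coded below:
#   R_B = dP_N * U^2 / S_rated^2 * 10^3
#   Z_B = u_N * U^2 / S_rated * 10,  X_B = sqrt(Z_B^2 - R_B^2)
#   dQ_Fe = i_0 * S_rated / 100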
def bien_ap_t1():
s_max=abs(chuong_2.s_a)
s_dm_B=s_max/1.4
d_p_n=260
d_p_0=100
u_n=14
i_0=0.045
r_b1=d_p_n*110**2/20000**2*10**3
z_b1=u_n*110**2/20000*10
x_b1=math.sqrt(z_b1**2-r_b1**2)
d_q_FE=i_0*20000/100
print('S_ptmax1:',s_max,'\nS_dm_B>=',s_dm_B)
    print('==> Select 2 transformers rated 20000/110 kV')
print('R_B1:',r_b1)
print('Z_B1:',z_b1)
print('X_B1:',x_b1)
print('ΔQ_FE:',d_q_FE)
print()
def bien_ap_t2():
s_max=abs(chuong_2.s_b)
s_dm_B=s_max/1.4
d_p_n=260
d_p_0=100
u_n=14
i_0=0.045
r_b1=d_p_n*110**2/20000**2*10**3
z_b1=u_n*110**2/20000*10
x_b1=math.sqrt(z_b1**2-r_b1**2)
d_q_FE=i_0*20000/100
print('S_ptmax2:',s_max,'\nS_dm_B>=',s_dm_B)
    print('==> Select 2 transformers rated 20000/110 kV')
print('R_B2:',r_b1)
print('Z_B2:',z_b1)
print('X_B2:',x_b1)
print('ΔQ_FE:',d_q_FE)
print()
def bien_ap_t3():
s_max=chuong_1.load_3[0]/chuong_1.load_3[2]
d_p_n=220
d_p_0=115
u_n=14
i_0=0.042
r_b1=d_p_n*110**2/31500**2*10**3
z_b1=u_n*110**2/31500*10
x_b1=math.sqrt(z_b1**2-r_b1**2)
d_q_FE=i_0*31500/100
print('S_ptmax3:',s_max,'\nS_dm_B>=',s_max)
    print('==> Select 1 transformer rated 31500/110 kV')
print('R_B3:',r_b1)
print('Z_B3:',z_b1)
print('X_B3:',x_b1)
print('ΔQ_FE:',d_q_FE)
print()
|
[
"math.sqrt"
] |
[((252, 284), 'math.sqrt', 'math.sqrt', (['(z_b1 ** 2 - r_b1 ** 2)'], {}), '(z_b1 ** 2 - r_b1 ** 2)\n', (261, 284), False, 'import math\n'), ((709, 741), 'math.sqrt', 'math.sqrt', (['(z_b1 ** 2 - r_b1 ** 2)'], {}), '(z_b1 ** 2 - r_b1 ** 2)\n', (718, 741), False, 'import math\n'), ((1164, 1196), 'math.sqrt', 'math.sqrt', (['(z_b1 ** 2 - r_b1 ** 2)'], {}), '(z_b1 ** 2 - r_b1 ** 2)\n', (1173, 1196), False, 'import math\n')]
|
# Useful starting lines
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
# %load_ext autoreload
# %autoreload 2
from sklearn import linear_model
# from __future__ import absolute_import
from labs.ex03.template import helpers
from labs.ex04.template.costs import compute_rmse, compute_mse
from labs.ex04.template.costs import compute_mse_for_ridge
from labs.ex04.template.ridge_regression import ridge_regression
from labs.ex04.template.build_polynomial import build_poly
from labs.ex04.template.plots import cross_validation_visualization
from labs.ex04.template.plots import cross_validation_visualization_for_degree
from labs.ex04.template.least_squares import least_squares
from labs.ex04.template.split_data import split_data
from labs.ex04.template.plots import bias_variance_decomposition_visualization
# load dataset
def data_load():
''' Return x, y '''
return helpers.load_data()
def build_k_indices(y, k_fold, seed):
"""build k indices for k-fold."""
num_row = y.shape[0]
interval = int(num_row / k_fold)
np.random.seed(seed)
indices = np.random.permutation(num_row)
k_indices = [indices[k * interval: (k + 1) * interval]
for k in range(k_fold)]
return np.array(k_indices)
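# For example (hypothetical sizes): with 12 samples, k_fold=4 and seed=1 this
# yields a (4, 3) array of disjoint, shuffled index groups, so fold k can serve
# as the test split while the remaining folds form the training split.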
def cross_validation(y, x, k_indices, k, lamb, degree, rmse=False):
"""return the loss of ridge regression."""
# ***************************************************
# Split data into K groups according to indices
# get k'th subgroup in test, others in train:
# ***************************************************
x = np.array(x)
y = np.array(y)
train_ind = np.concatenate((k_indices[:k], k_indices[k+1:]), axis=0)
train_ind = np.reshape(train_ind, (train_ind.size,))
test_ind = k_indices[k]
# Note: different from np.ndarray, tuple is name[index,]
# ndarray is name[index,:]
train_x = x[train_ind,]
train_y = y[train_ind,]
test_x = x[test_ind,]
test_y = y[test_ind,]
# ***************************************************
# INSERT YOUR CODE HERE
# form data with polynomial degree:
# ***************************************************
train_x = build_poly(train_x, degree)
test_x = build_poly(test_x, degree)
# ***************************************************
# INSERT YOUR CODE HERE
# ridge regression:
# ***************************************************
loss_tr, weight = ridge_regression(train_y, train_x, lamb)
# Test with sklearn ridge solve.
clf = linear_model.ridge_regression(train_x, train_y, alpha=lamb)
# weight = clf
# ***************************************************
# INSERT YOUR CODE HERE
# calculate the loss for train and test data: TODO
# ***************************************************
''' Compute MSE by ridge weights '''
loss_tr = compute_mse_for_ridge(train_y, train_x, weight,lamb)
loss_te = compute_mse_for_ridge(test_y, test_x, weight, lamb)
# loss_tr = compute_mse(train_y, train_x, weight)
# loss_te = compute_mse(test_y, test_x, weight)
if rmse is True:
loss_tr = compute_rmse(loss_tr)
loss_te = compute_rmse(loss_te)
return loss_tr, loss_te
def cross_validation_demo():
seed = 1
degree = 7
k_fold = 4
lambdas = np.logspace(-4, 2, 30)
y,x = data_load()
# split data in k fold
k_indices = build_k_indices(y, k_fold, seed)
# define lists to store the loss of training data and test data
mse_tr = []
mse_te = []
# ***************************************************
# INSERT YOUR CODE HERE
# cross validation:
# ***************************************************
for lamb in lambdas:
_mse_tr = []
_mse_te = []
for k in range(k_fold):
loss_tr, loss_te = cross_validation(y,x,k_indices,k,lamb,degree, rmse=True)
_mse_tr += [loss_tr]
_mse_te += [loss_te]
avg_tr = np.average(_mse_tr)
avg_te = np.average(_mse_te)
mse_tr += [avg_tr]
mse_te += [avg_te]
cross_validation_visualization(lambdas, mse_tr, mse_te)
print(mse_tr, mse_te)
def cross_validation_demo_degree():
seed = 1
degrees = range(2,11)
k_fold = 4
lamb = 0.5
y,x = data_load()
# split data in k fold
k_indices = build_k_indices(y, k_fold, seed)
# define lists to store the loss of training data and test data
mse_tr = []
mse_te = []
# ***************************************************
# INSERT YOUR CODE HERE
# cross validation:
# ***************************************************
for degree in degrees:
_mse_tr = []
_mse_te = []
for k in range(k_fold):
loss_tr, loss_te = cross_validation(y,x,k_indices,k,lamb, degree, rmse=True)
_mse_tr += [loss_tr]
_mse_te += [loss_te]
avg_tr = np.average(_mse_tr)
avg_te = np.average(_mse_te)
mse_tr += [avg_tr]
mse_te += [avg_te]
cross_validation_visualization_for_degree(degrees, mse_tr, mse_te)
print(mse_tr, mse_te)
def bias_variance2(y, x, weight, variance_e):
'''
    Bias-variance error estimate for a linear model with len(weight) parameters.
    :param y: observed targets
    :param x: feature matrix (polynomial basis)
    :param weight: beta of linear model
    :param variance_e: noise variance
    :return: RMSE-style error estimate
'''
# N = len(x)
# res = np.dot(x, weight)
# error = variance_e * (len(weight) / N) + np.sum( (y - np.dot(x, weight)) **2 )/ N
# return compute_rmse(error)
return compute_rmse(compute_mse(y,x,weight) + 1 + len(weight)/ len(x))
def bias_variance(function, x, weight, variance_e):
'''
    Bias-variance error estimate for a linear model with len(weight) parameters,
    where the true targets are generated by applying `function` to x[:, 1].
    :param function: true underlying function (e.g. np.sin)
    :param x: feature matrix (polynomial basis); x[:, 1] holds the raw inputs
    :param weight: beta of linear model
    :param variance_e: noise variance
    :return: RMSE of the fitted model against the generated targets
'''
y = function(x[:,1])
# N = len(x)
# res = np.dot(x, weight)
# error = variance_e * (len(weight) / N) + np.sum( (y - np.dot(x, weight)) **2 )/ N
# return compute_rmse(error)
return compute_rmse(compute_mse(y,x,weight))
def bias_variance_demo():
"""The entry."""
# define parameters
seeds = range(100)
num_data = 10000
ratio_train = 0.005
degrees = range(1, 10)
# define list to store the variable
rmse_tr = np.empty((len(seeds), len(degrees)))
rmse_te = np.empty((len(seeds), len(degrees)))
for index_seed, seed in enumerate(seeds):
np.random.seed(seed)
x = np.linspace(0.1, 2 * np.pi, num_data)
y = np.sin(x) + 0.3 * np.random.randn(num_data).T
# ***************************************************
# INSERT YOUR CODE HERE
# split data with a specific seed: TODO
# ***************************************************
train_x, train_y, test_x, test_y = split_data(x,y,ratio_train,seed)
# ***************************************************
# INSERT YOUR CODE HERE
# bias_variance_decomposition: TODO
# ***************************************************
for ind_degree, degree in enumerate(degrees):
# Use least square
x_tr = build_poly(train_x, degree)
x_te = build_poly(test_x, degree)
mse, weight = least_squares(train_y, x_tr)
rmse_tr[index_seed][ind_degree] = bias_variance(np.sin, x_tr, weight, 1)
rmse_te[index_seed][ind_degree] = bias_variance(np.sin, x_te, weight, 1)
# rmse_tr[index_seed][ind_degree] = bias_variance2(train_y, x_tr, weight, 1)
# rmse_te[index_seed][ind_degree] = bias_variance2(test_y, x_te, weight, 1)
bias_variance_decomposition_visualization(degrees, rmse_tr, rmse_te)
# cross_validation_demo()
# degree = 5.
# cross_validation_demo_degree()
bias_variance_demo()
print()
|
[
"numpy.random.seed",
"numpy.logspace",
"labs.ex04.template.plots.cross_validation_visualization_for_degree",
"labs.ex04.template.costs.compute_mse_for_ridge",
"numpy.sin",
"labs.ex04.template.plots.bias_variance_decomposition_visualization",
"labs.ex04.template.ridge_regression.ridge_regression",
"labs.ex04.template.split_data.split_data",
"numpy.random.randn",
"numpy.reshape",
"numpy.linspace",
"numpy.average",
"labs.ex04.template.plots.cross_validation_visualization",
"labs.ex04.template.build_polynomial.build_poly",
"labs.ex03.template.helpers.load_data",
"numpy.random.permutation",
"labs.ex04.template.costs.compute_rmse",
"numpy.concatenate",
"labs.ex04.template.least_squares.least_squares",
"labs.ex04.template.costs.compute_mse",
"numpy.array",
"sklearn.linear_model.ridge_regression"
] |
[((904, 923), 'labs.ex03.template.helpers.load_data', 'helpers.load_data', ([], {}), '()\n', (921, 923), False, 'from labs.ex03.template import helpers\n'), ((1067, 1087), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1081, 1087), True, 'import numpy as np\n'), ((1102, 1132), 'numpy.random.permutation', 'np.random.permutation', (['num_row'], {}), '(num_row)\n', (1123, 1132), True, 'import numpy as np\n'), ((1244, 1263), 'numpy.array', 'np.array', (['k_indices'], {}), '(k_indices)\n', (1252, 1263), True, 'import numpy as np\n'), ((1606, 1617), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1614, 1617), True, 'import numpy as np\n'), ((1626, 1637), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (1634, 1637), True, 'import numpy as np\n'), ((1655, 1713), 'numpy.concatenate', 'np.concatenate', (['(k_indices[:k], k_indices[k + 1:])'], {'axis': '(0)'}), '((k_indices[:k], k_indices[k + 1:]), axis=0)\n', (1669, 1713), True, 'import numpy as np\n'), ((1728, 1768), 'numpy.reshape', 'np.reshape', (['train_ind', '(train_ind.size,)'], {}), '(train_ind, (train_ind.size,))\n', (1738, 1768), True, 'import numpy as np\n'), ((2197, 2224), 'labs.ex04.template.build_polynomial.build_poly', 'build_poly', (['train_x', 'degree'], {}), '(train_x, degree)\n', (2207, 2224), False, 'from labs.ex04.template.build_polynomial import build_poly\n'), ((2238, 2264), 'labs.ex04.template.build_polynomial.build_poly', 'build_poly', (['test_x', 'degree'], {}), '(test_x, degree)\n', (2248, 2264), False, 'from labs.ex04.template.build_polynomial import build_poly\n'), ((2456, 2496), 'labs.ex04.template.ridge_regression.ridge_regression', 'ridge_regression', (['train_y', 'train_x', 'lamb'], {}), '(train_y, train_x, lamb)\n', (2472, 2496), False, 'from labs.ex04.template.ridge_regression import ridge_regression\n'), ((2545, 2604), 'sklearn.linear_model.ridge_regression', 'linear_model.ridge_regression', (['train_x', 'train_y'], {'alpha': 'lamb'}), '(train_x, train_y, alpha=lamb)\n', (2574, 2604), False, 'from sklearn import linear_model\n'), ((2879, 2932), 'labs.ex04.template.costs.compute_mse_for_ridge', 'compute_mse_for_ridge', (['train_y', 'train_x', 'weight', 'lamb'], {}), '(train_y, train_x, weight, lamb)\n', (2900, 2932), False, 'from labs.ex04.template.costs import compute_mse_for_ridge\n'), ((2946, 2997), 'labs.ex04.template.costs.compute_mse_for_ridge', 'compute_mse_for_ridge', (['test_y', 'test_x', 'weight', 'lamb'], {}), '(test_y, test_x, weight, lamb)\n', (2967, 2997), False, 'from labs.ex04.template.costs import compute_mse_for_ridge\n'), ((3321, 3343), 'numpy.logspace', 'np.logspace', (['(-4)', '(2)', '(30)'], {}), '(-4, 2, 30)\n', (3332, 3343), True, 'import numpy as np\n'), ((4096, 4151), 'labs.ex04.template.plots.cross_validation_visualization', 'cross_validation_visualization', (['lambdas', 'mse_tr', 'mse_te'], {}), '(lambdas, mse_tr, mse_te)\n', (4126, 4151), False, 'from labs.ex04.template.plots import cross_validation_visualization\n'), ((5039, 5105), 'labs.ex04.template.plots.cross_validation_visualization_for_degree', 'cross_validation_visualization_for_degree', (['degrees', 'mse_tr', 'mse_te'], {}), '(degrees, mse_tr, mse_te)\n', (5080, 5105), False, 'from labs.ex04.template.plots import cross_validation_visualization_for_degree\n'), ((7713, 7781), 'labs.ex04.template.plots.bias_variance_decomposition_visualization', 'bias_variance_decomposition_visualization', (['degrees', 'rmse_tr', 'rmse_te'], {}), '(degrees, rmse_tr, rmse_te)\n', (7754, 7781), False, 'from 
labs.ex04.template.plots import bias_variance_decomposition_visualization\n'), ((3144, 3165), 'labs.ex04.template.costs.compute_rmse', 'compute_rmse', (['loss_tr'], {}), '(loss_tr)\n', (3156, 3165), False, 'from labs.ex04.template.costs import compute_rmse, compute_mse\n'), ((3184, 3205), 'labs.ex04.template.costs.compute_rmse', 'compute_rmse', (['loss_te'], {}), '(loss_te)\n', (3196, 3205), False, 'from labs.ex04.template.costs import compute_rmse, compute_mse\n'), ((3980, 3999), 'numpy.average', 'np.average', (['_mse_tr'], {}), '(_mse_tr)\n', (3990, 3999), True, 'import numpy as np\n'), ((4017, 4036), 'numpy.average', 'np.average', (['_mse_te'], {}), '(_mse_te)\n', (4027, 4036), True, 'import numpy as np\n'), ((4923, 4942), 'numpy.average', 'np.average', (['_mse_tr'], {}), '(_mse_tr)\n', (4933, 4942), True, 'import numpy as np\n'), ((4960, 4979), 'numpy.average', 'np.average', (['_mse_te'], {}), '(_mse_te)\n', (4970, 4979), True, 'import numpy as np\n'), ((6129, 6154), 'labs.ex04.template.costs.compute_mse', 'compute_mse', (['y', 'x', 'weight'], {}), '(y, x, weight)\n', (6140, 6154), False, 'from labs.ex04.template.costs import compute_rmse, compute_mse\n'), ((6519, 6539), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (6533, 6539), True, 'import numpy as np\n'), ((6552, 6589), 'numpy.linspace', 'np.linspace', (['(0.1)', '(2 * np.pi)', 'num_data'], {}), '(0.1, 2 * np.pi, num_data)\n', (6563, 6589), True, 'import numpy as np\n'), ((6895, 6930), 'labs.ex04.template.split_data.split_data', 'split_data', (['x', 'y', 'ratio_train', 'seed'], {}), '(x, y, ratio_train, seed)\n', (6905, 6930), False, 'from labs.ex04.template.split_data import split_data\n'), ((6602, 6611), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (6608, 6611), True, 'import numpy as np\n'), ((7232, 7259), 'labs.ex04.template.build_polynomial.build_poly', 'build_poly', (['train_x', 'degree'], {}), '(train_x, degree)\n', (7242, 7259), False, 'from labs.ex04.template.build_polynomial import build_poly\n'), ((7279, 7305), 'labs.ex04.template.build_polynomial.build_poly', 'build_poly', (['test_x', 'degree'], {}), '(test_x, degree)\n', (7289, 7305), False, 'from labs.ex04.template.build_polynomial import build_poly\n'), ((7332, 7360), 'labs.ex04.template.least_squares.least_squares', 'least_squares', (['train_y', 'x_tr'], {}), '(train_y, x_tr)\n', (7345, 7360), False, 'from labs.ex04.template.least_squares import least_squares\n'), ((5589, 5614), 'labs.ex04.template.costs.compute_mse', 'compute_mse', (['y', 'x', 'weight'], {}), '(y, x, weight)\n', (5600, 5614), False, 'from labs.ex04.template.costs import compute_rmse, compute_mse\n'), ((6620, 6645), 'numpy.random.randn', 'np.random.randn', (['num_data'], {}), '(num_data)\n', (6635, 6645), True, 'import numpy as np\n')]
|
import time
from roslibpy import Topic
from compas_fab.backends import RosClient
def receive_message(message):
print("Received: " + message["data"])
with RosClient("localhost") as client:
print("Waiting for messages...")
listener = Topic(client, "/messages", "std_msgs/String")
listener.subscribe(receive_message)
while client.is_connected:
time.sleep(1)
|
[
"roslibpy.Topic",
"compas_fab.backends.RosClient",
"time.sleep"
] |
[((163, 185), 'compas_fab.backends.RosClient', 'RosClient', (['"""localhost"""'], {}), "('localhost')\n", (172, 185), False, 'from compas_fab.backends import RosClient\n'), ((250, 295), 'roslibpy.Topic', 'Topic', (['client', '"""/messages"""', '"""std_msgs/String"""'], {}), "(client, '/messages', 'std_msgs/String')\n", (255, 295), False, 'from roslibpy import Topic\n'), ((376, 389), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (386, 389), False, 'import time\n')]
|
import os
import sys
sys.path.insert(0, os.getcwd())
try:
from easygui_qt import easygui_qt
except ImportError:
print("problem with import")
name = easygui_qt.text_input(message="What is your name?",
title="Mine is Reeborg.")
print(name, end='')
|
[
"os.getcwd",
"easygui_qt.easygui_qt.text_input"
] |
[((158, 235), 'easygui_qt.easygui_qt.text_input', 'easygui_qt.text_input', ([], {'message': '"""What is your name?"""', 'title': '"""Mine is Reeborg."""'}), "(message='What is your name?', title='Mine is Reeborg.')\n", (179, 235), False, 'from easygui_qt import easygui_qt\n'), ((40, 51), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (49, 51), False, 'import os\n')]
|
from django import forms
from imagr_users.models import ImagrUser
from registration.forms import RegistrationForm
class ImagrUserRegistrationForm(RegistrationForm):
def clean_username(self):
"""Validate that the username is alphanumeric and is not already in use.
"""
existing = ImagrUser.objects.filter(
username__iexact=self.cleaned_data['username']
)
if existing.exists():
raise forms.ValidationError(
"A user with that username already exists."
)
else:
return self.cleaned_data['username']
|
[
"django.forms.ValidationError",
"imagr_users.models.ImagrUser.objects.filter"
] |
[((309, 381), 'imagr_users.models.ImagrUser.objects.filter', 'ImagrUser.objects.filter', ([], {'username__iexact': "self.cleaned_data['username']"}), "(username__iexact=self.cleaned_data['username'])\n", (333, 381), False, 'from imagr_users.models import ImagrUser\n'), ((452, 518), 'django.forms.ValidationError', 'forms.ValidationError', (['"""A user with that username already exists."""'], {}), "('A user with that username already exists.')\n", (473, 518), False, 'from django import forms\n')]
|
from typing import Optional
import pytest
import hopeit.app.api as api
from hopeit.app.config import EventType
from hopeit.server.api import APIError
from mock_app import mock_app_api_get, MockData, mock_app_api_post, mock_app_api_get_list
from mock_app import mock_api_app_config, mock_api_spec # noqa: F401
def test_api_from_config(monkeypatch, mock_api_spec, mock_api_app_config): # noqa: F811
monkeypatch.setattr(api, 'spec', mock_api_spec)
spec = api.api_from_config(
mock_app_api_get, app_config=mock_api_app_config, event_name='mock-app-api-get', plugin=None)
assert spec == mock_api_spec['paths']['/api/mock-app-api/test/mock-app-api']['get']
def test_api_from_config_missing(monkeypatch, mock_api_spec, mock_api_app_config): # noqa: F811
monkeypatch.setattr(api, 'spec', mock_api_spec)
with pytest.raises(APIError):
api.api_from_config(
mock_app_api_get, app_config=mock_api_app_config, event_name='mock-app-noapi', plugin=None)
mock_api_app_config.events['mock-app-api-get-list'].type = EventType.POST
with pytest.raises(APIError):
api.api_from_config(
mock_app_api_get_list, app_config=mock_api_app_config, event_name='mock-app-api-get-list', plugin=None)
def test_event_api(monkeypatch, mock_api_spec, mock_api_app_config): # noqa: F811
monkeypatch.setattr(api, 'spec', mock_api_spec)
spec = api.event_api(
description="Test app api",
payload=(str, "Payload"),
query_args=[('arg1', Optional[int], "Argument 1")],
responses={200: (MockData, "MockData result")}
)(mock_app_api_get, app_config=mock_api_app_config, event_name='mock-app-api-get', plugin=None)
assert spec['description'] == \
mock_api_spec['paths']['/api/mock-app-api/test/mock-app-api']['get']['description']
assert spec['parameters'][0] == \
mock_api_spec['paths']['/api/mock-app-api/test/mock-app-api']['get']['parameters'][0]
assert spec['responses'] == \
mock_api_spec['paths']['/api/mock-app-api/test/mock-app-api']['get']['responses']
def test_event_api_post(monkeypatch, mock_api_spec, mock_api_app_config): # noqa: F811
monkeypatch.setattr(api, 'spec', mock_api_spec)
mock_api_spec['paths']['/api/mock-app-api/test/mock-app-api']['post']['parameters'][0]['description'] = \
'arg1'
mock_api_spec['paths']['/api/mock-app-api/test/mock-app-api']['post']['requestBody']['description'] = \
'MockData'
spec = api.event_api(
description="Description Test app api part 2",
payload=MockData,
query_args=['arg1'],
responses={200: int}
)(mock_app_api_post, app_config=mock_api_app_config, event_name='mock-app-api-post', plugin=None)
assert spec['summary'] == \
mock_api_spec['paths']['/api/mock-app-api/test/mock-app-api']['post']['summary']
assert spec['description'] == \
mock_api_spec['paths']['/api/mock-app-api/test/mock-app-api']['post']['description']
assert spec['parameters'][0] == \
mock_api_spec['paths']['/api/mock-app-api/test/mock-app-api']['post']['parameters'][0]
assert spec['requestBody'] == \
mock_api_spec['paths']['/api/mock-app-api/test/mock-app-api']['post']['requestBody']
assert spec['responses'] == \
mock_api_spec['paths']['/api/mock-app-api/test/mock-app-api']['post']['responses']
def test_app_base_route_name(mock_api_app_config): # noqa: F811
assert api.app_base_route_name(mock_api_app_config.app) == "/api/mock-app-api/test"
assert api.app_base_route_name(mock_api_app_config.app, plugin=mock_api_app_config.app) == \
"/api/mock-app-api/test/mock-app-api/test"
|
[
"hopeit.app.api.api_from_config",
"pytest.raises",
"hopeit.app.api.event_api",
"hopeit.app.api.app_base_route_name"
] |
[((466, 583), 'hopeit.app.api.api_from_config', 'api.api_from_config', (['mock_app_api_get'], {'app_config': 'mock_api_app_config', 'event_name': '"""mock-app-api-get"""', 'plugin': 'None'}), "(mock_app_api_get, app_config=mock_api_app_config,\n event_name='mock-app-api-get', plugin=None)\n", (485, 583), True, 'import hopeit.app.api as api\n'), ((837, 860), 'pytest.raises', 'pytest.raises', (['APIError'], {}), '(APIError)\n', (850, 860), False, 'import pytest\n'), ((870, 985), 'hopeit.app.api.api_from_config', 'api.api_from_config', (['mock_app_api_get'], {'app_config': 'mock_api_app_config', 'event_name': '"""mock-app-noapi"""', 'plugin': 'None'}), "(mock_app_api_get, app_config=mock_api_app_config,\n event_name='mock-app-noapi', plugin=None)\n", (889, 985), True, 'import hopeit.app.api as api\n'), ((1083, 1106), 'pytest.raises', 'pytest.raises', (['APIError'], {}), '(APIError)\n', (1096, 1106), False, 'import pytest\n'), ((1116, 1243), 'hopeit.app.api.api_from_config', 'api.api_from_config', (['mock_app_api_get_list'], {'app_config': 'mock_api_app_config', 'event_name': '"""mock-app-api-get-list"""', 'plugin': 'None'}), "(mock_app_api_get_list, app_config=mock_api_app_config,\n event_name='mock-app-api-get-list', plugin=None)\n", (1135, 1243), True, 'import hopeit.app.api as api\n'), ((1401, 1579), 'hopeit.app.api.event_api', 'api.event_api', ([], {'description': '"""Test app api"""', 'payload': "(str, 'Payload')", 'query_args': "[('arg1', Optional[int], 'Argument 1')]", 'responses': "{(200): (MockData, 'MockData result')}"}), "(description='Test app api', payload=(str, 'Payload'),\n query_args=[('arg1', Optional[int], 'Argument 1')], responses={(200): (\n MockData, 'MockData result')})\n", (1414, 1579), True, 'import hopeit.app.api as api\n'), ((2490, 2618), 'hopeit.app.api.event_api', 'api.event_api', ([], {'description': '"""Description Test app api part 2"""', 'payload': 'MockData', 'query_args': "['arg1']", 'responses': '{(200): int}'}), "(description='Description Test app api part 2', payload=\n MockData, query_args=['arg1'], responses={(200): int})\n", (2503, 2618), True, 'import hopeit.app.api as api\n'), ((3461, 3509), 'hopeit.app.api.app_base_route_name', 'api.app_base_route_name', (['mock_api_app_config.app'], {}), '(mock_api_app_config.app)\n', (3484, 3509), True, 'import hopeit.app.api as api\n'), ((3549, 3634), 'hopeit.app.api.app_base_route_name', 'api.app_base_route_name', (['mock_api_app_config.app'], {'plugin': 'mock_api_app_config.app'}), '(mock_api_app_config.app, plugin=mock_api_app_config.app\n )\n', (3572, 3634), True, 'import hopeit.app.api as api\n')]
|
import asyncio
from urllib.parse import urlencode
import pytest
from phx_events.client import PHXChannelsClient
pytestmark = pytest.mark.asyncio
class TestPHXChannelsClientInit:
def setup(self):
self.socket_url = 'ws://test.socket/url/'
self.channel_auth_token = '<PASSWORD>'
self.phx_channels_client = PHXChannelsClient(self.socket_url, self.channel_auth_token)
def test_async_logger_child_set_as_logger_on_client(self):
from phx_events.async_logger import async_logger
assert self.phx_channels_client.logger.parent == async_logger
def test_channel_socket_url_has_token_if_specified(self):
no_token_client = PHXChannelsClient(self.socket_url)
assert no_token_client.channel_socket_url == self.socket_url
assert self.phx_channels_client.channel_socket_url == f'{self.socket_url}?token={self.channel_auth_token}'
def test_channel_socket_url_token_is_made_url_safe(self):
unsafe_token = '==??=='
safe_token_client = PHXChannelsClient(self.socket_url, channel_auth_token=unsafe_token)
assert safe_token_client.channel_socket_url != f'{self.socket_url}?token={unsafe_token}'
assert safe_token_client.channel_socket_url == f'{self.socket_url}?{urlencode({"token": unsafe_token})}'
def test_event_loop_set_by_default_if_not_specified(self):
no_loop_specified_client = PHXChannelsClient(self.socket_url)
assert isinstance(no_loop_specified_client._loop, asyncio.BaseEventLoop)
def test_event_loop_set_to_argument_if_specified(self):
event_loop = asyncio.get_event_loop()
specified_loop_client = PHXChannelsClient(self.socket_url, event_loop=event_loop)
assert specified_loop_client._loop == event_loop
|
[
"phx_events.client.PHXChannelsClient",
"asyncio.get_event_loop",
"urllib.parse.urlencode"
] |
[((338, 397), 'phx_events.client.PHXChannelsClient', 'PHXChannelsClient', (['self.socket_url', 'self.channel_auth_token'], {}), '(self.socket_url, self.channel_auth_token)\n', (355, 397), False, 'from phx_events.client import PHXChannelsClient\n'), ((679, 713), 'phx_events.client.PHXChannelsClient', 'PHXChannelsClient', (['self.socket_url'], {}), '(self.socket_url)\n', (696, 713), False, 'from phx_events.client import PHXChannelsClient\n'), ((1022, 1089), 'phx_events.client.PHXChannelsClient', 'PHXChannelsClient', (['self.socket_url'], {'channel_auth_token': 'unsafe_token'}), '(self.socket_url, channel_auth_token=unsafe_token)\n', (1039, 1089), False, 'from phx_events.client import PHXChannelsClient\n'), ((1400, 1434), 'phx_events.client.PHXChannelsClient', 'PHXChannelsClient', (['self.socket_url'], {}), '(self.socket_url)\n', (1417, 1434), False, 'from phx_events.client import PHXChannelsClient\n'), ((1599, 1623), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1621, 1623), False, 'import asyncio\n'), ((1656, 1713), 'phx_events.client.PHXChannelsClient', 'PHXChannelsClient', (['self.socket_url'], {'event_loop': 'event_loop'}), '(self.socket_url, event_loop=event_loop)\n', (1673, 1713), False, 'from phx_events.client import PHXChannelsClient\n'), ((1264, 1298), 'urllib.parse.urlencode', 'urlencode', (["{'token': unsafe_token}"], {}), "({'token': unsafe_token})\n", (1273, 1298), False, 'from urllib.parse import urlencode\n')]
|
"""Vertical structure functions for ROMS
:func:`sdepth`
Depth of s-levels
:func:`zslice`
Slice a 3D field in s-coordinates to fixed depth
:func:`multi_zslice`
Slice a 3D field to several depth levels
:func:`z_average`
Vertical average of a 3D field
:func:`s_stretch`
Compute vertical stretching arrays Cs_r or Cs_w
"""
# -----------------------------------
# <NAME> <<EMAIL>>
# Institute of Marine Research
# Bergen, Norway
# 2010-09-30
# -----------------------------------
from typing import Union, List
import numpy as np
import xarray as xr
Surface = Union[float, np.ndarray] # Surface z = ....
def sdepth(H, Hc, C, stagger="rho", Vtransform=1):
"""Depth of s-levels
*H* : arraylike
Bottom depths [meter, positive]
*Hc* : scalar
Critical depth
    *C* : 1D array
s-level stretching curve
*stagger* : [ 'rho' | 'w' ]
*Vtransform* : [ 1 | 2 ]
defines the transform used, defaults 1 = Song-Haidvogel
Returns an array with ndim = H.ndim + 1 and
shape = cs_r.shape + H.shape with the depths of the
mid-points in the s-levels.
Typical usage::
>>> fid = Dataset(roms_file)
>>> H = fid.variables['h'][:, :]
>>> C = fid.variables['Cs_r'][:]
>>> Hc = fid.variables['hc'].getValue()
>>> z_rho = sdepth(H, Hc, C)
"""
H = np.asarray(H)
Hshape = H.shape # Save the shape of H
    H = H.ravel()  # and make H 1D for easy shape manipulation
C = np.asarray(C)
N = len(C)
outshape = (N,) + Hshape # Shape of output
if stagger == "rho":
S = -1.0 + (0.5 + np.arange(N)) / N # Unstretched coordinates
elif stagger == "w":
S = np.linspace(-1.0, 0.0, N)
else:
raise ValueError("stagger must be 'rho' or 'w'")
if Vtransform == 1: # Default transform by Song and Haidvogel
A = Hc * (S - C)[:, None]
B = np.outer(C, H)
return (A + B).reshape(outshape)
elif Vtransform == 2: # New transform by Shchepetkin
N = Hc * S[:, None] + np.outer(C, H)
D = 1.0 + Hc / H
return (N / D).reshape(outshape)
else:
raise ValueError("Unknown Vtransform")
# ------------------------------------
def sdepth_w(H, Hc, cs_w):
"""Return depth of w-points in s-levels
Kept for backwards compatibility
use *sdepth(H, Hc, cs_w, stagger='w')* instead
"""
return sdepth(H, Hc, cs_w, stagger="w")
# ------------------------------------------
# Vertical slicing e.t.c.
# ------------------------------------------
def zslice2(F, S, z):
"""Vertical slice of a 3D ROMS field
Vertical interpolation of a field in s-coordinates to
(possibly varying) depth level
*F* : array with vertical profiles, first dimension is vertical
*S* : array with depths of the F-values,
*z* : Depth level(s) for output, scalar or ``shape = F.shape[1:]``
The z values should be negative
Return value : array, `shape = F.shape[1:]`, the vertical slice
Example:
H is an array of depths (positive values)
Hc is the critical depth
C is 1D containing the s-coordinate stretching at rho-points
returns F50, interpolated values at 50 meter with F50.shape = H.shape
>>> z_rho = sdepth(H, Hc, C)
>>> F50 = zslice(F, z_rho, -50.0)
"""
# TODO:
# Option to Save A, D, Dm
# => faster interpolate more fields to same depth
F = np.asarray(F)
S = np.asarray(S)
z = np.asarray(z, dtype="float")
Fshape = F.shape # Save original shape
if S.shape != Fshape:
raise ValueError("F and z_r must have same shape")
if z.shape and z.shape != Fshape[1:]:
raise ValueError("z must be scalar or have shape = F.shape[1:]")
# Flatten all non-vertical dimensions
N = F.shape[0] # Length of vertical dimension
M = F.size // N # Combined length of horizontal dimension(s)
F = F.reshape((N, M))
S = S.reshape((N, M))
if z.shape:
z = z.reshape((M,))
# Find integer array C with shape (M,)
# with S[C[i]-1, i] < z <= S[C[i], i]
# C = np.apply_along_axis(np.searchsorted, 0, S, z)
# but the following is much faster
C = np.sum(S < z, axis=0)
C = C.clip(1, N - 1)
# For vectorization
# construct index array tuples D and Dm such that
# F[D][i] = F[C[i], i]
# F[Dm][i] = F[C[i]-1, i]
I = np.arange(M, dtype="int")
D = (C, I)
Dm = (C - 1, I)
# Compute interpolation weights
A = (z - S[Dm]) / (S[D] - S[Dm])
A = A.clip(0.0, 1.0) # Control the extrapolation
# Do the linear interpolation
R = (1 - A) * F[Dm] + A * F[D]
    # Give the result the correct shape
R = R.reshape(Fshape[1:])
return R
# -----------------------------------------------
def s_stretch(N, theta_s, theta_b, stagger="rho", Vstretching=1):
"""Compute a s-level stretching array
*N* : Number of vertical levels
*theta_s* : Surface stretching factor
*theta_b* : Bottom stretching factor
*stagger* : "rho"|"w"
*Vstretching* : 1|2|4
"""
if stagger == "rho":
S = -1.0 + (0.5 + np.arange(N)) / N
elif stagger == "w":
S = np.linspace(-1.0, 0.0, N + 1)
else:
raise ValueError("stagger must be 'rho' or 'w'")
if Vstretching == 1:
cff1 = 1.0 / np.sinh(theta_s)
cff2 = 0.5 / np.tanh(0.5 * theta_s)
return (1.0 - theta_b) * cff1 * np.sinh(theta_s * S) + theta_b * (
cff2 * np.tanh(theta_s * (S + 0.5)) - 0.5
)
elif Vstretching == 2:
a, b = 1.0, 1.0
Csur = (1 - np.cosh(theta_s * S)) / (np.cosh(theta_s) - 1)
Cbot = np.sinh(theta_b * (S + 1)) / np.sinh(theta_b) - 1
mu = (S + 1) ** a * (1 + (a / b) * (1 - (S + 1) ** b))
return mu * Csur + (1 - mu) * Cbot
elif Vstretching == 4:
C = (1 - np.cosh(theta_s * S)) / (np.cosh(theta_s) - 1)
C = (np.exp(theta_b * C) - 1) / (1 - np.exp(-theta_b))
return C
elif Vstretching == 5:
if stagger == "w":
K = np.arange(N + 1)
if stagger == "rho":
K = np.arange(0.5, N + 1)
S1 = -(K * K - 2 * K * N + K + N * N - N) / (N * N - N)
S2 = -0.01 * (K * K - K * N) / (1 - N)
S = S1 + S2
C = (1 - np.cosh(theta_s * S)) / (np.cosh(theta_s) - 1)
        C = (np.exp(theta_b * C) - 1) / (1 - np.exp(-theta_b))
        return C
else:
raise ValueError("Unknown Vstretching")
def invert_s(F: xr.DataArray, value: Surface):
"""Return highest (shallowest) s-value such that F(s,...) = value
F = DataArray with z_rho as coordinate
The vertical dimension in F must be first, axis=0
F must not have a time dimension
Returns D, Dm, a
F[Dm] <= value <= F[D] (or opposite inequalities)
and a is the interpolation weight:
value = (1-a)*F(K-1) + a*F(K)
a = nan if this is not possible
"""
val = value
# Work on numpy arrays
F0 = F.values
# z_rho = F.z_rho.values
# s_rho = F.s_rho.values
val = np.asarray(val, dtype="float")
# Fshape = F.shape # Save original shape
# if val.shape and val.shape != Fshape[1:]:
# raise ValueError("z must be scalar or have shape = F.shape[1:]")
# Flatten all non-vertical dimensions
N = F.shape[0] # Length of vertical dimension
M = F0.size // N # Combined length of horizontal dimensions
F0 = F0.reshape((N, M))
if val.shape: # Value may be space dependent
val = val.reshape((M,))
# Look for highest s-value where G is negative
G = (F0[1:, :] - val) * (F0[:-1, :] - val)
G = G[::-1, :] # Reverse
K = N - 1 - (G <= 0).argmax(axis=0)
# Define D such that F[D][i] = F[K[i], i]
I = np.arange(M)
D = (K, I)
Dm = (K - 1, I)
# Compute interpolation weights
a = (val - F0[Dm]) / (F0[D] - F0[Dm] + 1e-30)
# Only use 0 <= a <= 1
a[np.abs(a - 0.5) > 0.5] = np.nan #
return D, Dm, a
class HorizontalSlicer:
"""Reduce to horizontal view by slicing
F = DataArray, time-independent, first dimension is vertical
value = slice value
    If F is not monotonic, returns the shallowest depth where F = value
"""
def __init__(self, F: xr.DataArray, value: Surface) -> None:
self.D, self.Dm, self.a = invert_s(F, value)
self.M = len(self.a)
# self.dims = F.dims
def __call__(self, G: xr.DataArray) -> xr.DataArray:
"""G must have same vertical and horizontal dimensions as F"""
if "ocean_time" in G.dims:
ntimes = G.shape[0]
kmax = G.shape[1]
R: List[np.ndarray] = []
for t in range(ntimes):
G0 = G.isel(ocean_time=t).values
G0 = G0.reshape((kmax, self.M))
R0 = (1 - self.a) * G0[self.Dm] + self.a * G0[self.D]
R0 = R0.reshape(G.shape[2:])
R.append(R0)
R1 = np.array(R)
else:
kmax = G.shape[0]
G0 = G.values
G0 = G0.reshape((kmax, self.M))
R1 = (1 - self.a) * G0[self.Dm] + self.a * G0[self.D]
R1 = R1.reshape(G.shape[1:])
# Return a DataArray
# Should have something on z_rho?
dims = list(G.dims)
dims.remove("s_rho")
coords = {dim: G.coords[dim] for dim in dims}
coords["lon_rho"] = G.coords["lon_rho"]
coords["lat_rho"] = G.coords["lat_rho"]
return xr.DataArray(R1, dims=dims, coords=coords, attrs=G.attrs)
|
[
"numpy.outer",
"numpy.sum",
"numpy.tanh",
"numpy.abs",
"numpy.asarray",
"numpy.arange",
"xarray.DataArray",
"numpy.linspace",
"numpy.array",
"numpy.cosh",
"numpy.exp",
"numpy.sinh"
] |
[((1334, 1347), 'numpy.asarray', 'np.asarray', (['H'], {}), '(H)\n', (1344, 1347), True, 'import numpy as np\n'), ((1462, 1475), 'numpy.asarray', 'np.asarray', (['C'], {}), '(C)\n', (1472, 1475), True, 'import numpy as np\n'), ((3411, 3424), 'numpy.asarray', 'np.asarray', (['F'], {}), '(F)\n', (3421, 3424), True, 'import numpy as np\n'), ((3433, 3446), 'numpy.asarray', 'np.asarray', (['S'], {}), '(S)\n', (3443, 3446), True, 'import numpy as np\n'), ((3455, 3483), 'numpy.asarray', 'np.asarray', (['z'], {'dtype': '"""float"""'}), "(z, dtype='float')\n", (3465, 3483), True, 'import numpy as np\n'), ((4173, 4194), 'numpy.sum', 'np.sum', (['(S < z)'], {'axis': '(0)'}), '(S < z, axis=0)\n', (4179, 4194), True, 'import numpy as np\n'), ((4369, 4394), 'numpy.arange', 'np.arange', (['M'], {'dtype': '"""int"""'}), "(M, dtype='int')\n", (4378, 4394), True, 'import numpy as np\n'), ((7020, 7050), 'numpy.asarray', 'np.asarray', (['val'], {'dtype': '"""float"""'}), "(val, dtype='float')\n", (7030, 7050), True, 'import numpy as np\n'), ((7713, 7725), 'numpy.arange', 'np.arange', (['M'], {}), '(M)\n', (7722, 7725), True, 'import numpy as np\n'), ((1879, 1893), 'numpy.outer', 'np.outer', (['C', 'H'], {}), '(C, H)\n', (1887, 1893), True, 'import numpy as np\n'), ((9444, 9501), 'xarray.DataArray', 'xr.DataArray', (['R1'], {'dims': 'dims', 'coords': 'coords', 'attrs': 'G.attrs'}), '(R1, dims=dims, coords=coords, attrs=G.attrs)\n', (9456, 9501), True, 'import xarray as xr\n'), ((1672, 1697), 'numpy.linspace', 'np.linspace', (['(-1.0)', '(0.0)', 'N'], {}), '(-1.0, 0.0, N)\n', (1683, 1697), True, 'import numpy as np\n'), ((5163, 5192), 'numpy.linspace', 'np.linspace', (['(-1.0)', '(0.0)', '(N + 1)'], {}), '(-1.0, 0.0, N + 1)\n', (5174, 5192), True, 'import numpy as np\n'), ((5307, 5323), 'numpy.sinh', 'np.sinh', (['theta_s'], {}), '(theta_s)\n', (5314, 5323), True, 'import numpy as np\n'), ((5345, 5367), 'numpy.tanh', 'np.tanh', (['(0.5 * theta_s)'], {}), '(0.5 * theta_s)\n', (5352, 5367), True, 'import numpy as np\n'), ((7881, 7896), 'numpy.abs', 'np.abs', (['(a - 0.5)'], {}), '(a - 0.5)\n', (7887, 7896), True, 'import numpy as np\n'), ((8916, 8927), 'numpy.array', 'np.array', (['R'], {}), '(R)\n', (8924, 8927), True, 'import numpy as np\n'), ((2024, 2038), 'numpy.outer', 'np.outer', (['C', 'H'], {}), '(C, H)\n', (2032, 2038), True, 'import numpy as np\n'), ((5408, 5428), 'numpy.sinh', 'np.sinh', (['(theta_s * S)'], {}), '(theta_s * S)\n', (5415, 5428), True, 'import numpy as np\n'), ((1590, 1602), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (1599, 1602), True, 'import numpy as np\n'), ((5108, 5120), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (5117, 5120), True, 'import numpy as np\n'), ((5579, 5599), 'numpy.cosh', 'np.cosh', (['(theta_s * S)'], {}), '(theta_s * S)\n', (5586, 5599), True, 'import numpy as np\n'), ((5604, 5620), 'numpy.cosh', 'np.cosh', (['theta_s'], {}), '(theta_s)\n', (5611, 5620), True, 'import numpy as np\n'), ((5641, 5667), 'numpy.sinh', 'np.sinh', (['(theta_b * (S + 1))'], {}), '(theta_b * (S + 1))\n', (5648, 5667), True, 'import numpy as np\n'), ((5670, 5686), 'numpy.sinh', 'np.sinh', (['theta_b'], {}), '(theta_b)\n', (5677, 5686), True, 'import numpy as np\n'), ((5462, 5490), 'numpy.tanh', 'np.tanh', (['(theta_s * (S + 0.5))'], {}), '(theta_s * (S + 0.5))\n', (5469, 5490), True, 'import numpy as np\n'), ((5842, 5862), 'numpy.cosh', 'np.cosh', (['(theta_s * S)'], {}), '(theta_s * S)\n', (5849, 5862), True, 'import numpy as np\n'), ((5867, 5883), 'numpy.cosh', 'np.cosh', 
(['theta_s'], {}), '(theta_s)\n', (5874, 5883), True, 'import numpy as np\n'), ((5902, 5921), 'numpy.exp', 'np.exp', (['(theta_b * C)'], {}), '(theta_b * C)\n', (5908, 5921), True, 'import numpy as np\n'), ((5934, 5950), 'numpy.exp', 'np.exp', (['(-theta_b)'], {}), '(-theta_b)\n', (5940, 5950), True, 'import numpy as np\n'), ((6040, 6056), 'numpy.arange', 'np.arange', (['(N + 1)'], {}), '(N + 1)\n', (6049, 6056), True, 'import numpy as np\n'), ((6102, 6123), 'numpy.arange', 'np.arange', (['(0.5)', '(N + 1)'], {}), '(0.5, N + 1)\n', (6111, 6123), True, 'import numpy as np\n'), ((6272, 6292), 'numpy.cosh', 'np.cosh', (['(theta_s * S)'], {}), '(theta_s * S)\n', (6279, 6292), True, 'import numpy as np\n'), ((6297, 6313), 'numpy.cosh', 'np.cosh', (['theta_s'], {}), '(theta_s)\n', (6304, 6313), True, 'import numpy as np\n'), ((6332, 6351), 'numpy.exp', 'np.exp', (['(theta_b * C)'], {}), '(theta_b * C)\n', (6338, 6351), True, 'import numpy as np\n'), ((6364, 6380), 'numpy.exp', 'np.exp', (['(-theta_b)'], {}), '(-theta_b)\n', (6370, 6380), True, 'import numpy as np\n')]
|
import sys
sys.path.insert(1, "lib/")
# This goes against PEP-8; will refactor once we have a build pipeline
import os
import json
import logging
import requests
import cachetools.func
class MovieApis:
api_key = os.environ.get("API_KEY")
    # Called on the class; defined without self (acts as a static method)
@cachetools.func.ttl_cache(maxsize=20, ttl=300)
def get_movie_details(movie_id: str, region: str) -> dict:
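        # Fetch details for one movie from the TMDB API; results are memoised by ttl_cache.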
params = {
"api_key": MovieApis.api_key,
"region": region
}
try:
r = requests.get(
f"https://api.themoviedb.org/3/movie/{movie_id}", params)
r.raise_for_status()
except Exception as e:
logging.exception(f"Error getting movie details: {e}")
data = json.loads(r.text)
return data
    # Called on the class; defined without self (acts as a static method)
@cachetools.func.ttl_cache(maxsize=5, ttl=300)
def get_list_of_movies(pages: int) -> list:
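        # Collect now-playing movies (title + id) across the requested number of pages.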
movie_list = []
params = {
"api_key": MovieApis.api_key,
"region": "US",
"page": []
}
for pages in range(pages):
params["page"].append(pages + 1)
try:
r = requests.get(
"https://api.themoviedb.org/3/movie/now_playing", params)
r.raise_for_status()
except requests.exceptions.RequestException as e:
logging.exception(f"Error getting list of movies: {e}")
data = json.loads(r.text)
for item in data["results"]:
movie = {
"text": {
"type": "plain_text",
"text": item["original_title"]
},
"value": str(item["id"])
}
movie_list.append(movie)
return movie_list
# Start your app
if __name__ == "__main__":
    MovieApis.get_list_of_movies(3)
|
[
"logging.exception",
"json.loads",
"sys.path.insert",
"os.environ.get",
"requests.get"
] |
[((11, 37), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""lib/"""'], {}), "(1, 'lib/')\n", (26, 37), False, 'import sys\n'), ((218, 243), 'os.environ.get', 'os.environ.get', (['"""API_KEY"""'], {}), "('API_KEY')\n", (232, 243), False, 'import os\n'), ((746, 764), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (756, 764), False, 'import json\n'), ((511, 581), 'requests.get', 'requests.get', (['f"""https://api.themoviedb.org/3/movie/{movie_id}"""', 'params'], {}), "(f'https://api.themoviedb.org/3/movie/{movie_id}', params)\n", (523, 581), False, 'import requests\n'), ((1453, 1471), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (1463, 1471), False, 'import json\n'), ((675, 729), 'logging.exception', 'logging.exception', (['f"""Error getting movie details: {e}"""'], {}), "(f'Error getting movie details: {e}')\n", (692, 729), False, 'import logging\n'), ((1171, 1241), 'requests.get', 'requests.get', (['"""https://api.themoviedb.org/3/movie/now_playing"""', 'params'], {}), "('https://api.themoviedb.org/3/movie/now_playing', params)\n", (1183, 1241), False, 'import requests\n'), ((1378, 1433), 'logging.exception', 'logging.exception', (['f"""Error getting list of movies: {e}"""'], {}), "(f'Error getting list of movies: {e}')\n", (1395, 1433), False, 'import logging\n')]
|
# Dependencies
# Python SQL toolkit and Object Relational Mapper
import sqlalchemy
# Go to existing database with automap_base
from sqlalchemy.ext.automap import automap_base
# Work through mapper to use python code
from sqlalchemy.orm import Session, relationship
# Inspect with python
from sqlalchemy import create_engine, inspect
# Allow us to declare column types
from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc
from sqlalchemy.ext.declarative import declarative_base
import datetime
import pandas as pd
import numpy as np
import json
def update_weather(lat_search):
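    # Return the stored weather/earthquake time series for the given latitude.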
# Sets an object to utilize the default declarative base in SQL Alchemy
Base = declarative_base()
## Class base template to upload to sqlite
class WeatherSeries(Base):
__tablename__ = 'weatherSeries'
id = Column(Integer, primary_key=True)
city = Column(String(50))
country = Column(String(200))
region = Column(String(80))
avgtemp = Column(Float)
date = Column(String(12))
date_epoch = Column(Float)
maxtemp = Column(Float)
mintemp = Column(Float)
sunhour = Column(Float)
totalsnow = Column(Float)
uv_index = Column(Float)
magnitude = Column(Float)
place = Column(String(80))
lat = Column(String(12))
long = Column(String(12))
# Create Database Connection
# ----------------------------------
# Creates a connection to our DB
# Engine opens the door. Conn is the walk through sign
engine = create_engine("sqlite:///earthquake_weather.sqlite")
conn = engine.connect()
# Create a "Metadata" Layer That Abstracts our SQL Database
# ----------------------------------
# Create (if not already in existence) the tables associated with our classes.
Base.metadata.create_all(engine)
# Create a Session Object to Connect to DB
# ----------------------------------
session = Session(bind=engine)
def weatherTimeSeries(query_call):
Base = automap_base()
Base.prepare(engine, reflect=True)
# Check db table names
# Base.classes.keys()
weather_table = Base.classes.weatherSeries
weather_container = session.query(weather_table).filter(weather_table.lat == query_call).all()
weather_data = []
def spellDate(datestring):
date_time_obj = datetime.datetime.strptime(datestring, '%Y-%m-%d')
month_name = date_time_obj.strftime("%B")
day = date_time_obj.strftime("%d")
year = date_time_obj.strftime("%Y")
month_day = month_name + " " + day
month_day_year = month_name + " " + day + ", " + year
date = {
"month_day": month_day,
"month_day_year": month_day_year,
}
return date
for data in weather_container:
date_date = data.date
date_to_pass = spellDate(date_date)
container = {
"city": data.city,
"country": data.country,
"region": data.region,
"avgtemp": data.avgtemp,
"date": date_to_pass,
"date_epoch": data.date_epoch,
"maxtemp": data.maxtemp,
"mintemp": data.mintemp,
"sunhour": data.sunhour,
"totalsnow": data.totalsnow,
"uv_index": data.uv_index,
"magnitude": data.magnitude,
"place": data.place,
"lat": data.lat,
"long": data.long
}
weather_data.append(container)
return weather_data
latitude = lat_search
weather_data = weatherTimeSeries(latitude)
# Return results
return weather_data
#################################################################
## Facts
##################################################################
def aboveSixQuakeCall():
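    # Summarise earthquakes above magnitude 6: total count plus details of the strongest event among the latest records.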
# Sets an object to utilize the default declarative base in SQL Alchemy
Base = declarative_base()
## Class base template to upload to sqlite
class WeatherSeries(Base):
__tablename__ = 'weatherSeries'
id = Column(Integer, primary_key=True)
city = Column(String(50))
country = Column(String(200))
region = Column(String(80))
avgtemp = Column(Float)
date = Column(String(12))
date_epoch = Column(Float)
maxtemp = Column(Float)
mintemp = Column(Float)
sunhour = Column(Float)
totalsnow = Column(Float)
uv_index = Column(Float)
magnitude = Column(Float)
place = Column(String(80))
lat = Column(String(12))
long = Column(String(12))
# Create Database Connection
# ----------------------------------
# Creates a connection to our DB
# Engine opens the door. Conn is the walk through sign
engine = create_engine("sqlite:///earthquake_weather.sqlite")
conn = engine.connect()
# Create a "Metadata" Layer That Abstracts our SQL Database
# ----------------------------------
# Create (if not already in existence) the tables associated with our classes.
Base.metadata.create_all(engine)
# Create a Session Object to Connect to DB
# ----------------------------------
session = Session(bind=engine)
def aboveSixQuake():
Base = automap_base()
Base.prepare(engine, reflect=True)
# Check db table names
# Base.classes.keys()
weather_table = Base.classes.weatherSeries
weather_container = session.query(weather_table).filter(weather_table.magnitude > 6).all()
weather_highesteq = session.query(weather_table).order_by(desc(weather_table.magnitude)).order_by(desc(weather_table.date)).limit(4).all()
weather_facts = []
magnitude_list = []
count = 0
magnitude_keep = 6
for data in weather_highesteq:
magnitude = data.magnitude
# Get highest recorded earthquake
if data.magnitude > magnitude_keep:
magnitude_keep = data.magnitude
location = data.country
city = data.city
temp_low = data.mintemp
temp_high = data.maxtemp
avg_temp_at_time = data.avgtemp
date = data.date
magnitude = magnitude_keep
else:
continue
# Counter
for data in weather_container:
count += 1
def spellDate(datestring):
date_time_obj = datetime.datetime.strptime(datestring, '%Y-%m-%d')
month_name = date_time_obj.strftime("%B")
day = date_time_obj.strftime("%d")
year = date_time_obj.strftime("%Y")
month_day = month_name + " " + day
month_day_year = month_name + " " + day + ", " + year
date = {
"month_day": month_day,
"month_day_year": month_day_year,
}
return date
# Get avgtemp from list
# def Average(lst):
# return sum(lst) / len(lst)
# quake_avg = Average(magnitude_list)
spell_dates = spellDate(date)
container = {
"count": count,
# "avgtemp": quake_avg,
"highest_magnitude": magnitude_keep,
"highest_city": city,
"highest_location": location,
"temp_low": temp_low,
"temp_high": temp_high,
"avg_temp_at_time": avg_temp_at_time,
"date": spell_dates,
}
weather_facts.append(container)
return weather_facts
weather_facts = aboveSixQuake()
# Return results
return weather_facts
#################################################################
## Facts - Latest Quake
##################################################################
def latestQuakesCall():
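    # Return the five most recent earthquake/weather records.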
# Sets an object to utilize the default declarative base in SQL Alchemy
Base = declarative_base()
## Class base template to upload to sqlite
class WeatherSeries(Base):
__tablename__ = 'weatherSeries'
id = Column(Integer, primary_key=True)
city = Column(String(50))
country = Column(String(200))
region = Column(String(80))
avgtemp = Column(Float)
date = Column(String(12))
date_epoch = Column(Float)
maxtemp = Column(Float)
mintemp = Column(Float)
sunhour = Column(Float)
totalsnow = Column(Float)
uv_index = Column(Float)
magnitude = Column(Float)
place = Column(String(80))
lat = Column(String(12))
long = Column(String(12))
# Create Database Connection
# ----------------------------------
# Creates a connection to our DB
# Engine opens the door. Conn is the walk through sign
engine = create_engine("sqlite:///earthquake_weather.sqlite")
conn = engine.connect()
# Create a "Metadata" Layer That Abstracts our SQL Database
# ----------------------------------
# Create (if not already in existence) the tables associated with our classes.
Base.metadata.create_all(engine)
# Create a Session Object to Connect to DB
# ----------------------------------
session = Session(bind=engine)
def latestQuakes():
Base = automap_base()
Base.prepare(engine, reflect=True)
weather_table = Base.classes.weatherSeries
weather_container = session.query(weather_table).order_by(desc(weather_table.date)).limit(5).all()
weather_facts5 = []
weather_facts5_done = []
def spellDate(datestring):
date_time_obj = datetime.datetime.strptime(datestring, '%Y-%m-%d')
month_name = date_time_obj.strftime("%B")
day = date_time_obj.strftime("%d")
year = date_time_obj.strftime("%Y")
month_day = month_name + " " + day
month_day_year = month_name + " " + day + ", " + year
date = {
"month_day": month_day,
"month_day_year": month_day_year,
}
return date
for data in weather_container:
spell_dates = spellDate( data.date)
container = {
"date": spell_dates,
"country": data.country,
"region": data.region,
"magnitude": data.magnitude,
"maxtemp": data.maxtemp,
"mintemp": data.mintemp,
"avgtemp": data.avgtemp,
}
weather_facts5.append(container)
return weather_facts5
weather_facts5 = latestQuakes()
# Return results
return weather_facts5
#################################################################
## Analysis Chart
##################################################################
def analysisChartCall():
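    # Build per-quake temperature-change stats (up / no change / down) between the day of the quake and an earlier log.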
# Sets an object to utilize the default declarative base in SQL Alchemy
Base = declarative_base()
## Class base template to upload to sqlite
class WeatherSeries(Base):
__tablename__ = 'weatherSeries'
id = Column(Integer, primary_key=True)
city = Column(String(50))
country = Column(String(200))
region = Column(String(80))
avgtemp = Column(Float)
date = Column(String(12))
date_epoch = Column(Float)
maxtemp = Column(Float)
mintemp = Column(Float)
sunhour = Column(Float)
totalsnow = Column(Float)
uv_index = Column(Float)
magnitude = Column(Float)
place = Column(String(80))
lat = Column(String(12))
long = Column(String(12))
# Create Database Connection
# ----------------------------------
# Creates a connection to our DB
# Engine opens the door. Conn is the walk through sign
engine = create_engine("sqlite:///earthquake_weather.sqlite")
conn = engine.connect()
# Create a "Metadata" Layer That Abstracts our SQL Database
# ----------------------------------
# Create (if not already in existence) the tables associated with our classes.
Base.metadata.create_all(engine)
# Create a Session Object to Connect to DB
# ----------------------------------
session = Session(bind=engine)
def analysisChart():
Base = automap_base()
Base.prepare(engine, reflect=True)
weather_table = Base.classes.weatherSeries
analysis_container = session.query(weather_table).order_by(desc(weather_table.date)).all()
analysis_list_temp = []
x=1
for data in analysis_container:
# get specific data from db
container = {
"date": data.date,
"magnitude": data.magnitude,
"maxtemp": data.maxtemp,
"mintemp": data.mintemp,
# "avgtemp": data.avgtemp,
"lat": data.lat,
}
analysis_list_temp.append(container)
# Create df for parsing
temp_df = pd.DataFrame(analysis_list_temp)
# Sort by lat and date, reset index
temp_df = temp_df.sort_values(by=['lat', 'date'], ascending=False).reset_index(drop=True)
# Make copy of df, remove 2nd and 3rd log keeping 1st and 4th log of one eq entry.
run_df = temp_df.copy()
while x < len(temp_df.index):
run_df=run_df.drop(x)
x+=1
run_df=run_df.drop(x)
x+=3
# Reset index
run_df = run_df.reset_index(drop=True)
        # get difference of weather change from day of eq and a few days before
i = 0
new_col = []
# Icon list will tell style which icon to display
icon_list = []
while i < len(run_df.index):
# for data in run_df.index:
first = run_df.iloc[i,2]
second = run_df.iloc[i+1, 2]
difference = first - second
new_col.append(difference)
new_col.append(difference)
i+=2
# Add new list to df as a new column
run_df['difference'] = new_col
# Remove duplicates
run_df2 = run_df.copy()
v = 1
while v < len(run_df.index):
run_df2=run_df2.drop(v)
v+=2
# Count up, nochange, down
up_count = 0
nochange_count = 0
down_count = 0
for x in run_df2['difference']:
if x > 0:
icon = "up"
up_count+=1
icon_list.append(icon)
elif x == 0:
icon = "nochange"
nochange_count+=1
icon_list.append(icon)
else:
icon = "down"
down_count+=1
icon_list.append(icon)
# Add new list to df as a new column
run_df2['icon'] = icon_list
# select only the columns we need
run_df2 = run_df2[['date','magnitude','lat','difference','icon']]
# # Turn df into list of tuples
records = run_df2.to_records(index=False)
analysis_chart = list(records)
# Create list of tuple
analysis_list = []
for data in analysis_chart:
container2 = {
"date": data.date,
"magnitude": data.magnitude,
"lat": data.lat,
"difference": data.difference,
"icon": data.icon,
}
analysis_list.append(container2)
diff_count = len(run_df2['difference'])
above_percentage = "{:.0%}".format(up_count / diff_count)
atzero_percentage = "{:.0%}".format(nochange_count / diff_count)
belowzero_percentage = "{:.0%}".format(down_count / diff_count)
container3 = {
"abovezero": up_count,
"abovezeropercent": above_percentage,
"atzero": nochange_count,
"atzeropercent": atzero_percentage,
"belowzero": down_count,
"belowzeropercent": belowzero_percentage,
}
analysis_list.append(container3)
return analysis_list
analysis_list = analysisChart()
return analysis_list
|
[
"pandas.DataFrame",
"sqlalchemy.String",
"sqlalchemy.orm.Session",
"sqlalchemy.ext.declarative.declarative_base",
"datetime.datetime.strptime",
"sqlalchemy.Column",
"sqlalchemy.desc",
"sqlalchemy.create_engine",
"sqlalchemy.ext.automap.automap_base"
] |
[((709, 727), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (725, 727), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((1586, 1638), 'sqlalchemy.create_engine', 'create_engine', (['"""sqlite:///earthquake_weather.sqlite"""'], {}), "('sqlite:///earthquake_weather.sqlite')\n", (1599, 1638), False, 'from sqlalchemy import create_engine, inspect\n'), ((1994, 2014), 'sqlalchemy.orm.Session', 'Session', ([], {'bind': 'engine'}), '(bind=engine)\n', (2001, 2014), False, 'from sqlalchemy.orm import Session, relationship\n'), ((4137, 4155), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (4153, 4155), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((5014, 5066), 'sqlalchemy.create_engine', 'create_engine', (['"""sqlite:///earthquake_weather.sqlite"""'], {}), "('sqlite:///earthquake_weather.sqlite')\n", (5027, 5066), False, 'from sqlalchemy import create_engine, inspect\n'), ((5422, 5442), 'sqlalchemy.orm.Session', 'Session', ([], {'bind': 'engine'}), '(bind=engine)\n', (5429, 5442), False, 'from sqlalchemy.orm import Session, relationship\n'), ((8212, 8230), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (8228, 8230), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((9089, 9141), 'sqlalchemy.create_engine', 'create_engine', (['"""sqlite:///earthquake_weather.sqlite"""'], {}), "('sqlite:///earthquake_weather.sqlite')\n", (9102, 9141), False, 'from sqlalchemy import create_engine, inspect\n'), ((9497, 9517), 'sqlalchemy.orm.Session', 'Session', ([], {'bind': 'engine'}), '(bind=engine)\n', (9504, 9517), False, 'from sqlalchemy.orm import Session, relationship\n'), ((11190, 11208), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (11206, 11208), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((12067, 12119), 'sqlalchemy.create_engine', 'create_engine', (['"""sqlite:///earthquake_weather.sqlite"""'], {}), "('sqlite:///earthquake_weather.sqlite')\n", (12080, 12119), False, 'from sqlalchemy import create_engine, inspect\n'), ((12475, 12495), 'sqlalchemy.orm.Session', 'Session', ([], {'bind': 'engine'}), '(bind=engine)\n', (12482, 12495), False, 'from sqlalchemy.orm import Session, relationship\n'), ((860, 893), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (866, 893), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((1020, 1033), 'sqlalchemy.Column', 'Column', (['Float'], {}), '(Float)\n', (1026, 1033), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((1089, 1102), 'sqlalchemy.Column', 'Column', (['Float'], {}), '(Float)\n', (1095, 1102), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((1121, 1134), 'sqlalchemy.Column', 'Column', (['Float'], {}), '(Float)\n', (1127, 1134), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((1153, 1166), 'sqlalchemy.Column', 'Column', (['Float'], {}), '(Float)\n', (1159, 1166), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((1185, 1198), 'sqlalchemy.Column', 'Column', (['Float'], {}), '(Float)\n', (1191, 1198), False, 'from sqlalchemy import Column, Integer, String, Text, 
DateTime, Float, Boolean, ForeignKey, desc\n'), ((1219, 1232), 'sqlalchemy.Column', 'Column', (['Float'], {}), '(Float)\n', (1225, 1232), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((1252, 1265), 'sqlalchemy.Column', 'Column', (['Float'], {}), '(Float)\n', (1258, 1265), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((1286, 1299), 'sqlalchemy.Column', 'Column', (['Float'], {}), '(Float)\n', (1292, 1299), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((2070, 2084), 'sqlalchemy.ext.automap.automap_base', 'automap_base', ([], {}), '()\n', (2082, 2084), False, 'from sqlalchemy.ext.automap import automap_base\n'), ((4288, 4321), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (4294, 4321), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((4448, 4461), 'sqlalchemy.Column', 'Column', (['Float'], {}), '(Float)\n', (4454, 4461), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((4517, 4530), 'sqlalchemy.Column', 'Column', (['Float'], {}), '(Float)\n', (4523, 4530), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((4549, 4562), 'sqlalchemy.Column', 'Column', (['Float'], {}), '(Float)\n', (4555, 4562), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((4581, 4594), 'sqlalchemy.Column', 'Column', (['Float'], {}), '(Float)\n', (4587, 4594), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((4613, 4626), 'sqlalchemy.Column', 'Column', (['Float'], {}), '(Float)\n', (4619, 4626), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((4647, 4660), 'sqlalchemy.Column', 'Column', (['Float'], {}), '(Float)\n', (4653, 4660), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((4680, 4693), 'sqlalchemy.Column', 'Column', (['Float'], {}), '(Float)\n', (4686, 4693), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((4714, 4727), 'sqlalchemy.Column', 'Column', (['Float'], {}), '(Float)\n', (4720, 4727), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((5484, 5498), 'sqlalchemy.ext.automap.automap_base', 'automap_base', ([], {}), '()\n', (5496, 5498), False, 'from sqlalchemy.ext.automap import automap_base\n'), ((8363, 8396), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (8369, 8396), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((8523, 8536), 'sqlalchemy.Column', 'Column', (['Float'], {}), '(Float)\n', (8529, 8536), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((8592, 8605), 'sqlalchemy.Column', 'Column', (['Float'], {}), '(Float)\n', (8598, 8605), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((8624, 8637), 'sqlalchemy.Column', 'Column', (['Float'], {}), '(Float)\n', (8630, 8637), 
False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((8656, 8669), 'sqlalchemy.Column', 'Column', (['Float'], {}), '(Float)\n', (8662, 8669), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((8688, 8701), 'sqlalchemy.Column', 'Column', (['Float'], {}), '(Float)\n', (8694, 8701), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((8722, 8735), 'sqlalchemy.Column', 'Column', (['Float'], {}), '(Float)\n', (8728, 8735), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((8755, 8768), 'sqlalchemy.Column', 'Column', (['Float'], {}), '(Float)\n', (8761, 8768), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((8789, 8802), 'sqlalchemy.Column', 'Column', (['Float'], {}), '(Float)\n', (8795, 8802), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((9558, 9572), 'sqlalchemy.ext.automap.automap_base', 'automap_base', ([], {}), '()\n', (9570, 9572), False, 'from sqlalchemy.ext.automap import automap_base\n'), ((11341, 11374), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (11347, 11374), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((11501, 11514), 'sqlalchemy.Column', 'Column', (['Float'], {}), '(Float)\n', (11507, 11514), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((11570, 11583), 'sqlalchemy.Column', 'Column', (['Float'], {}), '(Float)\n', (11576, 11583), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((11602, 11615), 'sqlalchemy.Column', 'Column', (['Float'], {}), '(Float)\n', (11608, 11615), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((11634, 11647), 'sqlalchemy.Column', 'Column', (['Float'], {}), '(Float)\n', (11640, 11647), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((11666, 11679), 'sqlalchemy.Column', 'Column', (['Float'], {}), '(Float)\n', (11672, 11679), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((11700, 11713), 'sqlalchemy.Column', 'Column', (['Float'], {}), '(Float)\n', (11706, 11713), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((11733, 11746), 'sqlalchemy.Column', 'Column', (['Float'], {}), '(Float)\n', (11739, 11746), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((11767, 11780), 'sqlalchemy.Column', 'Column', (['Float'], {}), '(Float)\n', (11773, 11780), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((12541, 12555), 'sqlalchemy.ext.automap.automap_base', 'automap_base', ([], {}), '()\n', (12553, 12555), False, 'from sqlalchemy.ext.automap import automap_base\n'), ((13268, 13300), 'pandas.DataFrame', 'pd.DataFrame', (['analysis_list_temp'], {}), '(analysis_list_temp)\n', (13280, 13300), True, 'import pandas as pd\n'), ((916, 926), 'sqlalchemy.String', 'String', (['(50)'], {}), '(50)\n', (922, 
926), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((953, 964), 'sqlalchemy.String', 'String', (['(200)'], {}), '(200)\n', (959, 964), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((990, 1000), 'sqlalchemy.String', 'String', (['(80)'], {}), '(80)\n', (996, 1000), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((1056, 1066), 'sqlalchemy.String', 'String', (['(12)'], {}), '(12)\n', (1062, 1066), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((1323, 1333), 'sqlalchemy.String', 'String', (['(80)'], {}), '(80)\n', (1329, 1333), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((1356, 1366), 'sqlalchemy.String', 'String', (['(12)'], {}), '(12)\n', (1362, 1366), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((1390, 1400), 'sqlalchemy.String', 'String', (['(12)'], {}), '(12)\n', (1396, 1400), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((2434, 2484), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['datestring', '"""%Y-%m-%d"""'], {}), "(datestring, '%Y-%m-%d')\n", (2460, 2484), False, 'import datetime\n'), ((4344, 4354), 'sqlalchemy.String', 'String', (['(50)'], {}), '(50)\n', (4350, 4354), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((4381, 4392), 'sqlalchemy.String', 'String', (['(200)'], {}), '(200)\n', (4387, 4392), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((4418, 4428), 'sqlalchemy.String', 'String', (['(80)'], {}), '(80)\n', (4424, 4428), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((4484, 4494), 'sqlalchemy.String', 'String', (['(12)'], {}), '(12)\n', (4490, 4494), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((4751, 4761), 'sqlalchemy.String', 'String', (['(80)'], {}), '(80)\n', (4757, 4761), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((4784, 4794), 'sqlalchemy.String', 'String', (['(12)'], {}), '(12)\n', (4790, 4794), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((4818, 4828), 'sqlalchemy.String', 'String', (['(12)'], {}), '(12)\n', (4824, 4828), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((6723, 6773), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['datestring', '"""%Y-%m-%d"""'], {}), "(datestring, '%Y-%m-%d')\n", (6749, 6773), False, 'import datetime\n'), ((8419, 8429), 'sqlalchemy.String', 'String', (['(50)'], {}), '(50)\n', (8425, 8429), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((8456, 8467), 'sqlalchemy.String', 'String', (['(200)'], {}), '(200)\n', (8462, 8467), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((8493, 8503), 'sqlalchemy.String', 'String', (['(80)'], {}), '(80)\n', (8499, 8503), False, 'from sqlalchemy import 
Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((8559, 8569), 'sqlalchemy.String', 'String', (['(12)'], {}), '(12)\n', (8565, 8569), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((8826, 8836), 'sqlalchemy.String', 'String', (['(80)'], {}), '(80)\n', (8832, 8836), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((8859, 8869), 'sqlalchemy.String', 'String', (['(12)'], {}), '(12)\n', (8865, 8869), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((8893, 8903), 'sqlalchemy.String', 'String', (['(12)'], {}), '(12)\n', (8899, 8903), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((9899, 9949), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['datestring', '"""%Y-%m-%d"""'], {}), "(datestring, '%Y-%m-%d')\n", (9925, 9949), False, 'import datetime\n'), ((11397, 11407), 'sqlalchemy.String', 'String', (['(50)'], {}), '(50)\n', (11403, 11407), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((11434, 11445), 'sqlalchemy.String', 'String', (['(200)'], {}), '(200)\n', (11440, 11445), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((11471, 11481), 'sqlalchemy.String', 'String', (['(80)'], {}), '(80)\n', (11477, 11481), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((11537, 11547), 'sqlalchemy.String', 'String', (['(12)'], {}), '(12)\n', (11543, 11547), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((11804, 11814), 'sqlalchemy.String', 'String', (['(80)'], {}), '(80)\n', (11810, 11814), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((11837, 11847), 'sqlalchemy.String', 'String', (['(12)'], {}), '(12)\n', (11843, 11847), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((11871, 11881), 'sqlalchemy.String', 'String', (['(12)'], {}), '(12)\n', (11877, 11881), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((12717, 12741), 'sqlalchemy.desc', 'desc', (['weather_table.date'], {}), '(weather_table.date)\n', (12721, 12741), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((5859, 5883), 'sqlalchemy.desc', 'desc', (['weather_table.date'], {}), '(weather_table.date)\n', (5863, 5883), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((9734, 9758), 'sqlalchemy.desc', 'desc', (['weather_table.date'], {}), '(weather_table.date)\n', (9738, 9758), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n'), ((5819, 5848), 'sqlalchemy.desc', 'desc', (['weather_table.magnitude'], {}), '(weather_table.magnitude)\n', (5823, 5848), False, 'from sqlalchemy import Column, Integer, String, Text, DateTime, Float, Boolean, ForeignKey, desc\n')]
|
import numpy as np
import sys
#for calculate the loss
from sklearn.metrics import log_loss
from sklearn.metrics import make_scorer
#import three machine learning models
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedShuffleSplit
#for standardizing the data
from sklearn import preprocessing
from sklearn.model_selection import GridSearchCV
import os
from os import mkdir, listdir
from os.path import join, isdir, dirname
from time import strftime
import constants as ct
import configparser
import argparse
import logging
import random
import pandas
import pickle
import joblib
logger = logging.getLogger('cumul')
random.seed(1123)
np.random.seed(1123)
'''params'''
r = 10
def score_func(ground_truths, predictions):
global MON_SITE_NUM, tps, wps, fps, ps, ns, flag
tp, wp, fp, p, n = 0, 0, 0, 0 ,0
for truth,prediction in zip(ground_truths, predictions):
if truth != MON_SITE_NUM:
p += 1
else:
n += 1
if prediction != MON_SITE_NUM:
if truth == prediction:
tp += 1
else:
if truth != MON_SITE_NUM:
wp += 1
# logger.info('Wrong positive:%d %d'%(truth, prediction))
else:
fp += 1
# logger.info('False positive:%d %d'%(truth, prediction))
# logger.info('%4d %4d %4d %4d %4d'%(tp, wp, fp, p, n))
if flag:
tps += tp
wps += wp
fps += fp
ps += p
ns += n
try:
r_precision = tp*n / (tp*n+wp*n+r*p*fp)
except:
r_precision = 0.0
# logger.info('r-precision:%.4f',r_precision)
# return r_precision
return tp/p
def read_conf(file):
cf = configparser.ConfigParser()
cf.read(file)
return dict(cf['default'])
def parse_arguments():
parser = argparse.ArgumentParser(description='It simulates adaptive padding on a set of web traffic traces.')
parser.add_argument('fp',
metavar='<feature path>',
help='Path to the directory of the extracted features')
parser.add_argument('type',
metavar='<model type>',
help='train a clean or dirty model',
default="None")
parser.add_argument('--log',
type=str,
dest="log",
metavar='<log path>',
default='stdout',
help='path to the log file. It will print to stdout by default.')
# Parse arguments
args = parser.parse_args()
config_logger(args)
return args
def config_logger(args):
# Set file
log_file = sys.stdout
if args.log != 'stdout':
log_file = open(args.log, 'w')
ch = logging.StreamHandler(log_file)
# Set logging format
ch.setFormatter(logging.Formatter(ct.LOG_FORMAT))
logger.addHandler(ch)
# Set level format
logger.setLevel(logging.INFO)
#SVM with RBF kernel for open world!!
def GridSearch(train_X,train_Y):
global OPEN_WORLD
#find the optimal gamma
param_grid = [
{
'C': [2**11,2**13,2**15,2**17],
'gamma' : [2**-3,2**-1,2**1,2**3]
}
]
if OPEN_WORLD:
my_scorer = make_scorer(score_func, greater_is_better=True)
else:
my_scorer = "accuracy"
# clf = GridSearchCV(estimator = SVC(kernel = 'rbf'), param_grid = param_grid, \
# scoring = 'accuracy', cv = 10, verbose = 2, n_jobs = -1)
clf = GridSearchCV(estimator = SVC(kernel = 'rbf'), param_grid = param_grid, \
scoring = my_scorer, cv = 5, verbose = 0, n_jobs = -1)
clf.fit(train_X, train_Y)
# logger.info('Best estimator:%s'%clf.best_estimator_)
# logger.info('Best_score_:%s'%clf.best_score_)
return clf
if __name__ == '__main__':
global MON_SITE_NUM, tps, wps, fps, ps, ns, flag, OPEN_WORLD
tps, wps, fps, ps, ns = 0,0,0,0,0
flag = 0
args = parse_arguments()
# logger.info("Arguments: %s" % (args))
cf = read_conf(ct.confdir)
MON_SITE_NUM = int(cf['monitored_site_num'])
if cf['open_world'] == '1':
UNMON_SITE_NUM = int(cf['unmonitored_site_num'])
OPEN_WORLD = 1
else:
OPEN_WORLD = 0
# logger.info('loading data...')
dic = np.load(args.fp,allow_pickle=True).item()
X = np.array(dic['feature'])
y = np.array(dic['label'])
if not OPEN_WORLD:
X = X[y<MON_SITE_NUM]
y = y[y<MON_SITE_NUM]
# print(X.shape, y.shape)
#normalize the data
scaler = preprocessing.MinMaxScaler((-1,1))
X = scaler.fit_transform(X)
# logger.info('data are transformed into [-1,1]')
# find the optimal params
# logger.info('GridSearchCV...')
clf = GridSearch(X,y)
C = clf.best_params_['C']
gamma = clf.best_params_['gamma']
#C, gamma = 131072, 8.000000
# C, gamma = 8192, 8.00
# logger.info('Best params are: %d %f'%(C,gamma))
# sss = StratifiedShuffleSplit(n_splits=10, test_size=0.1, random_state=0)
# folder_num = 0
# flag = 1
# for train_index, test_index in sss.split(X,y):
# # logger.info('Testing fold %d'%folder_num)
# folder_num += 1
# # print("TRAIN:", train_index, "TEST:", test_index)
# X_train, X_test = X[train_index], X[test_index]
# y_train, y_test = y[train_index], y[test_index]
# model = SVC(C = C, gamma = gamma, kernel = 'rbf')
# model.fit(X_train, y_train)
# y_pred = model.predict(X_test)
# r_precision = score_func(y_test, y_pred)
# # logger.info('%d-presicion is %.4f'%(r, r_precision))
# print("%d %d %d %d %d"%(tps,wps,fps,ps,ns))
model = SVC(C = C, gamma = gamma, kernel = 'rbf')
model.fit(X, y)
joblib.dump(model, join(ct.modeldir,args.fp.split("/")[-1][:-4]+'.pkl'))
print('model have been saved')
|
[
"numpy.load",
"numpy.random.seed",
"argparse.ArgumentParser",
"logging.StreamHandler",
"sklearn.preprocessing.MinMaxScaler",
"logging.Formatter",
"sklearn.metrics.make_scorer",
"random.seed",
"numpy.array",
"sklearn.svm.SVC",
"configparser.ConfigParser",
"logging.getLogger"
] |
[((675, 701), 'logging.getLogger', 'logging.getLogger', (['"""cumul"""'], {}), "('cumul')\n", (692, 701), False, 'import logging\n'), ((702, 719), 'random.seed', 'random.seed', (['(1123)'], {}), '(1123)\n', (713, 719), False, 'import random\n'), ((720, 740), 'numpy.random.seed', 'np.random.seed', (['(1123)'], {}), '(1123)\n', (734, 740), True, 'import numpy as np\n'), ((1813, 1840), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (1838, 1840), False, 'import configparser\n'), ((1931, 2036), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""It simulates adaptive padding on a set of web traffic traces."""'}), "(description=\n 'It simulates adaptive padding on a set of web traffic traces.')\n", (1954, 2036), False, 'import argparse\n'), ((2893, 2924), 'logging.StreamHandler', 'logging.StreamHandler', (['log_file'], {}), '(log_file)\n', (2914, 2924), False, 'import logging\n'), ((4454, 4478), 'numpy.array', 'np.array', (["dic['feature']"], {}), "(dic['feature'])\n", (4462, 4478), True, 'import numpy as np\n'), ((4487, 4509), 'numpy.array', 'np.array', (["dic['label']"], {}), "(dic['label'])\n", (4495, 4509), True, 'import numpy as np\n'), ((4664, 4699), 'sklearn.preprocessing.MinMaxScaler', 'preprocessing.MinMaxScaler', (['(-1, 1)'], {}), '((-1, 1))\n', (4690, 4699), False, 'from sklearn import preprocessing\n'), ((5820, 5855), 'sklearn.svm.SVC', 'SVC', ([], {'C': 'C', 'gamma': 'gamma', 'kernel': '"""rbf"""'}), "(C=C, gamma=gamma, kernel='rbf')\n", (5823, 5855), False, 'from sklearn.svm import SVC\n'), ((2971, 3003), 'logging.Formatter', 'logging.Formatter', (['ct.LOG_FORMAT'], {}), '(ct.LOG_FORMAT)\n', (2988, 3003), False, 'import logging\n'), ((3365, 3412), 'sklearn.metrics.make_scorer', 'make_scorer', (['score_func'], {'greater_is_better': '(True)'}), '(score_func, greater_is_better=True)\n', (3376, 3412), False, 'from sklearn.metrics import make_scorer\n'), ((3641, 3658), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""rbf"""'}), "(kernel='rbf')\n", (3644, 3658), False, 'from sklearn.svm import SVC\n'), ((4401, 4436), 'numpy.load', 'np.load', (['args.fp'], {'allow_pickle': '(True)'}), '(args.fp, allow_pickle=True)\n', (4408, 4436), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
import os
import json
import jieba.analyse
import jieba
CURRENT_PATH = os.path.dirname(os.path.abspath(__file__))
sentiment_path = os.path.join(CURRENT_PATH, 'data', 'sentimentDict.json')
stopwords_path = os.path.join(CURRENT_PATH, 'data', 'stopwords.txt.json')
degree_path = os.path.join(CURRENT_PATH, 'data', 'degreeDict.json')
not_path = os.path.join(CURRENT_PATH, 'data', 'notDict.json')
jieba_dic_path = os.path.join(CURRENT_PATH, 'data', 'jieba.dic')
# 加载情感词典
jieba.load_userdict(jieba_dic_path)
class SentimentAnalysis():
def __init__(self):
self.sentiment_score_dic = self.load_json(sentiment_path)
self.degree_score = self.load_json(degree_path)
self.notwords = self.load_json(not_path)
def load_json(self, json_file_path):
with open(json_file_path, 'r', encoding='utf-8') as f:
return json.loads(f.read(), encoding='utf-8')
def analysis(self, sentence):
words = jieba.lcut(sentence)
score = self.sentiment_score_dic.get(words[0], 0)
if len(words) > 1:
score += self.sentiment_score_dic.get(words[1], 0) * self.notwords.get(words[0], 1) * self.degree_score.get(words[0], 1)
if len(words) > 2:
for i in range(2, len(words)):
score += self.sentiment_score_dic.get(words[i], 0) * self.notwords.get(words[i-1], 1) * \
self.degree_score.get(words[i-1], 1) * self.degree_score.get(words[i-2], 1) * \
self.notwords.get(words[i-2], 1)
if score < 0:
return {'negative': score}
if score > 0:
return {'positive': score}
return {'middle': score}
|
[
"jieba.load_userdict",
"os.path.abspath",
"os.path.join",
"jieba.lcut"
] |
[((159, 215), 'os.path.join', 'os.path.join', (['CURRENT_PATH', '"""data"""', '"""sentimentDict.json"""'], {}), "(CURRENT_PATH, 'data', 'sentimentDict.json')\n", (171, 215), False, 'import os\n'), ((233, 289), 'os.path.join', 'os.path.join', (['CURRENT_PATH', '"""data"""', '"""stopwords.txt.json"""'], {}), "(CURRENT_PATH, 'data', 'stopwords.txt.json')\n", (245, 289), False, 'import os\n'), ((304, 357), 'os.path.join', 'os.path.join', (['CURRENT_PATH', '"""data"""', '"""degreeDict.json"""'], {}), "(CURRENT_PATH, 'data', 'degreeDict.json')\n", (316, 357), False, 'import os\n'), ((369, 419), 'os.path.join', 'os.path.join', (['CURRENT_PATH', '"""data"""', '"""notDict.json"""'], {}), "(CURRENT_PATH, 'data', 'notDict.json')\n", (381, 419), False, 'import os\n'), ((437, 484), 'os.path.join', 'os.path.join', (['CURRENT_PATH', '"""data"""', '"""jieba.dic"""'], {}), "(CURRENT_PATH, 'data', 'jieba.dic')\n", (449, 484), False, 'import os\n'), ((495, 530), 'jieba.load_userdict', 'jieba.load_userdict', (['jieba_dic_path'], {}), '(jieba_dic_path)\n', (514, 530), False, 'import jieba\n'), ((115, 140), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (130, 140), False, 'import os\n'), ((969, 989), 'jieba.lcut', 'jieba.lcut', (['sentence'], {}), '(sentence)\n', (979, 989), False, 'import jieba\n')]
|
#!/usr/bin/env python2
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
"""
Rules for building documentation with Sphinx (sphinx-doc.org)
This provides two new targets:
* sphinx_wiki
* sphinx_manpage
Common Attributes:
name: str
Name of the buck target
python_binary_deps: List[target]
python_library_deps: List[target]
list of python_binary dependencies to include in the link-tree
Sphinx ``autodoc`` allows documents to reference doc-blocks from
modules, classes, etc. For python it does this by importing the
modules. This means the dependencies need to be assembled in the
same PYTHONPATH, with all native library dependencies built, etc.
It is important to differentiate between python_binary_deps and
python_library_deps because we cannot do introspection on the targets
themselves. For ``python_binary`` we actually depend on the
"{name}-library" target rather than the binary itself.
apidoc_modules: Dict[module_path, destination_dir]
``sphinx-apidoc`` is a command many run to auto-generate ".rst" files
for a Python package. ``sphinx-apidoc`` runs and outputs a document
tree, with ``.. automodule::`` and ``.. autoclass::`` references, which
is used by the subsequent Sphinx run to build out docs for those
modules, classes, functions, etc.
The output if ``sphinx-apidoc`` is a directory tree of its own, which
will merged in with the directory tree in ``srcs`` using ``rsync``.
The destination directory will be the name of ``destination_dir``
provided.
Keep in mind ``sphinx-apidoc`` runs at the root of ``PYTHONPATH``.
A rule like::
apidoc_modules = {
"mypackage.mymodule": "mymodule",
}
Will run ``sphinx-apidoc`` with the argument mypackage/mymodule,
and merge the output into the "mymodule" subdirectory with the
rest of ``srcs``.
genrule_srcs: Dict[binary_target, destination_dir]
Similar to ``apidoc_modules``, ``genrule_srcs`` provides a way to
generate source files during the build. The target needs to be a
binary target (runnable with "$(exe {target}) $OUT"), and needs to
accept a single argument "$OUT": the directory to write files to.
The ``destination_dir`` is the sub-directory to merge the files
into, alongside the declared ``srcs``.
config: Dict[str, Dict[str, Union[bool, int, str, List, Dict]]
This provides a way to override or add settings to conf.py,
sphinx-build and others
Section headers:
conf.py
sphinx-build
sphinx-apidoc
These need to serialize to JSON
label: List[str]
This provides a way to add one or more labels to the target, similar
to ``label`` for ``genrule``
sphinx_wiki
----------
This utilizes the Sphinx "xml" builder to generate a document
compliant with the Docutils DTD
Attributes:
srcs: List[Path]
list of document source files (usually .rst or .md)
wiki_root_path
Base URI location for documents to reside
This gets added to the conf.py, but typically is not used by Sphinx
in the build process. It is included here as metadata which can
be used by other tools via ``buck query``.
sphinx_manpage
--------------
This utilizes the Sphinx "man" builder to generate a Unix `Manual Page`
Attributes:
src: Path
The path to the source file (usually .rst or .md)
description: str
A one-line description of the program suitable for the NAME section
author: str
The program author
section: int
The manpage ``section``, defaults to ``1`` which is reserved for
programs
manpage_name: str [Optional]
The name of the manpage to use. The default is to use the target name
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
with allow_unsafe_import(): # noqa: magic
import collections
import json
import os
FBSPHINX_WRAPPER = '//fbsphinx:bin'
SPHINX_WRAPPER = '//fbsphinx:sphinx'
SPHINXCONFIG_TGT = '//:.sphinxconfig'
if False:
# avoid flake8 warnings for some things
from . import (
load,
read_config,
include_defs,
)
def import_macro_lib(path):
global _import_macro_lib__imported
include_defs('{}/{}.py'.format(
read_config('fbcode', 'macro_lib', '//macro_lib'), path
), '_import_macro_lib__imported')
ret = _import_macro_lib__imported
del _import_macro_lib__imported # Keep the global namespace clean
return ret
base = import_macro_lib('convert/base')
Rule = import_macro_lib('rule').Rule
python = import_macro_lib('convert/python')
fbcode_target = import_macro_lib('fbcode_target')
load("@fbcode_macros//build_defs:python_typing.bzl",
"get_typing_config_target")
SPHINX_SECTION = 'sphinx'
class _SphinxConverter(base.Converter):
"""
Produces a RuleTarget named after the base_path that points to the
correct platform default as defined in data
"""
def __init__(self, context):
super(_SphinxConverter, self).__init__(context)
self._converters = {
'python_binary': python.PythonConverter(context, 'python_binary'),
}
def get_allowed_args(self):
return {
'name',
'python_binary_deps',
'python_library_deps',
'apidoc_modules',
'genrule_srcs',
'config',
}
def get_buck_rule_type(self):
return 'genrule'
def _gen_genrule_srcs_rules(
self,
base_path,
name,
genrule_srcs,
):
"""
A simple genrule wrapper for running some target which generates rst
"""
if not genrule_srcs:
return
for target, outdir in genrule_srcs.items():
rule = fbcode_target.parse_target(target, base_path)
if '/' in outdir:
root, rest = outdir.split('/', 1)
else:
root = outdir
rest = '.'
yield Rule('genrule', collections.OrderedDict((
('name', name + '-genrule_srcs-' + rule.name),
('out', root),
('bash', 'mkdir -p $OUT/{rest} && $(exe {target}) $OUT/{rest}'.format(
target=target,
rest=rest,
)),
)))
def _gen_apidoc_rules(
self,
base_path,
name,
fbsphinx_wrapper_target,
apidoc_modules,
):
"""
A simple genrule wrapper for running sphinx-apidoc
"""
if not apidoc_modules:
return
for module, outdir in apidoc_modules.items():
command = ' '.join((
'mkdir -p $OUT && $(exe :{fbsphinx_wrapper_target})',
'buck apidoc',
module,
'$OUT',
)).format(
fbsphinx_wrapper_target=fbsphinx_wrapper_target,
)
yield Rule('genrule', collections.OrderedDict((
('name', name + '-apidoc-' + module),
('out', outdir),
('bash', command),
)))
def convert(
self,
base_path,
name,
apidoc_modules=None,
config=None,
genrule_srcs=None,
python_binary_deps=(),
python_library_deps=(),
src=None,
srcs=None,
visibility=None,
**kwargs
):
"""
Entry point for converting sphinx rules
"""
if srcs is None:
srcs = [src]
python_deps = tuple(python_library_deps) + tuple((
_dep + '-library'
for _dep
in tuple(python_binary_deps)
)) + (FBSPHINX_WRAPPER,)
fbsphinx_wrapper_target = '%s-fbsphinx-wrapper' % name
for rule in self._converters['python_binary'].convert(
base_path,
name=fbsphinx_wrapper_target,
par_style='xar',
py_version='>=3.6',
main_module='fbsphinx.bin.fbsphinx_wrapper',
deps=python_deps,
):
yield rule
additional_doc_rules = []
for rule in self._gen_apidoc_rules(
base_path,
name,
fbsphinx_wrapper_target,
apidoc_modules,
):
additional_doc_rules.append(rule)
yield rule
for rule in self._gen_genrule_srcs_rules(
base_path,
name,
genrule_srcs,
):
additional_doc_rules.append(rule)
yield rule
command = ' '.join((
'echo {BUCK_NONCE} >/dev/null &&',
'$(exe :{fbsphinx_wrapper_target})',
'buck run',
'--target {target}',
'--builder {builder}',
'--sphinxconfig $(location {SPHINXCONFIG_TGT})',
"--config '{config}'",
"--generated-sources '{generated_sources}'",
'.', # source dir
'$OUT',
)).format(
BUCK_NONCE=os.environ.get('BUCK_NONCE', ''),
fbsphinx_wrapper_target=fbsphinx_wrapper_target,
target='//{}:{}'.format(base_path, name),
builder=self.get_builder(),
SPHINXCONFIG_TGT=SPHINXCONFIG_TGT,
config=json.dumps(config or {}),
generated_sources=json.dumps([
'$(location {})'.format(rule.target_name)
for rule
in additional_doc_rules
]),
)
yield Rule('genrule', collections.OrderedDict((
('name', name),
('type', self.get_fbconfig_rule_type()),
('out', 'builder=%s' % self.get_builder()),
('bash', command),
('srcs', srcs),
('labels', self.get_labels(name, **kwargs)),
)))
def get_labels(self, name, **kwargs):
return ()
def get_extra_confpy_assignments(self, name, **kwargs):
return collections.OrderedDict()
class SphinxWikiConverter(_SphinxConverter):
"""
Concrete class for converting sphinx_wiki rules
"""
def get_allowed_args(self):
allowed_args = super(SphinxWikiConverter, self).get_allowed_args()
allowed_args.update({
'srcs',
'wiki_root_path',
})
return allowed_args
def get_fbconfig_rule_type(self):
return 'sphinx_wiki'
def get_builder(self):
return 'wiki'
def get_labels(self, name, **kwargs):
return (
'wiki_root_path:%s' % kwargs.get('wiki_root_path'),
)
class SphinxManpageConverter(_SphinxConverter):
"""
Concrete class for converting sphinx_manpage rules
"""
def get_allowed_args(self):
allowed_args = super(SphinxManpageConverter, self).get_allowed_args()
allowed_args.update({
'src',
'author',
'description',
'section',
'manpage_name',
})
return allowed_args
def get_fbconfig_rule_type(self):
return 'sphinx_manpage'
def get_builder(self):
return 'manpage'
def get_labels(self, name, **kwargs):
return (
'description:%s' % kwargs.get('description'),
'author:%s' % kwargs.get('author'),
'section:%d' % kwargs.get('section', 1),
'manpage_name:%s' % kwargs.get('manpage_name', name),
)
def get_extra_confpy_assignments(self, name, **kwargs):
return {
'man_pages': [{
'doc': 'master_doc',
'name': kwargs.get('manpage_name', name),
'description': kwargs.get('description'),
'author': kwargs.get('author'),
'section': kwargs.get('section', 1),
}],
}
|
[
"collections.OrderedDict",
"os.environ.get",
"json.dumps"
] |
[((10517, 10542), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (10540, 10542), False, 'import collections\n'), ((9585, 9617), 'os.environ.get', 'os.environ.get', (['"""BUCK_NONCE"""', '""""""'], {}), "('BUCK_NONCE', '')\n", (9599, 9617), False, 'import os\n'), ((9840, 9864), 'json.dumps', 'json.dumps', (['(config or {})'], {}), '(config or {})\n', (9850, 9864), False, 'import json\n'), ((7524, 7627), 'collections.OrderedDict', 'collections.OrderedDict', (["(('name', name + '-apidoc-' + module), ('out', outdir), ('bash', command))"], {}), "((('name', name + '-apidoc-' + module), ('out',\n outdir), ('bash', command)))\n", (7547, 7627), False, 'import collections\n')]
|
import requests
import json
print("register node 5000, 5001, 5002, 5003, 5004, 5005")
m_node = {
"nodes" : ["http://127.0.0.1:5000","http://127.0.0.1:5001",
"http://127.0.0.1:5002","http://127.0.0.1:5003","http://127.0.0.1:5004","http://127.0.0.1:5005"]
}
r = requests.post('http://127.0.0.1:5000/nodes/register',json = m_node)
print(r.text)
print("---------------------------------------------------------------")
print("get all info from chain")
r = requests.get('http://127.0.0.1:5000/chain')
print(r.text)
print("---------------------------------------------------------------")
block = {
"upc" : "300871365612",
"item_no":1,
"owner": "TinVu",
}
print("mine 1000 products to blockchain")
for a in range(1, 5):
block['item_no'] = a
#print(block)
r = requests.post('http://127.0.0.1:5000/register',json = block)
r = requests.get('http://127.0.0.1:5000/chain')
print(r.text)
print("---------------------------------------------------------------")
trans_block = {
"upc" : "300871365612",
"item_no":1,
"current_owner": "TinVu",
"new_owner": "A",
}
#r = requests.post('http://127.0.0.1:5000/transaction',json = trans_block)
print("do transaction 10 products to blockchain")
for a in range(1, 5):
trans_block['new_owner'] = trans_block['new_owner']+str(a)
trans_block['item_no'] = a
#print(trans_block)
r = requests.post('http://127.0.0.1:5000/transaction',json = trans_block)
r = requests.get('http://127.0.0.1:5000/chain')
print(r.text)
print("---------------------------------------------------------------")
print("validate 300871365612,1,TinVu ")
murl ='http://127.0.0.1:5000/validate/300871365612,1,TinVu'
r = requests.get(murl)
print(r.text)
print("---------------------------------------------------------------")
print("validate 300871365612,1,A1")
murl ='http://127.0.0.1:5000/validate/300871365612,1,A1'
r = requests.get(murl)
print(r.text)
print("---------------------------------------------------------------")
|
[
"requests.post",
"requests.get"
] |
[((272, 338), 'requests.post', 'requests.post', (['"""http://127.0.0.1:5000/nodes/register"""'], {'json': 'm_node'}), "('http://127.0.0.1:5000/nodes/register', json=m_node)\n", (285, 338), False, 'import requests\n'), ((465, 508), 'requests.get', 'requests.get', (['"""http://127.0.0.1:5000/chain"""'], {}), "('http://127.0.0.1:5000/chain')\n", (477, 508), False, 'import requests\n'), ((861, 904), 'requests.get', 'requests.get', (['"""http://127.0.0.1:5000/chain"""'], {}), "('http://127.0.0.1:5000/chain')\n", (873, 904), False, 'import requests\n'), ((1461, 1504), 'requests.get', 'requests.get', (['"""http://127.0.0.1:5000/chain"""'], {}), "('http://127.0.0.1:5000/chain')\n", (1473, 1504), False, 'import requests\n'), ((1697, 1715), 'requests.get', 'requests.get', (['murl'], {}), '(murl)\n', (1709, 1715), False, 'import requests\n'), ((1902, 1920), 'requests.get', 'requests.get', (['murl'], {}), '(murl)\n', (1914, 1920), False, 'import requests\n'), ((795, 854), 'requests.post', 'requests.post', (['"""http://127.0.0.1:5000/register"""'], {'json': 'block'}), "('http://127.0.0.1:5000/register', json=block)\n", (808, 854), False, 'import requests\n'), ((1386, 1454), 'requests.post', 'requests.post', (['"""http://127.0.0.1:5000/transaction"""'], {'json': 'trans_block'}), "('http://127.0.0.1:5000/transaction', json=trans_block)\n", (1399, 1454), False, 'import requests\n')]
|
# Taken from: https://github.com/jhollowayj/tensorflow_slurm_manager
# ref:
# https://github.com/jhollowayj/tensorflow_slurm_manager/blob/master/slurm_manager.py
# @IgnorePep8
'''
'''
from __future__ import print_function
import os
import re
# import socket
# depends on hostlist: pip install python-hostlist
import hostlist
from .base import ClusterParser
__all__ = ('SlurmClusterParser',)
# It may be useful to know that slurm_nodeid tells you which node you are one
# (in case there is more than one task on any given node...)
# Perhaps you could better assign parameter servers be distributed across all
# nodes before doubleing up on one.
class SlurmClusterParser(ClusterParser):
'''
:param num_param_servers: Default -1 meaning one parameter server per
node. The remaining processes on the node are workers. The
num_parameter_servers be less than or equal to the number of
individual physical nodes
:param starting_port: Starting port for setting up jobs. Default: 2300
TODO: Maybe use SLURM_STEP_RESV_PORTS environment if available.
https://stackoverflow.com/a/36803148/3457624
:param str network: Use a specific network cluster.
Ex. network='ib.cluster' The hosts are then specified as:
'{}.{}'.format(hostname, network)
'''
def __init__(self, num_param_servers=-1, starting_port=2300,
network=None):
num_workers = None
# Check Environment for all needed SLURM variables
# SLURM_NODELIST for backwards compatability if needed.
assert 'SLURM_JOB_NODELIST' in os.environ
assert 'SLURM_TASKS_PER_NODE' in os.environ
assert 'SLURM_PROCID' in os.environ
assert 'SLURM_NPROCS' in os.environ
assert 'SLURM_NNODES' in os.environ
# Grab SLURM variables
# expands 'NAME1(x2),NAME2' -> 'NAME1,NAME1,NAME2'
self._hostnames = hostlist.expand_hostlist(
os.environ['SLURM_JOB_NODELIST'])
if network is not None:
self._hostnames = [
# socket.gethostbyname('{}.{}'.format(hname, network))
'{}.{}'.format(hname, network)
for hname in self._hostnames]
# expands '1,2(x2)' -> '1,2,2'
self._num_tasks_per_host = self._parse_slurm_tasks_per_node(
os.environ['SLURM_TASKS_PER_NODE'])
# index into hostnames/num_tasks_per_host lists
self._my_proc_id = int(os.environ['SLURM_PROCID'])
self.num_processes = int(os.environ['SLURM_NPROCS'])
self.nnodes = int(os.environ['SLURM_NNODES'])
# Sanity check that everything has been parsed correctly
nhosts = len(self.hostnames)
assert nhosts == len(self.num_tasks_per_host)
assert nhosts == self.nnodes
assert self.num_processes == sum(self.num_tasks_per_host)
# Numbber of PS/Workers
# Note: I'm making the assumption that having more than one PS/node
# doesn't add any benefit. It makes code simpler in
# self.build_cluster_spec()
self._num_parameter_servers = min(num_param_servers, nhosts) \
if num_param_servers > 0 else nhosts
if num_workers is None:
# Currently I'm not using num_workers'
# TODO: What happens to num_workers once I allocate less PS than
# they requested?
# default to all other nodes doing something
self.num_workers = self.num_processes - self.num_parameter_servers
# Default port to use
self._starting_port = starting_port # use user specified port
def _parse_slurm_tasks_per_node(self, num_tasks_per_nodes):
'''
SLURM_TASKS_PER_NODE Comes in compressed, so we need to uncompress it:
e.g: if slurm gave us the following setup:
Host 1: 1 process
Host 2: 3 processes
Host 3: 3 processes
Host 4: 4 processes
Then the environment variable SLURM_TASKS_PER_NODE = '1,3(x2),4'
But we need it to become this => [1, 3, 3, 4]
'''
final_list = []
num_tasks_per_nodes = num_tasks_per_nodes.split(',')
for node in num_tasks_per_nodes:
if 'x' in node: # "n(xN)"; n=tasks, N=repeats
n_tasks, n_nodes = [int(n) for n in re.findall('\d+', node)]
final_list += [n_tasks] * n_nodes
else:
final_list.append(int(node))
return final_list
@property
def num_tasks_per_host(self):
'''List of integers with each element specifying number of tasks on a
host. This list and hostnames list must be in the same order.'''
return self._num_tasks_per_host
@property
def hostnames(self):
'''List of hosts with each element specifying the host name.'''
return self._hostnames
@property
def num_parameter_servers(self):
'''Number of parameter servers to create/use in the cluster.'''
return self._num_parameter_servers
@property
def my_proc_id(self):
'''Current process's id or rank.'''
return self._my_proc_id
@property
def starting_port(self):
'''Current process's id or rank.'''
return self._starting_port
if __name__ == '__main__':
# run test via: srun -l python -m keras_exp.distrib.cluster_parsers.slurm
from ._test import test
scpar = SlurmClusterParser()
test(scpar)
# sys.exit(0)
|
[
"re.findall",
"hostlist.expand_hostlist"
] |
[((1964, 2022), 'hostlist.expand_hostlist', 'hostlist.expand_hostlist', (["os.environ['SLURM_JOB_NODELIST']"], {}), "(os.environ['SLURM_JOB_NODELIST'])\n", (1988, 2022), False, 'import hostlist\n'), ((4421, 4445), 're.findall', 're.findall', (['"""\\\\d+"""', 'node'], {}), "('\\\\d+', node)\n", (4431, 4445), False, 'import re\n')]
|
from flask import Flask, render_template, request, jsonify
import conversation
# import traceback
app = Flask(__name__)
app.config["DEBUG"] = True
conversation.initBrain()
@app.route('/')
def index():
return render_template('main_page.html')
@app.route('/api/', methods=["GET","POST"])
def api():
try:
if request.method == "POST":
data = request.get_json()
query = data['query']
reply = conversation.botAnswer(query)
# dict can also be used as param for jsonify
return jsonify(
response=reply,
mode="reply"
)
except Exception as e:
return jsonify(
response="Error: " + str(e) # + '\n>> Traceback <<\n' + str(traceback.print_exc())
)
@app.route('/quote', methods=["GET"])
def quote():
from apis import quotes
try:
return quotes.getQuote()
except Exception as e:
return "Error: " + str(e)
@app.route('/test', methods=["GET"])
def test():
from apis import quotes
try:
return "Test Successful!"
except Exception as e:
return "Error: " + str(e)
if __name__ == "__main__":
app.run()
|
[
"apis.quotes.getQuote",
"conversation.botAnswer",
"conversation.initBrain",
"flask.Flask",
"flask.jsonify",
"flask.render_template",
"flask.request.get_json"
] |
[((106, 121), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (111, 121), False, 'from flask import Flask, render_template, request, jsonify\n'), ((149, 173), 'conversation.initBrain', 'conversation.initBrain', ([], {}), '()\n', (171, 173), False, 'import conversation\n'), ((216, 249), 'flask.render_template', 'render_template', (['"""main_page.html"""'], {}), "('main_page.html')\n", (231, 249), False, 'from flask import Flask, render_template, request, jsonify\n'), ((896, 913), 'apis.quotes.getQuote', 'quotes.getQuote', ([], {}), '()\n', (911, 913), False, 'from apis import quotes\n'), ((372, 390), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (388, 390), False, 'from flask import Flask, render_template, request, jsonify\n'), ((445, 474), 'conversation.botAnswer', 'conversation.botAnswer', (['query'], {}), '(query)\n', (467, 474), False, 'import conversation\n'), ((551, 588), 'flask.jsonify', 'jsonify', ([], {'response': 'reply', 'mode': '"""reply"""'}), "(response=reply, mode='reply')\n", (558, 588), False, 'from flask import Flask, render_template, request, jsonify\n')]
|
import sys
import csv
import json
icsv = csv.DictReader(sys.stdin)
ojson = { "departments": [] }
dept_data = ojson["departments"]
dept_name_idx = {}
for line in icsv:
dept_name = line["dept_name"]
dept = dept_name_idx.get(dept_name)
if dept is None:
dept = dept_name_idx[dept_name] = { "name": dept_name, "employees": [] }
dept_data.append(dept)
dept["employees"].append({
"name": line["empl_name"],
"surname": line["empl_surname"],
"position": line["empl_position"],
"salary": int(line["empl_salary"]) })
json.dump(ojson, sys.stdout, indent=2, sort_keys=True, separators=(',', ': '))
|
[
"json.dump",
"csv.DictReader"
] |
[((43, 68), 'csv.DictReader', 'csv.DictReader', (['sys.stdin'], {}), '(sys.stdin)\n', (57, 68), False, 'import csv\n'), ((590, 668), 'json.dump', 'json.dump', (['ojson', 'sys.stdout'], {'indent': '(2)', 'sort_keys': '(True)', 'separators': "(',', ': ')"}), "(ojson, sys.stdout, indent=2, sort_keys=True, separators=(',', ': '))\n", (599, 668), False, 'import json\n')]
|
import logging
import threading
from typing import Any
from cobald.daemon.debug import NameRepr
class BaseRunner(object):
flavour = None # type: Any
def __init__(self):
self._logger = logging.getLogger(
"cobald.runtime.runner.%s" % NameRepr(self.flavour)
)
self._payloads = []
self._lock = threading.Lock()
#: signal that runner should keep in running
self.running = threading.Event()
#: signal that runner has stopped
self._stopped = threading.Event()
self.running.clear()
self._stopped.set()
def __bool__(self):
with self._lock:
return bool(self._payloads) or self.running.is_set()
def register_payload(self, payload):
"""
Register ``payload`` for asynchronous execution
This runs ``payload`` as an orphaned background task as soon as possible.
It is an error for ``payload`` to return or raise anything without handling it.
"""
with self._lock:
self._payloads.append(payload)
def run_payload(self, payload):
"""
Register ``payload`` for synchronous execution
This runs ``payload`` as soon as possible, blocking until completion.
Should ``payload`` return or raise anything, it is propagated to the caller.
"""
raise NotImplementedError
def run(self):
"""
Execute all current and future payloads
Blocks and executes payloads until :py:meth:`stop` is called.
It is an error for any orphaned payload to return or raise.
"""
self._logger.info("runner started: %s", self)
try:
with self._lock:
assert not self.running.is_set() and self._stopped.is_set(), (
"cannot re-run: %s" % self
)
self.running.set()
self._stopped.clear()
self._run()
except Exception:
self._logger.exception("runner aborted: %s", self)
raise
else:
self._logger.info("runner stopped: %s", self)
finally:
with self._lock:
self.running.clear()
self._stopped.set()
def _run(self):
raise NotImplementedError
def stop(self):
"""Stop execution of all current and future payloads"""
if not self.running.wait(0.2):
return
self._logger.debug("runner disabled: %s", self)
with self._lock:
self.running.clear()
self._stopped.wait()
class OrphanedReturn(Exception):
"""A runnable returned a value without anyone to receive it"""
def __init__(self, who, value):
super().__init__("no caller to receive %s from %s" % (value, who))
self.who = who
self.value = value
|
[
"threading.Lock",
"threading.Event",
"cobald.daemon.debug.NameRepr"
] |
[((347, 363), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (361, 363), False, 'import threading\n'), ((440, 457), 'threading.Event', 'threading.Event', ([], {}), '()\n', (455, 457), False, 'import threading\n'), ((524, 541), 'threading.Event', 'threading.Event', ([], {}), '()\n', (539, 541), False, 'import threading\n'), ((265, 287), 'cobald.daemon.debug.NameRepr', 'NameRepr', (['self.flavour'], {}), '(self.flavour)\n', (273, 287), False, 'from cobald.daemon.debug import NameRepr\n')]
|
import argparse
import subprocess
from pathlib import Path
from multiprocessing import Pool
picard_jar = None
def normalized_unique_reads(number_of_reads, bam):
try:
downsampled_library = downsample(number_of_reads, bam)
unique_bam = remove_duplicates(downsampled_library)
unique_reads = count_bam_reads(unique_bam)
return (bam, number_of_reads, unique_reads)
except:
return (bam, number_of_reads, -1)
def count_bam_reads(filename):
result = subprocess.run(['samtools', 'view', '-c', filename], check=True, stdout=subprocess.PIPE)
count = int(result.stdout.strip().decode('utf-8'))
return count
def downsample(number_of_reads, bam):
# compute the fraction of reads to retain
read_count = count_bam_reads(bam)
fraction = number_of_reads / read_count
if fraction > 1.0:
raise ValueError('Cannot upsample {} from {:d} to {:d}'.format(bam, read_count, number_of_reads))
bam_path = Path(bam)
if bam_path.suffix != '.bam':
raise ValueError('Not a BAM {}'.format(bam))
output = '{}_{:d}.bam'.format(bam_path.stem, number_of_reads)
# run Picard to downsample
subprocess.run(['java', '-Xmx4500m', '-jar', picard_jar, 'DownsampleSam', 'PROBABILITY={:f}'.format(fraction), 'I={}'.format(bam), 'O={}'.format(output)], check=True)
return output
def remove_duplicates(bam):
bam_path = Path(bam)
output = '{}.noduplicates.bam'.format(bam_path.stem)
# run Picard MarkDuplicates to remove duplicates to get unique read count
subprocess.run(['java', '-Xmx4500m', '-jar', picard_jar, 'MarkDuplicates', 'I={}'.format(bam), 'O={}'.format(output), 'M={}.{}'.format(output, 'dedup_stats'), 'REMOVE_DUPLICATES=true', 'BARCODE_TAG=XD', 'ADD_PG_TAG_TO_READS=false', 'MAX_FILE_HANDLES=1000'], check=True)
return output
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Normalize complexity using number of unique reads given a number of reads (hitting targets)", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-n', "--num_threads", help="size of thread pool", type=int, default=1)
parser.add_argument("--picard", help="Broad picard jar", default='/n/groups/reich/matt/pipeline/static/picard-v2.17.10.jar')
parser.add_argument("bams", help="bam files, filtered to 1240k targets and sorted", nargs='+')
args = parser.parse_args()
picard_jar = args.picard
pool = Pool(processes=args.num_threads)
results = []
for number_of_reads in [5e5, 1e6, 2e6, 4e6]:
for library in args.bams:
results.append(pool.apply_async(normalized_unique_reads, args=(int(number_of_reads), library) ) )
pool.close()
pool.join()
for result in results:
values = result.get()
library_path = Path(values[0])
print('{}\t{:d}\t{:d}'.format(library_path.stem, int(values[1]), values[2]))
|
[
"subprocess.run",
"pathlib.Path",
"argparse.ArgumentParser",
"multiprocessing.Pool"
] |
[((458, 551), 'subprocess.run', 'subprocess.run', (["['samtools', 'view', '-c', filename]"], {'check': '(True)', 'stdout': 'subprocess.PIPE'}), "(['samtools', 'view', '-c', filename], check=True, stdout=\n subprocess.PIPE)\n", (472, 551), False, 'import subprocess\n'), ((903, 912), 'pathlib.Path', 'Path', (['bam'], {}), '(bam)\n', (907, 912), False, 'from pathlib import Path\n'), ((1307, 1316), 'pathlib.Path', 'Path', (['bam'], {}), '(bam)\n', (1311, 1316), False, 'from pathlib import Path\n'), ((1770, 1966), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Normalize complexity using number of unique reads given a number of reads (hitting targets)"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description=\n 'Normalize complexity using number of unique reads given a number of reads (hitting targets)'\n , formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n", (1793, 1966), False, 'import argparse\n'), ((2337, 2369), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'args.num_threads'}), '(processes=args.num_threads)\n', (2341, 2369), False, 'from multiprocessing import Pool\n'), ((2657, 2672), 'pathlib.Path', 'Path', (['values[0]'], {}), '(values[0])\n', (2661, 2672), False, 'from pathlib import Path\n')]
|
#!/usr/bin/python
import logging
import logging.handlers
import configparser
from time import sleep
from SolarManager import SolarManager
def log_setup():
formatter = logging.Formatter("%(asctime)s :: %(name)s :: %(levelname)s :: %(message)s")
logLevel = logging.INFO
log_handler = logging.handlers.TimedRotatingFileHandler("logs/SolarManager.log", when="midnight", interval=1, backupCount=30)
log_handler.setFormatter(formatter)
log_handler.setLevel(logLevel)
logger = logging.getLogger()
logger.addHandler(log_handler)
logger.setLevel(logLevel)
log_setup()
LOG = logging.getLogger("SolarManager.Service")
LOG.info("Starting service.")
try:
configFileName = "config.txt"
configParser = configparser.ConfigParser()
configParser.read(configFileName)
sleepTimeSeconds = configParser.getint("SolarManager", "SolarCheckInterval")
solarManager = SolarManager.SolarManager(configParser.get("WeConnect", "Username"), configParser.get("WeConnect", "Password"), configFileName)
while True:
solarManager.run()
LOG.info(f"Sleeping for {sleepTimeSeconds} seconds")
sleep(sleepTimeSeconds)
except Exception as e:
LOG.error(f"An error occured while running the service: {e}", exc_info=True)
raise e
|
[
"time.sleep",
"logging.Formatter",
"logging.handlers.TimedRotatingFileHandler",
"configparser.ConfigParser",
"logging.getLogger"
] |
[((608, 649), 'logging.getLogger', 'logging.getLogger', (['"""SolarManager.Service"""'], {}), "('SolarManager.Service')\n", (625, 649), False, 'import logging\n'), ((179, 255), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s :: %(name)s :: %(levelname)s :: %(message)s"""'], {}), "('%(asctime)s :: %(name)s :: %(levelname)s :: %(message)s')\n", (196, 255), False, 'import logging\n'), ((303, 419), 'logging.handlers.TimedRotatingFileHandler', 'logging.handlers.TimedRotatingFileHandler', (['"""logs/SolarManager.log"""'], {'when': '"""midnight"""', 'interval': '(1)', 'backupCount': '(30)'}), "('logs/SolarManager.log', when=\n 'midnight', interval=1, backupCount=30)\n", (344, 419), False, 'import logging\n'), ((504, 523), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (521, 523), False, 'import logging\n'), ((740, 767), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (765, 767), False, 'import configparser\n'), ((1158, 1181), 'time.sleep', 'sleep', (['sleepTimeSeconds'], {}), '(sleepTimeSeconds)\n', (1163, 1181), False, 'from time import sleep\n')]
|
from django.core.management.base import BaseCommand
from observations.models import Datasourcetype
class Command(BaseCommand):
def handle(self, *args, **options):
Datasourcetype.objects.get_or_create(
name="Digital Matter Sensornode LoRaWAN",
defaults={
"description": "Digital Matter Sensornode LoRaWAN",
"parser": "sensornode",
},
)
|
[
"observations.models.Datasourcetype.objects.get_or_create"
] |
[((178, 352), 'observations.models.Datasourcetype.objects.get_or_create', 'Datasourcetype.objects.get_or_create', ([], {'name': '"""Digital Matter Sensornode LoRaWAN"""', 'defaults': "{'description': 'Digital Matter Sensornode LoRaWAN', 'parser': 'sensornode'}"}), "(name=\n 'Digital Matter Sensornode LoRaWAN', defaults={'description':\n 'Digital Matter Sensornode LoRaWAN', 'parser': 'sensornode'})\n", (214, 352), False, 'from observations.models import Datasourcetype\n')]
|
from django.urls import path
from rest_framework.routers import SimpleRouter
from apps.userAuth.views.v1.views import SendCodeView, UserViewSet
router = SimpleRouter(trailing_slash=False)
router.register('user', UserViewSet, base_name='user')
urlpatterns = [
path('sendcode', SendCodeView.as_view(), name='sendcode'),
# path('register', RegisterView.as_view(), name='register'),
# path('active', ActiveView.as_view(), name='active'),
]
urlpatterns += router.urls
|
[
"apps.userAuth.views.v1.views.SendCodeView.as_view",
"rest_framework.routers.SimpleRouter"
] |
[((156, 190), 'rest_framework.routers.SimpleRouter', 'SimpleRouter', ([], {'trailing_slash': '(False)'}), '(trailing_slash=False)\n', (168, 190), False, 'from rest_framework.routers import SimpleRouter\n'), ((284, 306), 'apps.userAuth.views.v1.views.SendCodeView.as_view', 'SendCodeView.as_view', ([], {}), '()\n', (304, 306), False, 'from apps.userAuth.views.v1.views import SendCodeView, UserViewSet\n')]
|